| OLD | NEW |
| (Empty) |
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "base/synchronization/condition_variable.h" | |
| 6 | |
| 7 #include <windows.h> | |
| 8 #include <stack> | |
| 9 | |
| 10 #include "base/compiler_specific.h" | |
| 11 #include "base/logging.h" | |
| 12 #include "base/synchronization/lock.h" | |
| 13 #include "base/threading/thread_restrictions.h" | |
| 14 #include "base/time/time.h" | |
| 15 | |
namespace {
// We can't use the linker supported delay-load for kernel32 so all this
// cruft here is to manually late-bind the needed functions.

// Signatures of the Vista-and-later native condition variable entry points.
typedef void (WINAPI *InitializeConditionVariableFn)(PCONDITION_VARIABLE);
typedef BOOL (WINAPI *SleepConditionVariableCSFn)(PCONDITION_VARIABLE,
                                                  PCRITICAL_SECTION, DWORD);
typedef void (WINAPI *WakeConditionVariableFn)(PCONDITION_VARIABLE);
typedef void (WINAPI *WakeAllConditionVariableFn)(PCONDITION_VARIABLE);

// Late-bound entry points. These are only valid for use after
// BindVistaCondVarFunctions() has returned true.
InitializeConditionVariableFn initialize_condition_variable_fn;
SleepConditionVariableCSFn sleep_condition_variable_fn;
WakeConditionVariableFn wake_condition_variable_fn;
WakeAllConditionVariableFn wake_all_condition_variable_fn;

// Resolves the four native condition-variable functions from kernel32.dll.
// Returns true only if every one is present (i.e. we are on Vista or later).
// On failure some of the pointers above may be set and the rest NULL; callers
// must not use any of them unless this returned true.
bool BindVistaCondVarFunctions() {
  HMODULE kernel32 = GetModuleHandleA("kernel32.dll");
  if (!kernel32)
    return false;  // Should not happen; avoid passing NULL to GetProcAddress.
  initialize_condition_variable_fn =
      reinterpret_cast<InitializeConditionVariableFn>(
          GetProcAddress(kernel32, "InitializeConditionVariable"));
  if (!initialize_condition_variable_fn)
    return false;
  sleep_condition_variable_fn =
      reinterpret_cast<SleepConditionVariableCSFn>(
          GetProcAddress(kernel32, "SleepConditionVariableCS"));
  if (!sleep_condition_variable_fn)
    return false;
  wake_condition_variable_fn =
      reinterpret_cast<WakeConditionVariableFn>(
          GetProcAddress(kernel32, "WakeConditionVariable"));
  if (!wake_condition_variable_fn)
    return false;
  wake_all_condition_variable_fn =
      reinterpret_cast<WakeAllConditionVariableFn>(
          GetProcAddress(kernel32, "WakeAllConditionVariable"));
  if (!wake_all_condition_variable_fn)
    return false;
  return true;
}

}  // namespace
| 56 | |
| 57 namespace base { | |
| 58 // Abstract base class of the pimpl idiom. | |
| 59 class ConditionVarImpl { | |
| 60 public: | |
| 61 virtual ~ConditionVarImpl() {}; | |
| 62 virtual void Wait() = 0; | |
| 63 virtual void TimedWait(const TimeDelta& max_time) = 0; | |
| 64 virtual void Broadcast() = 0; | |
| 65 virtual void Signal() = 0; | |
| 66 }; | |
| 67 | |
| 68 /////////////////////////////////////////////////////////////////////////////// | |
| 69 // Windows Vista and Win7 implementation. | |
| 70 /////////////////////////////////////////////////////////////////////////////// | |
| 71 | |
// Thin wrapper over the native CONDITION_VARIABLE API (Vista and later),
// reached through the late-bound function pointers above.
class WinVistaCondVar : public ConditionVarImpl {
 public:
  explicit WinVistaCondVar(Lock* user_lock);
  ~WinVistaCondVar() override {}
  // Overridden from ConditionVarImpl.
  void Wait() override;
  void TimedWait(const TimeDelta& max_time) override;
  void Broadcast() override;
  void Signal() override;

 private:
  // The caller's lock; its critical section is released by the OS while
  // sleeping on |cv_| and reacquired before TimedWait() returns.
  base::Lock& user_lock_;
  // The native condition variable, initialized in the constructor.
  CONDITION_VARIABLE cv_;

  DISALLOW_COPY_AND_ASSIGN(WinVistaCondVar);
};
| 86 | |
// |user_lock| must be non-NULL and must outlive this object.
WinVistaCondVar::WinVistaCondVar(Lock* user_lock)
    : user_lock_(*user_lock) {
  // Check the argument before doing anything else. (A NULL pointer has
  // already been dereferenced in the initializer list above, so keep this
  // check as early as the language allows.)
  DCHECK(user_lock);
  initialize_condition_variable_fn(&cv_);
}
| 92 | |
// Waits with no time limit: INFINITE tells SleepConditionVariableCS never to
// time out, so only a Signal()/Broadcast() (or a spurious wakeup) returns.
void WinVistaCondVar::Wait() {
  TimedWait(TimeDelta::FromMilliseconds(INFINITE));
}
| 96 | |
// Sleeps on |cv_|, atomically releasing the caller's critical section, until
// signaled or |max_time| elapses. The caller's lock is reacquired before
// returning. Spurious wakeups are possible, per the CV usage contract.
void WinVistaCondVar::TimedWait(const TimeDelta& max_time) {
  base::ThreadRestrictions::AssertWaitAllowed();
  DWORD timeout = static_cast<DWORD>(max_time.InMilliseconds());
  CRITICAL_SECTION* cs = user_lock_.lock_.native_handle();

#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
  // Keep the Lock's debug bookkeeping honest: the OS releases the critical
  // section while we sleep and reacquires it before we return.
  user_lock_.CheckHeldAndUnmark();
#endif

  if (FALSE == sleep_condition_variable_fn(&cv_, cs, timeout)) {
    // A FALSE return is expected only when the wait timed out, which
    // SleepConditionVariableCS reports as ERROR_TIMEOUT. Any other last-error
    // value indicates a genuine failure, so assert equality rather than
    // merely excluding one unrelated constant.
    DCHECK_EQ(static_cast<DWORD>(ERROR_TIMEOUT), GetLastError());
  }

#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
  user_lock_.CheckUnheldAndMark();
#endif
}
| 114 | |
// Wakes every thread currently blocked in Wait()/TimedWait().
void WinVistaCondVar::Broadcast() {
  wake_all_condition_variable_fn(&cv_);
}
| 118 | |
// Wakes a single thread blocked in Wait()/TimedWait(), if any.
void WinVistaCondVar::Signal() {
  wake_condition_variable_fn(&cv_);
}
| 122 | |
| 123 /////////////////////////////////////////////////////////////////////////////// | |
| 124 // Windows XP implementation. | |
| 125 /////////////////////////////////////////////////////////////////////////////// | |
| 126 | |
// Emulation of a condition variable for pre-Vista Windows, built from
// per-waiter auto-reset events held in intrusive circular doubly-linked
// lists. See the FAQ comment at the bottom of this file for the rationale.
class WinXPCondVar : public ConditionVarImpl {
 public:
  WinXPCondVar(Lock* user_lock);
  ~WinXPCondVar() override;
  // Overridden from ConditionVarImpl.
  void Wait() override;
  void TimedWait(const TimeDelta& max_time) override;
  void Broadcast() override;
  void Signal() override;

  // Define Event class that is used to form circularly linked lists.
  // The list container is an element with NULL as its handle_ value.
  // The actual list elements have a non-zero handle_ value.
  // All calls to methods MUST be done under protection of a lock so that links
  // can be validated. Without the lock, some links might asynchronously
  // change, and the assertions would fail (as would list change operations).
  class Event {
   public:
    // Default constructor with no arguments creates a list container.
    Event();
    ~Event();

    // InitListElement transitions an instance from a container, to an element.
    void InitListElement();

    // Methods for use on lists.
    bool IsEmpty() const;
    void PushBack(Event* other);
    Event* PopFront();
    Event* PopBack();

    // Methods for use on list elements.
    // Accessor method.
    HANDLE handle() const;
    // Pull an element from a list (if it's in one).
    Event* Extract();

    // Method for use on a list element or on a list.
    bool IsSingleton() const;

   private:
    // Provide pre/post conditions to validate correct manipulations.
    bool ValidateAsDistinct(Event* other) const;
    bool ValidateAsItem() const;
    bool ValidateAsList() const;
    bool ValidateLinks() const;

    // Win32 event handle: 0 for the list container, non-zero for elements.
    HANDLE handle_;
    // Circular links; a singleton (or empty list) points at itself.
    Event* next_;
    Event* prev_;
    DISALLOW_COPY_AND_ASSIGN(Event);
  };

  // Note that RUNNING is an unlikely number to have in RAM by accident.
  // This helps with defensive destructor coding in the face of user error.
  enum RunState { SHUTDOWN = 0, RUNNING = 64213 };

  // Internal implementation methods supporting Wait().
  Event* GetEventForWaiting();
  void RecycleEvent(Event* used_event);

  // RUNNING until the destructor starts; guarded by internal_lock_.
  RunState run_state_;

  // Private critical section for access to member data.
  base::Lock internal_lock_;

  // Lock that is acquired before calling Wait().
  base::Lock& user_lock_;

  // Events that threads are blocked on.
  Event waiting_list_;

  // Free list for old events.
  Event recycling_list_;
  int recycling_list_size_;

  // The number of allocated, but not yet deleted events.
  int allocation_counter_;
};
| 206 | |
| 207 WinXPCondVar::WinXPCondVar(Lock* user_lock) | |
| 208 : user_lock_(*user_lock), | |
| 209 run_state_(RUNNING), | |
| 210 allocation_counter_(0), | |
| 211 recycling_list_size_(0) { | |
| 212 DCHECK(user_lock); | |
| 213 } | |
| 214 | |
// Marks the instance as shutting down and, if any waiters are still inside
// TimedWait() (detected via the event accounting), broadcasts one last time
// so they can drain before member data is destroyed.
WinXPCondVar::~WinXPCondVar() {
  AutoLock auto_lock(internal_lock_);
  run_state_ = SHUTDOWN;  // Prevent any more waiting.

  // Every allocated event should have been recycled by now if all waiters
  // have exited; a mismatch means threads are still waiting.
  DCHECK_EQ(recycling_list_size_, allocation_counter_);
  if (recycling_list_size_ != allocation_counter_) {  // Rare shutdown problem.
    // There are threads of execution still in this->TimedWait() and yet the
    // caller has instigated the destruction of this instance :-/.
    // A common reason for such "overly hasty" destruction is that the caller
    // was not willing to wait for all the threads to terminate.  Such hasty
    // actions are a violation of our usage contract, but we'll give the
    // waiting thread(s) one last chance to exit gracefully (prior to our
    // destruction).
    // Note: waiting_list_ *might* be empty, but recycling is still pending.
    AutoUnlock auto_unlock(internal_lock_);
    Broadcast();  // Make sure all waiting threads have been signaled.
    Sleep(10);  // Give threads a chance to grab internal_lock_.
    // All contained threads should be blocked on user_lock_ by now :-).
  }  // Reacquire internal_lock_.

  DCHECK_EQ(recycling_list_size_, allocation_counter_);
}
| 237 | |
// Waits with no time limit.
void WinXPCondVar::Wait() {
  // Default to "wait forever" timing, which means have to get a Signal()
  // or Broadcast() to come out of this wait state.
  TimedWait(TimeDelta::FromMilliseconds(INFINITE));
}
| 243 | |
// Waits until signaled or |max_time| elapses. The caller must hold
// user_lock_ on entry; it is released while blocked on a per-waiter event
// and reacquired (to the same depth) before returning. Committing to the
// wait (allocating the event onto waiting_list_) happens before user_lock_
// is released, which is what makes the release-and-wait pair effectively
// atomic (see FAQ-question 6).
void WinXPCondVar::TimedWait(const TimeDelta& max_time) {
  base::ThreadRestrictions::AssertWaitAllowed();
  Event* waiting_event;
  HANDLE handle;
  {
    AutoLock auto_lock(internal_lock_);
    if (RUNNING != run_state_) return;  // Destruction in progress.
    waiting_event = GetEventForWaiting();
    handle = waiting_event->handle();
    DCHECK(handle);
  }  // Release internal_lock.

  {
    AutoUnlock unlock(user_lock_);  // Release caller's lock
    WaitForSingleObject(handle, static_cast<DWORD>(max_time.InMilliseconds()));
    // Minimize spurious signal creation window by recycling asap.
    AutoLock auto_lock(internal_lock_);
    RecycleEvent(waiting_event);
    // Release internal_lock_
  }  // Reacquire callers lock to depth at entry.
}
| 265 | |
// Broadcast() is guaranteed to signal all threads that were waiting (i.e., had
// a cv_event internally allocated for them) before Broadcast() was called.
void WinXPCondVar::Broadcast() {
  // Handles are copied out under the lock and signaled outside it, because
  // the list links cannot be safely traversed once internal_lock_ is
  // released. See FAQ-question-10.
  std::stack<HANDLE> handles;
  {
    AutoLock auto_lock(internal_lock_);
    if (waiting_list_.IsEmpty())
      return;
    while (!waiting_list_.IsEmpty())
      // This is not a leak from waiting_list_.  See FAQ-question 12.
      handles.push(waiting_list_.PopBack()->handle());
  }  // Release internal_lock_.
  while (!handles.empty()) {
    SetEvent(handles.top());
    handles.pop();
  }
}
| 283 | |
// Signal() will select one of the waiting threads, and signal it (signal its
// cv_event).  For better performance we signal the thread that went to sleep
// most recently (LIFO).  If we want fairness, then we wake the thread that has
// been sleeping the longest (FIFO).
void WinXPCondVar::Signal() {
  HANDLE handle;
  {
    AutoLock auto_lock(internal_lock_);
    if (waiting_list_.IsEmpty())
      return;  // No one to signal.
    // Only performance option should be used.
    // This is not a leak from waiting_list.  See FAQ-question 12.
    handle = waiting_list_.PopBack()->handle();  // LIFO.
  }  // Release internal_lock_.
  // Signal outside the lock so the woken thread can immediately contend for
  // internal_lock_ without us holding it.
  SetEvent(handle);
}
| 300 | |
// GetEventForWaiting() hands each waiter its own cv_event, reusing one from
// the recycling list when possible. In the worst case we end up allocating
// one event per thread that ever waits on this instance concurrently.
WinXPCondVar::Event* WinXPCondVar::GetEventForWaiting() {
  // The caller (TimedWait) already holds internal_lock_.
  Event* event = NULL;
  if (recycling_list_size_ > 0) {
    // Reuse a previously allocated event from the free list.
    event = recycling_list_.PopFront();
    --recycling_list_size_;
  } else {
    // Free list exhausted: allocate a fresh event element.
    DCHECK(recycling_list_.IsEmpty());
    event = new Event();
    event->InitListElement();
    ++allocation_counter_;
    DCHECK(event->handle());
  }
  waiting_list_.PushBack(event);
  return event;
}
| 321 | |
// RecycleEvent() takes a cv_event that was previously used for Wait()ing, and
// recycles it for use in future Wait() calls for this or other threads.
// Note that there is a tiny chance that the cv_event is still signaled when we
// obtain it, and that can cause spurious signals (if/when we re-use the
// cv_event), but such is quite rare (see FAQ-question-5).
void WinXPCondVar::RecycleEvent(Event* used_event) {
  // We hold internal_lock, courtesy of Wait().
  // If the cv_event timed out, then it is necessary to remove it from
  // waiting_list_.  If it was selected by Broadcast() or Signal(), then it is
  // already gone.
  used_event->Extract();  // Possibly redundant
  recycling_list_.PushBack(used_event);
  recycling_list_size_++;
}
| 336 //------------------------------------------------------------------------------ | |
| 337 // The next section provides the implementation for the private Event class. | |
| 338 //------------------------------------------------------------------------------ | |
| 339 | |
| 340 // Event provides a doubly-linked-list of events for use exclusively by the | |
| 341 // ConditionVariable class. | |
| 342 | |
| 343 // This custom container was crafted because no simple combination of STL | |
| 344 // classes appeared to support the functionality required. The specific | |
| 345 // unusual requirement for a linked-list-class is support for the Extract() | |
| 346 // method, which can remove an element from a list, potentially for insertion | |
| 347 // into a second list. Most critically, the Extract() method is idempotent, | |
| 348 // turning the indicated element into an extracted singleton whether it was | |
| 349 // contained in a list or not. This functionality allows one (or more) of | |
| 350 // threads to do the extraction. The iterator that identifies this extractable | |
| 351 // element (in this case, a pointer to the list element) can be used after | |
| 352 // arbitrary manipulation of the (possibly) enclosing list container. In | |
| 353 // general, STL containers do not provide iterators that can be used across | |
| 354 // modifications (insertions/extractions) of the enclosing containers, and | |
| 355 // certainly don't provide iterators that can be used if the identified | |
| 356 // element is *deleted* (removed) from the container. | |
| 357 | |
| 358 // It is possible to use multiple redundant containers, such as an STL list, | |
| 359 // and an STL map, to achieve similar container semantics. This container has | |
| 360 // only O(1) methods, while the corresponding (multiple) STL container approach | |
| 361 // would have more complex O(log(N)) methods (yeah... N isn't that large). | |
| 362 // Multiple containers also makes correctness more difficult to assert, as | |
| 363 // data is redundantly stored and maintained, which is generally evil. | |
| 364 | |
// A default-constructed Event is a list container: handle_ is 0 and the
// links form a self-referencing (empty) circular list.
WinXPCondVar::Event::Event() : handle_(0), next_(this), prev_(this) {
}
| 368 | |
// For a list container (handle_ == 0) this deletes every remaining element.
// For a list element it closes the Win32 event handle. In either case the
// instance must be a singleton by the time its links are destroyed.
WinXPCondVar::Event::~Event() {
  if (0 == handle_) {
    // This is the list holder
    while (!IsEmpty()) {
      Event* cv_event = PopFront();
      DCHECK(cv_event->ValidateAsItem());
      delete cv_event;
    }
  }
  DCHECK(IsSingleton());
  if (0 != handle_) {
    int ret_val = CloseHandle(handle_);
    DCHECK(ret_val);
  }
}
| 384 | |
// Change a container instance permanently into an element of a list.
// Creates the Win32 event this element's waiter will block on.
void WinXPCondVar::Event::InitListElement() {
  DCHECK(!handle_);
  // Use the Win32 BOOL constant FALSE (not the C++ bool) for the BOOL
  // parameters: the event is auto-reset (a successful wait resets it) and
  // starts in the unsignaled state.
  handle_ = CreateEvent(NULL, FALSE, FALSE, NULL);
  DCHECK(handle_);
}
| 391 | |
// Methods for use on lists.
// An empty list is a container whose links still point at itself.
bool WinXPCondVar::Event::IsEmpty() const {
  DCHECK(ValidateAsList());
  return IsSingleton();
}
| 397 | |
// Appends the singleton element |other| at the back of this list (i.e. just
// before the container). The splice order below matters: |other|'s links are
// set before the list's links are cut over.
void WinXPCondVar::Event::PushBack(Event* other) {
  DCHECK(ValidateAsList());
  DCHECK(other->ValidateAsItem());
  DCHECK(other->IsSingleton());
  // Prepare other for insertion.
  other->prev_ = prev_;
  other->next_ = this;
  // Cut into list.
  prev_->next_ = other;
  prev_ = other;
  DCHECK(ValidateAsDistinct(other));
}
| 410 | |
// Removes and returns the element at the front of this (non-empty) list.
WinXPCondVar::Event* WinXPCondVar::Event::PopFront() {
  DCHECK(ValidateAsList());
  DCHECK(!IsSingleton());
  return next_->Extract();
}
| 416 | |
// Removes and returns the element at the back of this (non-empty) list.
WinXPCondVar::Event* WinXPCondVar::Event::PopBack() {
  DCHECK(ValidateAsList());
  DCHECK(!IsSingleton());
  return prev_->Extract();
}
| 422 | |
// Methods for use on list elements.
// Accessor method. Only valid on elements (which always have a non-zero
// handle), never on the list container.
HANDLE WinXPCondVar::Event::handle() const {
  DCHECK(ValidateAsItem());
  return handle_;
}
| 429 | |
// Pull an element from a list (if it's in one). Idempotent: extracting an
// element that is already a singleton is a no-op, which is what allows both
// a timing-out waiter and Signal()/Broadcast() to race on removal safely
// (under the lock).
WinXPCondVar::Event* WinXPCondVar::Event::Extract() {
  DCHECK(ValidateAsItem());
  if (!IsSingleton()) {
    // Stitch neighbors together.
    next_->prev_ = prev_;
    prev_->next_ = next_;
    // Make extractee into a singleton.
    prev_ = next_ = this;
  }
  DCHECK(IsSingleton());
  return this;
}
| 443 | |
// Method for use on a list element or on a list: true when the links point
// back at this instance (an unlinked element, or an empty list).
bool WinXPCondVar::Event::IsSingleton() const {
  DCHECK(ValidateLinks());
  return next_ == this;
}
| 449 | |
// Provide pre/post conditions to validate correct manipulations.
// True when both instances have consistent links and are not the same object.
bool WinXPCondVar::Event::ValidateAsDistinct(Event* other) const {
  if (this == other)
    return false;
  return ValidateLinks() && other->ValidateLinks();
}
| 454 | |
// A valid list element has a non-zero event handle and consistent links.
bool WinXPCondVar::Event::ValidateAsItem() const {
  return (0 != handle_) && ValidateLinks();
}
| 458 | |
// A valid list container has a zero handle and consistent links.
bool WinXPCondVar::Event::ValidateAsList() const {
  return (0 == handle_) && ValidateLinks();
}
| 462 | |
bool WinXPCondVar::Event::ValidateLinks() const {
  // Make sure both of our neighbors have links that point back to us.
  // We don't do the O(n) check and traverse the whole loop, and instead only
  // do a local check to (and returning from) our immediate neighbors.
  return (next_->prev_ == this) && (prev_->next_ == this);
}
| 469 | |
| 470 | |
| 471 /* | |
| 472 FAQ On WinXPCondVar subtle implementation details: | |
| 473 | |
| 474 1) What makes this problem subtle? Please take a look at "Strategies | |
| 475 for Implementing POSIX Condition Variables on Win32" by Douglas | |
| 476 C. Schmidt and Irfan Pyarali. | |
| 477 http://www.cs.wustl.edu/~schmidt/win32-cv-1.html It includes | |
| 478 discussions of numerous flawed strategies for implementing this | |
| 479 functionality. I'm not convinced that even the final proposed | |
| 480 implementation has semantics that are as nice as this implementation | |
| 481 (especially with regard to Broadcast() and the impact on threads that | |
| 482 try to Wait() after a Broadcast() has been called, but before all the | |
| 483 original waiting threads have been signaled). | |
| 484 | |
| 485 2) Why can't you use a single wait_event for all threads that call | |
| 486 Wait()? See FAQ-question-1, or consider the following: If a single | |
| 487 event were used, then numerous threads calling Wait() could release | |
| 488 their cs locks, and be preempted just before calling | |
| 489 WaitForSingleObject(). If a call to Broadcast() was then presented on | |
| 490 a second thread, it would be impossible to actually signal all | |
| 491 waiting(?) threads. Some number of SetEvent() calls *could* be made, | |
but there could be no guarantee that those led to more than one
| 493 signaled thread (SetEvent()'s may be discarded after the first!), and | |
| 494 there could be no guarantee that the SetEvent() calls didn't just | |
| 495 awaken "other" threads that hadn't even started waiting yet (oops). | |
| 496 Without any limit on the number of requisite SetEvent() calls, the | |
| 497 system would be forced to do many such calls, allowing many new waits | |
| 498 to receive spurious signals. | |
| 499 | |
| 500 3) How does this implementation cause spurious signal events? The | |
| 501 cause in this implementation involves a race between a signal via | |
| 502 time-out and a signal via Signal() or Broadcast(). The series of | |
| 503 actions leading to this are: | |
| 504 | |
| 505 a) Timer fires, and a waiting thread exits the line of code: | |
| 506 | |
| 507 WaitForSingleObject(waiting_event, max_time.InMilliseconds()); | |
| 508 | |
| 509 b) That thread (in (a)) is randomly pre-empted after the above line, | |
| 510 leaving the waiting_event reset (unsignaled) and still in the | |
| 511 waiting_list_. | |
| 512 | |
| 513 c) A call to Signal() (or Broadcast()) on a second thread proceeds, and | |
| 514 selects the waiting cv_event (identified in step (b)) as the event to revive | |
| 515 via a call to SetEvent(). | |
| 516 | |
| 517 d) The Signal() method (step c) calls SetEvent() on waiting_event (step b). | |
| 518 | |
| 519 e) The waiting cv_event (step b) is now signaled, but no thread is | |
| 520 waiting on it. | |
| 521 | |
| 522 f) When that waiting_event (step b) is reused, it will immediately | |
| 523 be signaled (spuriously). | |
| 524 | |
| 525 | |
| 526 4) Why do you recycle events, and cause spurious signals? First off, | |
| 527 the spurious events are very rare. They can only (I think) appear | |
| 528 when the race described in FAQ-question-3 takes place. This should be | |
| 529 very rare. Most(?) uses will involve only timer expiration, or only | |
| 530 Signal/Broadcast() actions. When both are used, it will be rare that | |
| 531 the race will appear, and it would require MANY Wait() and signaling | |
| 532 activities. If this implementation did not recycle events, then it | |
| 533 would have to create and destroy events for every call to Wait(). | |
| 534 That allocation/deallocation and associated construction/destruction | |
| 535 would be costly (per wait), and would only be a rare benefit (when the | |
| 536 race was "lost" and a spurious signal took place). That would be bad | |
| 537 (IMO) optimization trade-off. Finally, such spurious events are | |
| 538 allowed by the specification of condition variables (such as | |
| 539 implemented in Vista), and hence it is better if any user accommodates | |
| 540 such spurious events (see usage note in condition_variable.h). | |
| 541 | |
| 542 5) Why don't you reset events when you are about to recycle them, or | |
| 543 about to reuse them, so that the spurious signals don't take place? | |
| 544 The thread described in FAQ-question-3 step c may be pre-empted for an | |
| 545 arbitrary length of time before proceeding to step d. As a result, | |
| 546 the wait_event may actually be re-used *before* step (e) is reached. | |
| 547 As a result, calling reset would not help significantly. | |
| 548 | |
| 549 6) How is it that the callers lock is released atomically with the | |
| 550 entry into a wait state? We commit to the wait activity when we | |
| 551 allocate the wait_event for use in a given call to Wait(). This | |
| 552 allocation takes place before the caller's lock is released (and | |
| 553 actually before our internal_lock_ is released). That allocation is | |
| 554 the defining moment when "the wait state has been entered," as that | |
| 555 thread *can* now be signaled by a call to Broadcast() or Signal(). | |
| 556 Hence we actually "commit to wait" before releasing the lock, making | |
| 557 the pair effectively atomic. | |
| 558 | |
| 559 8) Why do you need to lock your data structures during waiting, as the | |
| 560 caller is already in possession of a lock? We need to Acquire() and | |
| 561 Release() our internal lock during Signal() and Broadcast(). If we tried | |
| 562 to use a callers lock for this purpose, we might conflict with their | |
| 563 external use of the lock. For example, the caller may use to consistently | |
| 564 hold a lock on one thread while calling Signal() on another, and that would | |
| 565 block Signal(). | |
| 566 | |
| 567 9) Couldn't a more efficient implementation be provided if you | |
| 568 preclude using more than one external lock in conjunction with a | |
| 569 single ConditionVariable instance? Yes, at least it could be viewed | |
| 570 as a simpler API (since you don't have to reiterate the lock argument | |
| 571 in each Wait() call). One of the constructors now takes a specific | |
lock as an argument, and there are corresponding Wait() calls that
don't specify a lock now. It turns out that the resulting implementation
| 574 can't be made more efficient, as the internal lock needs to be used by | |
| 575 Signal() and Broadcast(), to access internal data structures. As a | |
| 576 result, I was not able to utilize the user supplied lock (which is | |
| 577 being used by the user elsewhere presumably) to protect the private | |
| 578 member access. | |
| 579 | |
9) Since you have a second lock, how can we be sure that there is no
| 581 possible deadlock scenario? Our internal_lock_ is always the last | |
| 582 lock acquired, and the first one released, and hence a deadlock (due | |
| 583 to critical section problems) is impossible as a consequence of our | |
| 584 lock. | |
| 585 | |
| 586 10) When doing a Broadcast(), why did you copy all the events into | |
| 587 an STL queue, rather than making a linked-loop, and iterating over it? | |
| 588 The iterating during Broadcast() is done so outside the protection | |
| 589 of the internal lock. As a result, other threads, such as the thread | |
| 590 wherein a related event is waiting, could asynchronously manipulate | |
| 591 the links around a cv_event. As a result, the link structure cannot | |
| 592 be used outside a lock. Broadcast() could iterate over waiting | |
| 593 events by cycling in-and-out of the protection of the internal_lock, | |
| 594 but that appears more expensive than copying the list into an STL | |
| 595 stack. | |
| 596 | |
| 597 11) Why did the lock.h file need to be modified so much for this | |
| 598 change? Central to a Condition Variable is the atomic release of a | |
| 599 lock during a Wait(). This places Wait() functionality exactly | |
| 600 mid-way between the two classes, Lock and Condition Variable. Given | |
| 601 that there can be nested Acquire()'s of locks, and Wait() had to | |
| 602 Release() completely a held lock, it was necessary to augment the Lock | |
| 603 class with a recursion counter. Even more subtle is the fact that the | |
| 604 recursion counter (in a Lock) must be protected, as many threads can | |
| 605 access it asynchronously. As a positive fallout of this, there are | |
| 606 now some DCHECKS to be sure no one Release()s a Lock more than they | |
| 607 Acquire()ed it, and there is ifdef'ed functionality that can detect | |
| 608 nested locks (legal under windows, but not under Posix). | |
| 609 | |
| 610 12) Why is it that the cv_events removed from list in Broadcast() and Signal() | |
| 611 are not leaked? How are they recovered?? The cv_events that appear to leak are | |
| 612 taken from the waiting_list_. For each element in that list, there is currently | |
| 613 a thread in or around the WaitForSingleObject() call of Wait(), and those | |
| 614 threads have references to these otherwise leaked events. They are passed as | |
arguments to be recycled just after returning from WaitForSingleObject().
| 616 | |
| 617 13) Why did you use a custom container class (the linked list), when STL has | |
| 618 perfectly good containers, such as an STL list? The STL list, as with any | |
| 619 container, does not guarantee the utility of an iterator across manipulation | |
| 620 (such as insertions and deletions) of the underlying container. The custom | |
| 621 double-linked-list container provided that assurance. I don't believe any | |
| 622 combination of STL containers provided the services that were needed at the same | |
| 623 O(1) efficiency as the custom linked list. The unusual requirement | |
| 624 for the container class is that a reference to an item within a container (an | |
| 625 iterator) needed to be maintained across an arbitrary manipulation of the | |
| 626 container. This requirement exposes itself in the Wait() method, where a | |
| 627 waiting_event must be selected prior to the WaitForSingleObject(), and then it | |
| 628 must be used as part of recycling to remove the related instance from the | |
waiting_list. A hash table (STL map) could be used, but I was embarrassed to
| 630 use a complex and relatively low efficiency container when a doubly linked list | |
| 631 provided O(1) performance in all required operations. Since other operations | |
| 632 to provide performance-and/or-fairness required queue (FIFO) and list (LIFO) | |
| 633 containers, I would also have needed to use an STL list/queue as well as an STL | |
| 634 map. In the end I decided it would be "fun" to just do it right, and I | |
| 635 put so many assertions (DCHECKs) into the container class that it is trivial to | |
| 636 code review and validate its correctness. | |
| 637 | |
| 638 */ | |
| 639 | |
| 640 ConditionVariable::ConditionVariable(Lock* user_lock) | |
| 641 : impl_(NULL) { | |
| 642 static bool use_vista_native_cv = BindVistaCondVarFunctions(); | |
| 643 if (use_vista_native_cv) | |
| 644 impl_= new WinVistaCondVar(user_lock); | |
| 645 else | |
| 646 impl_ = new WinXPCondVar(user_lock); | |
| 647 } | |
| 648 | |
// Destroys the chosen implementation (see ~WinXPCondVar for the required
// usage contract: no threads may still be waiting).
ConditionVariable::~ConditionVariable() {
  delete impl_;
}
| 652 | |
// Forwards to the selected implementation.
void ConditionVariable::Wait() {
  impl_->Wait();
}
| 656 | |
// Forwards to the selected implementation.
void ConditionVariable::TimedWait(const TimeDelta& max_time) {
  impl_->TimedWait(max_time);
}
| 660 | |
// Forwards to the selected implementation.
void ConditionVariable::Broadcast() {
  impl_->Broadcast();
}
| 664 | |
// Forwards to the selected implementation.
void ConditionVariable::Signal() {
  impl_->Signal();
}
| 668 | |
| 669 } // namespace base | |
| OLD | NEW |