Index: mojo/edk/system/waitable_event.cc
diff --git a/mojo/edk/util/waitable_event.cc b/mojo/edk/system/waitable_event.cc
similarity index 78%
rename from mojo/edk/util/waitable_event.cc
rename to mojo/edk/system/waitable_event.cc
index 405f32e6ce09853005f4195faac0821aa0b4572d..c05d038e0e45e49d1980da91a41fabf17388a614 100644
--- a/mojo/edk/util/waitable_event.cc
+++ b/mojo/edk/system/waitable_event.cc
@@ -2,34 +2,20 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "mojo/edk/util/waitable_event.h"
+#include "mojo/edk/system/waitable_event.h"
-#include <errno.h>
-#include <time.h>
+#include "base/logging.h"
+#include "base/time/time.h"
-#include "mojo/edk/util/logging_internal.h"
+using mojo::util::CondVar;
+using mojo::util::Mutex;
+using mojo::util::MutexLocker;
namespace mojo {
-namespace util {
+namespace system {
namespace {
-// Returns the number of microseconds elapsed since epoch start (according to a
-// monotonic clock).
-uint64_t Now() {
- const uint64_t kMicrosecondsPerSecond = 1000000ULL;
- const uint64_t kNanosecondsPerMicrosecond = 1000ULL;
-
- struct timespec now;
- int error = clock_gettime(CLOCK_MONOTONIC, &now);
- INTERNAL_DCHECK_WITH_ERRNO(!error, "clock_gettime", errno);
- INTERNAL_DCHECK(now.tv_sec >= 0);
- INTERNAL_DCHECK(now.tv_nsec >= 0);
-
- return static_cast<uint64_t>(now.tv_sec) * kMicrosecondsPerSecond +
- static_cast<uint64_t>(now.tv_nsec) / kNanosecondsPerMicrosecond;
-}
-
// Waits with a timeout on |condition()|. Returns true on timeout, or false if
// |condition()| ever returns true. |condition()| should have no side effects
// (and will always be called with |*mutex| held).
@@ -46,7 +32,7 @@ bool WaitWithTimeoutImpl(Mutex* mutex,
// We may get spurious wakeups.
uint64_t wait_remaining = timeout_microseconds;
- uint64_t start = Now();
+ auto start = base::TimeTicks::Now();
while (true) {
    if (cv->WaitWithTimeout(mutex, wait_remaining))
      return true; // Definitely timed out.
@@ -56,9 +42,9 @@ bool WaitWithTimeoutImpl(Mutex* mutex,
      return false;
// Or the wakeup may have been spurious.
- uint64_t now = Now();
- INTERNAL_DCHECK(now >= start);
- uint64_t elapsed = now - start;
+ auto now = base::TimeTicks::Now();
+ DCHECK_GE(now, start);
+ uint64_t elapsed = static_cast<uint64_t>((now - start).InMicroseconds());
// It's possible that we may have timed out anyway.
if (elapsed >= timeout_microseconds)
return true;
@@ -100,7 +86,7 @@ bool AutoResetWaitableEvent::WaitWithTimeout(uint64_t timeout_microseconds) {
// We may get spurious wakeups.
uint64_t wait_remaining = timeout_microseconds;
- uint64_t start = Now();
+ auto start = base::TimeTicks::Now();
while (true) {
if (cv_.WaitWithTimeout(&mutex_, wait_remaining))
return true; // Definitely timed out.
@@ -110,9 +96,9 @@ bool AutoResetWaitableEvent::WaitWithTimeout(uint64_t timeout_microseconds) {
break;
// Or the wakeup may have been spurious.
- uint64_t now = Now();
- INTERNAL_DCHECK(now >= start);
- uint64_t elapsed = now - start;
+ auto now = base::TimeTicks::Now();
+ DCHECK_GE(now, start);
+ uint64_t elapsed = static_cast<uint64_t>((now - start).InMicroseconds());
// It's possible that we may have timed out anyway.
if (elapsed >= timeout_microseconds)
return true;
@@ -169,7 +155,7 @@ bool ManualResetWaitableEvent::WaitWithTimeout(uint64_t timeout_microseconds) {
// Also check |signaled_| in case we're already signaled.
return signaled_ || signal_id_ != last_signal_id;
}, timeout_microseconds);
- INTERNAL_DCHECK(rv || signaled_ || signal_id_ != last_signal_id);
+ DCHECK(rv || signaled_ || signal_id_ != last_signal_id);
return rv;
}
@@ -178,5 +164,5 @@ bool ManualResetWaitableEvent::IsSignaledForTest() {
return signaled_;
}
-} // namespace util
+} // namespace system
} // namespace mojo
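
For context: the heart of this change is the spurious-wakeup loop, which now recomputes the remaining timeout from base::TimeTicks (a monotonic clock) rather than the hand-rolled clock_gettime() helper the patch deletes. Purely as an illustration of that pattern, here is a small standalone sketch written against the C++ standard library (std::condition_variable, std::chrono::steady_clock) instead of the mojo::util/base types; the function name and signature are hypothetical and not part of the patch.

// Illustrative sketch only, not part of the patch: the same "recompute the
// remaining timeout after every wakeup" loop, written against the C++
// standard library instead of mojo::util::CondVar / base::TimeTicks.
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <functional>
#include <mutex>

// Returns true on timeout, false once |condition()| becomes true. The caller
// holds |lock| on entry, mirroring how the mojo version is called with the
// mutex held.
bool WaitWithTimeoutSketch(std::unique_lock<std::mutex>& lock,
                           std::condition_variable& cv,
                           const std::function<bool()>& condition,
                           uint64_t timeout_microseconds) {
  const auto start = std::chrono::steady_clock::now();
  const auto budget = std::chrono::microseconds(timeout_microseconds);
  auto wait_remaining = budget;
  while (true) {
    if (cv.wait_for(lock, wait_remaining) == std::cv_status::timeout)
      return true;   // Definitely timed out.
    if (condition())
      return false;  // Woken up and the condition now holds.
    // The wakeup may have been spurious: recompute how much of the budget is
    // left from the monotonic clock, as the base::TimeTicks code above does.
    const auto elapsed = std::chrono::steady_clock::now() - start;
    if (elapsed >= budget)
      return true;   // Timed out anyway.
    wait_remaining = budget -
        std::chrono::duration_cast<std::chrono::microseconds>(elapsed);
  }
}

The structural point is the same as in the patch: only the clock source changes (base::TimeTicks::Now() in place of the removed Now() helper), while the wait/recheck/recompute shape of the loop stays as it was.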