Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 /* Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 2 * Use of this source code is governed by a BSD-style license that can be | 2 * Use of this source code is governed by a BSD-style license that can be |
| 3 * found in the LICENSE file. | 3 * found in the LICENSE file. |
| 4 */ | 4 */ |
| 5 | 5 |
| 6 #include <errno.h> | 6 #include <errno.h> |
| 7 #include <fcntl.h> | 7 #include <fcntl.h> |
| 8 #include <stdio.h> | 8 #include <stdio.h> |
| 9 #include <sys/ioctl.h> | 9 #include <sys/ioctl.h> |
| 10 #include <sys/stat.h> | 10 #include <sys/stat.h> |
| 11 #include <sys/time.h> | 11 #include <sys/time.h> |
| 12 | 12 |
| 13 #include "gtest/gtest.h" | 13 #include "gtest/gtest.h" |
| 14 | 14 |
| 15 #include "nacl_io/event_emitter.h" | 15 #include "nacl_io/event_emitter.h" |
| 16 #include "nacl_io/event_listener.h" | 16 #include "nacl_io/event_listener.h" |
| 17 #include "nacl_io/kernel_intercept.h" | 17 #include "nacl_io/kernel_intercept.h" |
| 18 #include "nacl_io/kernel_proxy.h" | 18 #include "nacl_io/kernel_proxy.h" |
| 19 #include "nacl_io/kernel_wrap.h" | 19 #include "nacl_io/kernel_wrap.h" |
| 20 #include "nacl_io/mount_node_pipe.h" | |
| 21 #include "nacl_io/mount_stream.h" | |
| 22 | |
| 23 #include "ppapi_simple/ps.h" | |
| 20 | 24 |
| 21 | 25 |
| 22 using namespace nacl_io; | 26 using namespace nacl_io; |
| 23 using namespace sdk_util; | 27 using namespace sdk_util; |
| 24 | 28 |
| 25 class EventEmitterTester : public MountNode { | 29 |
| 30 class EventListenerTester : public EventListener { | |
| 26 public: | 31 public: |
| 27 EventEmitterTester() : MountNode(NULL), event_status_(0), event_cnt_(0) {} | 32 EventListenerTester() : EventListener(), events_(0) {}; |
| 28 | 33 |
| 29 void SetEventStatus(uint32_t bits) { event_status_ = bits; } | 34 virtual void ReceiveEvents(EventEmitter* emitter, uint32_t events) { |
| 30 uint32_t GetEventStatus() { return event_status_; } | 35 events_ |= events; |
| 31 | 36 } |
| 32 Error Ioctl(int request, char* arg) { | 37 |
| 33 event_status_ = static_cast<uint32_t>(request); | 38 uint32_t Events() { |
| 34 return 0; | 39 return events_; |
| 35 } | 40 } |
| 36 | 41 |
| 37 int GetType() { return S_IFSOCK; } | 42 void Clear() { |
| 38 int NumEvents() { return event_cnt_; } | 43 events_ = 0; |
| 39 | 44 } |
| 45 | |
| 46 uint32_t events_; | |
| 47 }; | |
| 48 | |
| 49 class EventEmitterTester : public EventEmitter { | |
| 40 public: | 50 public: |
| 41 // Make this function public for testing | 51 void RegisterListener(EventListener* listener, uint32_t events) { |
binji 2013/09/12 01:47:57: using EventEmitter::RegisterListener; This makes
noelallen1 2013/09/12 23:19:03: Done.
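A minimal sketch of the approach binji's comment points at, assuming `RegisterListener()` and `UnregisterListener()` are protected members of `EventEmitter` (which is what the forwarding wrappers in the patch imply):

```cpp
class EventEmitterTester : public EventEmitter {
 public:
  // Re-expose the protected base-class registration methods for the test
  // instead of writing public forwarding wrappers.
  using EventEmitter::RegisterListener;
  using EventEmitter::UnregisterListener;

  uint32_t GetEventStatus() { return 0; }

  void Raise(uint32_t events) {
    AUTO_LOCK(emitter_lock_);
    RaiseEvents_Locked(events);
  }
};
```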
| 42 void RaiseEvent(uint32_t events) { | 52 EventEmitter::RegisterListener(listener, events); |
| 43 EventEmitter::RaiseEvent(events); | 53 } |
| 44 } | 54 void UnregisterListener(EventListener* listener) { |
binji 2013/09/12 01:47:57: ditto
noelallen1 2013/09/12 23:19:03: Done.
| 45 | 55 EventEmitter::UnregisterListener(listener); |
| 46 // Called after registering locally, but while lock is still held. | 56 } |
| 47 void ChainRegisterEventInfo(const ScopedEventInfo& event) { | 57 |
| 48 event_cnt_++; | 58 uint32_t GetEventStatus() { return 0; } |
| 49 } | 59 |
| 50 | 60 void Raise(uint32_t events) { |
| 51 // Called before unregistering locally, but while lock is still held. | 61 AUTO_LOCK(emitter_lock_); |
| 52 void ChainUnregisterEventInfo(const ScopedEventInfo& event) { | 62 RaiseEvents_Locked(events); |
| 53 event_cnt_--; | 63 } |
| 54 } | 64 }; |
| 55 | 65 |
| 56 protected: | 66 TEST(Emitter, Basic) { |
| 57 uint32_t event_status_; | 67 EventListenerTester listenerA; |
binji 2013/09/12 01:47:57: nit: listener_a
noelallen1 2013/09/12 23:19:03: Done.
| 58 uint32_t event_cnt_; | 68 EventListenerTester listenerB; |
| 59 }; | 69 EventEmitterTester emitter; |
| 60 | 70 |
| 61 | 71 emitter.RegisterListener(&listenerA, POLLIN | POLLOUT | POLLERR); |
| 62 const int MAX_EVENTS = 8; | 72 emitter.RegisterListener(&listenerB, POLLIN | POLLOUT | POLLERR); |
| 63 | 73 |
| 64 // IDs for Emitters | 74 EXPECT_EQ(0, emitter.GetEventStatus()); |
| 65 const int ID_EMITTER = 5; | 75 EXPECT_EQ(0, listenerA.Events()); |
| 66 const int ID_LISTENER = 6; | 76 |
| 67 const int ID_EMITTER_DUP = 7; | 77 emitter.Raise(POLLIN); |
| 68 | 78 EXPECT_EQ(POLLIN, listenerA.Events()); |
| 69 // Kernel Event values | 79 |
| 70 const uint32_t KE_EXPECTED = 4; | 80 listenerA.Clear(); |
| 71 const uint32_t KE_FILTERED = 2; | 81 |
| 72 const uint32_t KE_NONE = 0; | 82 emitter.Raise(POLLOUT); |
| 73 | 83 EXPECT_EQ(POLLOUT, listenerA.Events()); |
| 74 // User Data values | 84 EXPECT_EQ(POLLIN | POLLOUT, listenerB.Events()); |
| 75 const uint64_t USER_DATA_A = 1; | 85 } |
| 76 const uint64_t USER_DATA_B = 5; | 86 |
| 77 | 87 |
| 78 // Timeout durations | 88 TEST(PipeTest, Listener) { |
| 79 const int TIMEOUT_IMMEDIATE = 0; | 89 const char hello[] = "Hello World."; |
| 80 const int TIMEOUT_SHORT= 100; | 90 char tmp[64] = "Goodbye"; |
| 81 const int TIMEOUT_LONG = 500; | 91 |
| 82 const int TIMEOUT_NEVER = -1; | 92 EventEmitterPipe* pipe = new EventEmitterPipe(32); |
binji 2013/09/12 01:47:57: If you're going to ignore the ref-count, why not j
noelallen1 2013/09/12 23:19:03: Done.
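One hedged reading of the (truncated) ref-count comment: since this file already manages emitters through `sdk_util::ScopedRef`, the pipe emitter could be held the same way instead of a paired `new`/`delete`. A sketch under that assumption, not the final patch:

```cpp
TEST(PipeTest, ListenerScopedRef) {
  // Hold the ref-counted emitter in a ScopedRef instead of deleting it
  // manually at the end of the test.
  ScopedRef<EventEmitterPipe> pipe(new EventEmitterPipe(32));

  // Expect to time out on input, as in the patch above.
  EventListenerLock locker(pipe.get());
  EXPECT_EQ(ETIMEDOUT, locker.WaitOnEvent(POLLIN, 0));
  // The reference is released automatically when |pipe| goes out of scope.
}
```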
| 83 const int TIMEOUT_VERY_LONG = 1000; | 93 |
| 84 | 94 // Expect to time out on input. |
| 85 // We subtract TIMEOUT_SLOP from the expected minimum timed due to rounding | 95 { |
| 86 // and clock drift converting between absolute and relative time. This should | 96 EventListenerLock locker(pipe); |
| 87 // only be 1 for Less Than, and 1 for rounding, but we use 10 since we don't | 97 EXPECT_EQ(ETIMEDOUT, locker.WaitOnEvent(POLLIN, 0)); |
| 88 // care about real precision, aren't testing of the underlying | 98 } |
| 89 // implementations and don't want flakiness. | 99 |
| 90 const int TIMEOUT_SLOP = 10; | 100 // Output should be ready to go. |
| 91 | 101 { |
| 92 TEST(EventTest, EmitterBasic) { | 102 EventListenerLock locker(pipe); |
| 93 ScopedRef<EventEmitterTester> emitter(new EventEmitterTester()); | 103 EXPECT_EQ(0, locker.WaitOnEvent(POLLOUT, 0)); |
| 94 ScopedRef<EventEmitter> null_emitter; | 104 EXPECT_EQ(sizeof(hello), pipe->Write_Locked(hello, sizeof(hello))); |
| 95 | 105 } |
| 96 ScopedEventListener listener(new EventListener); | 106 |
| 97 | 107 // We should now be able to poll |
| 98 // Verify construction | 108 { |
| 99 EXPECT_EQ(0, emitter->NumEvents()); | 109 EventListenerLock locker(pipe); |
| 100 EXPECT_EQ(0, emitter->GetEventStatus()); | 110 EXPECT_EQ(0, locker.WaitOnEvent(POLLIN, 0)); |
| 101 | 111 EXPECT_EQ(sizeof(hello), pipe->Read_Locked(tmp, sizeof(tmp))); |
| 102 // Verify status | 112 } |
| 103 emitter->SetEventStatus(KE_EXPECTED); | 113 |
| 104 EXPECT_EQ(KE_EXPECTED, emitter->GetEventStatus()); | 114 // Verify we can read it correctly. |
| 105 | 115 EXPECT_EQ(0, strcmp(hello, tmp)); |
| 106 // Fail to update or free an ID not in the set | 116 |
| 107 EXPECT_EQ(ENOENT, listener->Update(ID_EMITTER, KE_EXPECTED, USER_DATA_A)); | 117 delete pipe; |
| 108 EXPECT_EQ(ENOENT, listener->Free(ID_EMITTER)); | 118 } |
| 109 | 119 |
| 110 // Fail to Track self | 120 |
| 111 EXPECT_EQ(EINVAL, listener->Track(ID_LISTENER, | 121 class TestMountStream : public MountStream { |
| 112 listener, | 122 public: |
binji 2013/09/12 01:47:57: nit: 1 space
noelallen1 2013/09/12 23:19:03: Done.
| 113 KE_EXPECTED, | 123 TestMountStream() {}; |
binji 2013/09/12 01:47:57: 2 spaces
binji 2013/09/12 01:47:57: remove ;
noelallen1 2013/09/12 23:19:03: Done.
noelallen1 2013/09/12 23:19:03: Done.
| 114 USER_DATA_A)); | 124 }; |
| 115 | 125 |
| 116 // Set the emitter filter and data | 126 TEST(PipeNodeTest, Basic) { |
| 117 EXPECT_EQ(0, listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A)); | 127 ScopedMount mnt(new TestMountStream()); |
| 118 EXPECT_EQ(1, emitter->NumEvents()); | 128 |
| 119 | 129 MountNodePipe* pipe_node = new MountNodePipe(mnt.get()); |
| 120 // Fail to add the same ID | 130 ScopedRef<MountNodePipe> pipe(pipe_node); |
| 121 EXPECT_EQ(EEXIST, | 131 |
| 122 listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A)); | 132 EXPECT_EQ(POLLOUT, pipe_node->GetEventStatus()); |
| 123 EXPECT_EQ(1, emitter->NumEvents()); | 133 } |
| 124 | 134 |
| 125 int event_cnt = 0; | 135 |
| 126 EventData ev[MAX_EVENTS]; | 136 |
| 127 | 137 #define MAX_FDS 32 |
binji 2013/09/12 01:47:57: use const instead of define
noelallen1 2013/09/12 23:19:03: Done.
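A sketch of the "const instead of define" suggestion; only the declaration changes, and the fixture's array keeps using the same name:

```cpp
// was: #define MAX_FDS 32
const int MAX_FDS = 32;

// ... unchanged usage inside the fixture:
//   struct pollfd pollfds[MAX_FDS];
```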
| 128 // Do not allow a wait with a zero events count. | 138 class SelectPollTest : public ::testing::Test { |
| 129 EXPECT_EQ(EINVAL, listener->Wait(ev, 0, TIMEOUT_IMMEDIATE, &event_cnt)); | |
| 130 | |
| 131 // Do not allow a wait with a negative events count. | |
| 132 EXPECT_EQ(EINVAL, listener->Wait(ev, -1, TIMEOUT_IMMEDIATE, &event_cnt)); | |
| 133 | |
| 134 // Do not allow a wait with a NULL EventData pointer | |
| 135 EXPECT_EQ(EFAULT, | |
| 136 listener->Wait(NULL, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); | |
| 137 | |
| 138 // Return with no events if the Emitter has no signals set. | |
| 139 memset(ev, 0, sizeof(ev)); | |
| 140 event_cnt = 100; | |
| 141 emitter->SetEventStatus(KE_NONE); | |
| 142 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); | |
| 143 EXPECT_EQ(0, event_cnt); | |
| 144 | |
| 145 // Return with no events if the Emitter has a filtered signals set. | |
| 146 memset(ev, 0, sizeof(ev)); | |
| 147 event_cnt = 100; | |
| 148 emitter->SetEventStatus(KE_FILTERED); | |
| 149 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); | |
| 150 EXPECT_EQ(0, event_cnt); | |
| 151 | |
| 152 // Return with one event if the Emitter has the expected signal set. | |
| 153 memset(ev, 0, sizeof(ev)); | |
| 154 event_cnt = 100; | |
| 155 emitter->SetEventStatus(KE_EXPECTED); | |
| 156 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); | |
| 157 EXPECT_EQ(1, event_cnt); | |
| 158 EXPECT_EQ(USER_DATA_A, ev[0].user_data); | |
| 159 EXPECT_EQ(KE_EXPECTED, ev[0].events); | |
| 160 | |
| 161 // Return with one event containing only the expected signal. | |
| 162 memset(ev, 0, sizeof(ev)); | |
| 163 event_cnt = 100; | |
| 164 emitter->SetEventStatus(KE_EXPECTED | KE_FILTERED); | |
| 165 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); | |
| 166 EXPECT_EQ(1, event_cnt); | |
| 167 EXPECT_EQ(USER_DATA_A, ev[0].user_data); | |
| 168 EXPECT_EQ(KE_EXPECTED, ev[0].events); | |
| 169 | |
| 170 // Change the USER_DATA on an existing event | |
| 171 EXPECT_EQ(0, listener->Update(ID_EMITTER, KE_EXPECTED, USER_DATA_B)); | |
| 172 | |
| 173 // Return with one event signaled with the alternate USER DATA | |
| 174 memset(ev, 0, sizeof(ev)); | |
| 175 event_cnt = 100; | |
| 176 emitter->SetEventStatus(KE_EXPECTED | KE_FILTERED); | |
| 177 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, 0, &event_cnt)); | |
| 178 EXPECT_EQ(1, event_cnt); | |
| 179 EXPECT_EQ(USER_DATA_B, ev[0].user_data); | |
| 180 EXPECT_EQ(KE_EXPECTED, ev[0].events); | |
| 181 | |
| 182 // Reset the USER_DATA. | |
| 183 EXPECT_EQ(0, listener->Update(ID_EMITTER, KE_EXPECTED, USER_DATA_A)); | |
| 184 | |
| 185 // Support adding a DUP. | |
| 186 EXPECT_EQ(0, listener->Track(ID_EMITTER_DUP, | |
| 187 emitter, | |
| 188 KE_EXPECTED, | |
| 189 USER_DATA_A)); | |
| 190 EXPECT_EQ(2, emitter->NumEvents()); | |
| 191 | |
| 192 // Return unsignaled. | |
| 193 memset(ev, 0, sizeof(ev)); | |
| 194 emitter->SetEventStatus(KE_NONE); | |
| 195 event_cnt = 100; | |
| 196 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); | |
| 197 EXPECT_EQ(0, event_cnt); | |
| 198 | |
| 199 // Return with two event signaled with expected data. | |
| 200 memset(ev, 0, sizeof(ev)); | |
| 201 emitter->SetEventStatus(KE_EXPECTED); | |
| 202 event_cnt = 100; | |
| 203 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); | |
| 204 EXPECT_EQ(2, event_cnt); | |
| 205 EXPECT_EQ(USER_DATA_A, ev[0].user_data); | |
| 206 EXPECT_EQ(KE_EXPECTED, ev[0].events); | |
| 207 EXPECT_EQ(USER_DATA_A, ev[1].user_data); | |
| 208 EXPECT_EQ(KE_EXPECTED, ev[1].events); | |
| 209 } | |
| 210 | |
| 211 long Duration(struct timeval* start, struct timeval* end) { | |
| 212 if (start->tv_usec > end->tv_usec) { | |
| 213 end->tv_sec -= 1; | |
| 214 end->tv_usec += 1000000; | |
| 215 } | |
| 216 long cur_time = 1000 * (end->tv_sec - start->tv_sec); | |
| 217 cur_time += (end->tv_usec - start->tv_usec) / 1000; | |
| 218 return cur_time; | |
| 219 } | |
| 220 | |
| 221 | |
| 222 // Run a timed wait, and return the average of 8 iterations to reduce | |
| 223 // chance of false negative on outlier. | |
| 224 const int TRIES_TO_AVERAGE = 8; | |
| 225 bool TimedListen(ScopedEventListener& listen, | |
| 226 EventData* ev, | |
| 227 int ev_max, | |
| 228 int ev_expect, | |
| 229 int ms_wait, | |
| 230 long* duration) { | |
| 231 | |
| 232 struct timeval start; | |
| 233 struct timeval end; | |
| 234 long total_time = 0; | |
| 235 | |
| 236 for (int a=0; a < TRIES_TO_AVERAGE; a++) { | |
| 237 gettimeofday(&start, NULL); | |
| 238 | |
| 239 int signaled; | |
| 240 | |
| 241 EXPECT_EQ(0, listen->Wait(ev, ev_max, ms_wait, &signaled)); | |
| 242 EXPECT_EQ(signaled, ev_expect); | |
| 243 | |
| 244 if (signaled != ev_expect) { | |
| 245 return false; | |
| 246 } | |
| 247 | |
| 248 gettimeofday(&end, NULL); | |
| 249 | |
| 250 long cur_time = Duration(&start, &end); | |
| 251 total_time += cur_time; | |
| 252 } | |
| 253 | |
| 254 *duration = total_time / TRIES_TO_AVERAGE; | |
| 255 return true; | |
| 256 } | |
| 257 | |
| 258 | |
| 259 // NOTE: These timing tests are potentially flaky, the real test is | |
| 260 // for the zero timeout should be, has the ConditionVariable been waited on? | |
| 261 // Once we provide a debuggable SimpleCond and SimpleLock we can actually test | |
| 262 // the correct thing. | |
| 263 | |
| 264 // Normal scheduling would expect us to see ~10ms accuracy, but we'll | |
| 265 // use a much bigger number (yet smaller than the MAX_MS_TIMEOUT). | |
| 266 const int SCHEDULING_GRANULARITY = 100; | |
| 267 | |
| 268 const int EXPECT_ONE_EVENT = 1; | |
| 269 const int EXPECT_NO_EVENT = 0; | |
| 270 | |
| 271 TEST(EventTest, EmitterTimeout) { | |
| 272 ScopedRef<EventEmitterTester> emitter(new EventEmitterTester()); | |
| 273 ScopedEventListener listener(new EventListener()); | |
| 274 long duration; | |
| 275 | |
| 276 EventData ev[MAX_EVENTS]; | |
| 277 memset(ev, 0, sizeof(ev)); | |
| 278 EXPECT_EQ(0, listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A)); | |
| 279 | |
| 280 // Return immediately when emitter is signaled, with no timeout | |
| 281 emitter->SetEventStatus(KE_EXPECTED); | |
| 282 memset(ev, 0, sizeof(ev)); | |
| 283 EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_ONE_EVENT, | |
| 284 TIMEOUT_IMMEDIATE, &duration)); | |
| 285 EXPECT_EQ(USER_DATA_A, ev[0].user_data); | |
| 286 EXPECT_EQ(KE_EXPECTED, ev[0].events); | |
| 287 EXPECT_EQ(0, duration); | |
| 288 | |
| 289 // Return immediately when emitter is signaled, even with timeout | |
| 290 emitter->SetEventStatus(KE_EXPECTED); | |
| 291 memset(ev, 0, sizeof(ev)); | |
| 292 EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_ONE_EVENT, | |
| 293 TIMEOUT_LONG, &duration)); | |
| 294 EXPECT_EQ(USER_DATA_A, ev[0].user_data); | |
| 295 EXPECT_EQ(KE_EXPECTED, ev[0].events); | |
| 296 EXPECT_GT(SCHEDULING_GRANULARITY, duration); | |
| 297 | |
| 298 // Return immediately if Emiiter is already signaled when blocking forever. | |
| 299 emitter->SetEventStatus(KE_EXPECTED); | |
| 300 memset(ev, 0, sizeof(ev)); | |
| 301 EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_ONE_EVENT, | |
| 302 TIMEOUT_NEVER, &duration)); | |
| 303 EXPECT_EQ(USER_DATA_A, ev[0].user_data); | |
| 304 EXPECT_EQ(KE_EXPECTED, ev[0].events); | |
| 305 EXPECT_GT(SCHEDULING_GRANULARITY, duration); | |
| 306 | |
| 307 // Return immediately if Emitter is no signaled when not blocking. | |
| 308 emitter->SetEventStatus(KE_NONE); | |
| 309 memset(ev, 0, sizeof(ev)); | |
| 310 EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_NO_EVENT, | |
| 311 TIMEOUT_IMMEDIATE, &duration)); | |
| 312 EXPECT_EQ(0, duration); | |
| 313 | |
| 314 // Wait TIMEOUT_LONG if the emitter is not in a signaled state. | |
| 315 emitter->SetEventStatus(KE_NONE); | |
| 316 memset(ev, 0, sizeof(ev)); | |
| 317 EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_NO_EVENT, | |
| 318 TIMEOUT_LONG, &duration)); | |
| 319 EXPECT_LT(TIMEOUT_LONG - TIMEOUT_SLOP, duration); | |
| 320 EXPECT_GT(TIMEOUT_LONG + SCHEDULING_GRANULARITY, duration); | |
| 321 } | |
| 322 | |
| 323 struct SignalInfo { | |
| 324 EventEmitterTester* em; | |
| 325 unsigned int ms_wait; | |
| 326 uint32_t events; | |
| 327 }; | |
| 328 | |
| 329 static void *SignalEmitterThread(void *ptr) { | |
| 330 SignalInfo* info = (SignalInfo*) ptr; | |
| 331 struct timespec ts; | |
| 332 ts.tv_sec = 0; | |
| 333 ts.tv_nsec = info->ms_wait * 1000000; | |
| 334 | |
| 335 nanosleep(&ts, NULL); | |
| 336 | |
| 337 info->em->RaiseEvent(info->events); | |
| 338 return NULL; | |
| 339 } | |
| 340 | |
| 341 TEST(EventTest, EmitterSignalling) { | |
| 342 ScopedRef<EventEmitterTester> emitter(new EventEmitterTester()); | |
| 343 ScopedEventListener listener(new EventListener); | |
| 344 | |
| 345 SignalInfo siginfo; | |
| 346 struct timeval start; | |
| 347 struct timeval end; | |
| 348 long duration; | |
| 349 | |
| 350 EventData ev[MAX_EVENTS]; | |
| 351 memset(ev, 0, sizeof(ev)); | |
| 352 EXPECT_EQ(0, listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A)); | |
| 353 | |
| 354 // Setup another thread to wait 1/4 of the max time, and signal both | |
| 355 // an expected, and unexpected value. | |
| 356 siginfo.em = emitter.get(); | |
| 357 siginfo.ms_wait = TIMEOUT_SHORT; | |
| 358 siginfo.events = KE_EXPECTED | KE_FILTERED; | |
| 359 pthread_t tid; | |
| 360 pthread_create(&tid, NULL, SignalEmitterThread, &siginfo); | |
| 361 | |
| 362 // Wait for the signal from the other thread and time it. | |
| 363 gettimeofday(&start, NULL); | |
| 364 int cnt = 0; | |
| 365 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_VERY_LONG, &cnt)); | |
| 366 EXPECT_EQ(1, cnt); | |
| 367 gettimeofday(&end, NULL); | |
| 368 | |
| 369 // Verify the wait duration, and that we only recieved the expected signal. | |
| 370 duration = Duration(&start, &end); | |
| 371 EXPECT_GT(TIMEOUT_SHORT + SCHEDULING_GRANULARITY, duration); | |
| 372 EXPECT_LT(TIMEOUT_SHORT - TIMEOUT_SLOP, duration); | |
| 373 EXPECT_EQ(USER_DATA_A, ev[0].user_data); | |
| 374 EXPECT_EQ(KE_EXPECTED, ev[0].events); | |
| 375 } | |
| 376 | |
| 377 | |
| 378 namespace { | |
| 379 | |
| 380 class KernelProxyPolling : public KernelProxy { | |
| 381 public: | |
| 382 virtual int socket(int domain, int type, int protocol) { | |
| 383 ScopedMount mnt; | |
| 384 ScopedMountNode node(new EventEmitterTester()); | |
| 385 ScopedKernelHandle handle(new KernelHandle(mnt, node)); | |
| 386 | |
| 387 Error error = handle->Init(0); | |
| 388 if (error) { | |
| 389 errno = error; | |
| 390 return -1; | |
| 391 } | |
| 392 | |
| 393 return AllocateFD(handle); | |
| 394 } | |
| 395 }; | |
| 396 | |
| 397 class KernelProxyPollingTest : public ::testing::Test { | |
| 398 public: | 139 public: |
| 399 void SetUp() { | 140 void SetUp() { |
| 400 ki_init(&kp_); | 141 kp = new KernelProxy(); |
| 142 kp->Init(NULL); | |
| 143 EXPECT_EQ(0, kp->umount("/")); | |
| 144 EXPECT_EQ(0, kp->mount("", "/", "memfs", 0, NULL)); | |
| 145 | |
| 146 memset(&tv, 0, sizeof(tv)); | |
| 401 } | 147 } |
| 402 | 148 |
| 403 void TearDown() { | 149 void TearDown() { |
| 404 ki_uninit(); | 150 delete kp; |
| 151 } | |
| 152 | |
| 153 void SetFDs(int* fds, int cnt) { | |
| 154 FD_ZERO(&rd_set); | |
| 155 FD_ZERO(&wr_set); | |
| 156 FD_ZERO(&ex_set); | |
| 157 | |
| 158 for (int index = 0; index < cnt; index++) { | |
| 159 EXPECT_NE(-1, fds[index]); | |
| 160 FD_SET(fds[index], &rd_set); | |
| 161 FD_SET(fds[index], &wr_set); | |
| 162 FD_SET(fds[index], &ex_set); | |
| 163 | |
| 164 pollfds[index].fd = fds[0]; | |
binji 2013/09/12 01:47:57: fds[index]?
noelallen1 2013/09/12 23:19:03: doh!
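The fix binji is pointing at, shown in context: each pollfd entry should track its own descriptor rather than fds[0]. A sketch of the corrected loop body in SetFDs():

```cpp
for (int index = 0; index < cnt; index++) {
  EXPECT_NE(-1, fds[index]);
  FD_SET(fds[index], &rd_set);
  FD_SET(fds[index], &wr_set);
  FD_SET(fds[index], &ex_set);

  pollfds[index].fd = fds[index];   // was: fds[0]
  pollfds[index].events = POLLIN | POLLOUT;
  pollfds[index].revents = -1;
}
```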
| 165 pollfds[index].events = POLLIN | POLLOUT; | |
| 166 pollfds[index].revents = -1; | |
| 167 } | |
| 168 } | |
| 169 | |
| 170 void CloseFDs(int* fds, int cnt) { | |
| 171 for (int index = 0; index < cnt; index++) | |
| 172 kp->close(fds[index]); | |
| 405 } | 173 } |
| 406 | 174 |
| 407 protected: | 175 protected: |
| 408 KernelProxyPolling kp_; | 176 KernelProxy* kp; |
| 409 }; | 177 |
| 410 | 178 timeval tv; |
| 411 } // namespace | |
| 412 | |
| 413 | |
| 414 #define SOCKET_CNT 4 | |
| 415 void SetFDs(fd_set* set, int* fds) { | |
| 416 FD_ZERO(set); | |
| 417 | |
| 418 FD_SET(0, set); | |
| 419 FD_SET(1, set); | |
| 420 FD_SET(2, set); | |
| 421 | |
| 422 for (int index = 0; index < SOCKET_CNT; index++) | |
| 423 FD_SET(fds[index], set); | |
| 424 } | |
| 425 | |
| 426 TEST_F(KernelProxyPollingTest, Select) { | |
| 427 int fds[SOCKET_CNT]; | |
| 428 | |
| 429 fd_set rd_set; | 179 fd_set rd_set; |
| 430 fd_set wr_set; | 180 fd_set wr_set; |
| 431 | 181 fd_set ex_set; |
| 432 FD_ZERO(&rd_set); | 182 struct pollfd pollfds[MAX_FDS]; |
| 433 FD_ZERO(&wr_set); | 183 }; |
| 434 | 184 |
| 435 FD_SET(0, &rd_set); | 185 TEST_F(SelectPollTest, PollMemPipe) { |
| 436 FD_SET(1, &rd_set); | 186 int fds[2]; |
| 437 FD_SET(2, &rd_set); | 187 |
| 438 | 188 // Both FDs for regular files should be read/write but not exception. |
| 439 FD_SET(0, &wr_set); | 189 fds[0] = kp->open("/test.txt", O_CREAT | O_WRONLY); |
| 440 FD_SET(1, &wr_set); | 190 fds[1] = kp->open("/test.txt", O_RDONLY); |
| 441 FD_SET(2, &wr_set); | 191 |
| 442 | 192 SetFDs(fds, 2); |
| 443 // Expect normal files to select as read, write, and error | 193 |
| 444 int cnt = select(4, &rd_set, &rd_set, &rd_set, NULL); | 194 EXPECT_EQ(2, kp->poll(pollfds, 2, 0)); |
| 445 EXPECT_EQ(3 * 3, cnt); | 195 EXPECT_EQ(POLLIN | POLLOUT, pollfds[0].revents); |
| 446 EXPECT_NE(0, FD_ISSET(0, &rd_set)); | 196 EXPECT_EQ(POLLIN | POLLOUT, pollfds[1].revents); |
| 447 EXPECT_NE(0, FD_ISSET(1, &rd_set)); | 197 CloseFDs(fds, 2); |
| 448 EXPECT_NE(0, FD_ISSET(2, &rd_set)); | 198 |
| 449 | 199 // The write FD should select for write-only, read FD should not select |
| 450 for (int index = 0 ; index < SOCKET_CNT; index++) { | 200 EXPECT_EQ(0, kp->pipe(fds)); |
| 451 fds[index] = socket(0, 0, 0); | 201 SetFDs(fds, 2); |
| 452 EXPECT_NE(-1, fds[index]); | 202 |
| 453 } | 203 EXPECT_EQ(2, kp->poll(pollfds, 2, 0)); |
| 454 | 204 EXPECT_EQ(POLLOUT, pollfds[0].revents); |
| 455 // Highest numbered fd | 205 EXPECT_EQ(POLLOUT, pollfds[1].revents); |
binji 2013/09/12 01:47:57: doesn't match comment (probably from fds[index] bu
noelallen1 2013/09/12 23:19:03: Done.
| 456 const int fdnum = fds[SOCKET_CNT - 1] + 1; | 206 |
| 457 | 207 CloseFDs(fds, 2); |
| 458 // Expect only the normal files to select | 208 } |
| 459 SetFDs(&rd_set, fds); | 209 |
| 460 cnt = select(fds[SOCKET_CNT-1] + 1, &rd_set, NULL, NULL, NULL); | 210 TEST_F(SelectPollTest, SelectMemPipe) { |
| 461 EXPECT_EQ(3, cnt); | 211 int fds[2]; |
| 462 EXPECT_NE(0, FD_ISSET(0, &rd_set)); | 212 |
| 463 EXPECT_NE(0, FD_ISSET(1, &rd_set)); | 213 // Both FDs for regular files should be read/write but not exception. |
| 464 EXPECT_NE(0, FD_ISSET(2, &rd_set)); | 214 fds[0] = kp->open("/test.txt", O_CREAT | O_WRONLY); |
| 465 for (int index = 0 ; index < SOCKET_CNT; index++) { | 215 fds[1] = kp->open("/test.txt", O_RDONLY); |
| 466 EXPECT_EQ(0, FD_ISSET(fds[index], &rd_set)); | 216 SetFDs(fds, 2); |
| 467 } | 217 |
| 468 | 218 EXPECT_EQ(4, kp->select(fds[1] + 1, &rd_set, &wr_set, &ex_set, &tv)); |
| 469 // Poke one of the pollable nodes to be READ ready | |
| 470 ioctl(fds[0], POLLIN, NULL); | |
| 471 | |
| 472 // Expect normal files to be read/write and one pollable node to be read. | |
| 473 SetFDs(&rd_set, fds); | |
| 474 SetFDs(&wr_set, fds); | |
| 475 cnt = select(fdnum, &rd_set, &wr_set, NULL, NULL); | |
| 476 EXPECT_EQ(7, cnt); | |
| 477 EXPECT_NE(0, FD_ISSET(fds[0], &rd_set)); | 219 EXPECT_NE(0, FD_ISSET(fds[0], &rd_set)); |
| 478 EXPECT_EQ(0, FD_ISSET(fds[0], &wr_set)); | 220 EXPECT_NE(0, FD_ISSET(fds[1], &rd_set)); |
| 479 } | 221 EXPECT_NE(0, FD_ISSET(fds[0], &wr_set)); |
| 480 | 222 EXPECT_NE(0, FD_ISSET(fds[1], &wr_set)); |
| 481 | 223 EXPECT_EQ(0, FD_ISSET(fds[0], &ex_set)); |
| 224 EXPECT_EQ(0, FD_ISSET(fds[1], &ex_set)); | |
| 225 | |
| 226 CloseFDs(fds, 2); | |
| 227 | |
| 228 // The write FD should select for write-only, read FD should not select | |
| 229 EXPECT_EQ(0, kp->pipe(fds)); | |
| 230 SetFDs(fds, 2); | |
| 231 | |
| 232 EXPECT_EQ(2, kp->select(fds[1] + 1, &rd_set, &wr_set, &ex_set, &tv)); | |
| 233 EXPECT_EQ(0, FD_ISSET(fds[0], &rd_set)); | |
| 234 EXPECT_EQ(0, FD_ISSET(fds[1], &rd_set)); | |
| 235 // EXPECT_EQ(0, FD_ISSET(fds[0], &wr_set)); | |
binji 2013/09/12 01:47:57: why?
noelallen1 2013/09/12 23:19:03: Done.
| 236 EXPECT_NE(0, FD_ISSET(fds[1], &wr_set)); | |
| 237 EXPECT_EQ(0, FD_ISSET(fds[0], &ex_set)); | |
| 238 EXPECT_EQ(0, FD_ISSET(fds[1], &ex_set)); | |
| 239 } | |
| 240 | |
| 241 | |