| OLD | NEW | 
|---|---|
| 1 /* Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 /* Copyright (c) 2013 The Chromium Authors. All rights reserved. | 
| 2  * Use of this source code is governed by a BSD-style license that can be | 2  * Use of this source code is governed by a BSD-style license that can be | 
| 3  * found in the LICENSE file. | 3  * found in the LICENSE file. | 
| 4  */ | 4  */ | 
| 5 | 5 | 
| 6 #include <errno.h> | 6 #include <errno.h> | 
| 7 #include <fcntl.h> | 7 #include <fcntl.h> | 
|  | 8 #include <pthread.h> | 
| 8 #include <stdio.h> | 9 #include <stdio.h> | 
| 9 #include <sys/ioctl.h> | 10 #include <sys/ioctl.h> | 
| 10 #include <sys/stat.h> | 11 #include <sys/stat.h> | 
| 11 #include <sys/time.h> | 12 #include <sys/time.h> | 
| 12 | 13 | 
| 13 #include "gtest/gtest.h" | 14 #include "gtest/gtest.h" | 
| 14 | 15 | 
| 15 #include "nacl_io/event_emitter.h" | 16 #include "nacl_io/event_emitter.h" | 
| 16 #include "nacl_io/event_listener.h" | 17 #include "nacl_io/event_listener.h" | 
|  | 18 #include "nacl_io/event_listener.h" | 
|  | 19 #include "nacl_io/event_listener.h" | 
| 17 #include "nacl_io/kernel_intercept.h" | 20 #include "nacl_io/kernel_intercept.h" | 
| 18 #include "nacl_io/kernel_proxy.h" | 21 #include "nacl_io/kernel_proxy.h" | 
| 19 #include "nacl_io/kernel_wrap.h" | 22 #include "nacl_io/kernel_wrap.h" | 
|  | 23 #include "nacl_io/mount_node_pipe.h" | 
|  | 24 #include "nacl_io/mount_stream.h" | 
|  | 25 | 
|  | 26 #include "ppapi_simple/ps.h" | 
| 20 | 27 | 
| 21 | 28 | 
| 22 using namespace nacl_io; | 29 using namespace nacl_io; | 
| 23 using namespace sdk_util; | 30 using namespace sdk_util; | 
| 24 | 31 | 
| 25 class EventEmitterTester : public MountNode { | 32 | 
| 26  public: | 33 class EventListenerTester : public EventListener { | 
| 27   EventEmitterTester() : MountNode(NULL), event_status_(0), event_cnt_(0) {} | 34  public: | 
| 28 | 35   EventListenerTester() : EventListener(), events_(0) {}; | 
| 29   void SetEventStatus(uint32_t bits) { event_status_ = bits; } | 36 | 
| 30   uint32_t GetEventStatus() { return event_status_; } | 37   virtual void ReceiveEvents(EventEmitter* emitter, uint32_t events) { | 
| 31 | 38     events_ |= events; | 
| 32   Error Ioctl(int request, char* arg) { | 39   } | 
| 33     event_status_ = static_cast<uint32_t>(request); | 40 | 
| 34     return 0; | 41   uint32_t Events() { | 
| 35   } | 42     return events_; | 
| 36 | 43   } | 
| 37   int GetType() { return S_IFSOCK; } | 44 | 
| 38   int NumEvents() { return event_cnt_; } | 45   void Clear() { | 
| 39 | 46     events_ = 0; | 
| 40  public: | 47   } | 
| 41   // Make this function public for testing | 48 | 
| 42   void RaiseEvent(uint32_t events) { | 49   uint32_t events_; | 
| 43     EventEmitter::RaiseEvent(events); | 50 }; | 
| 44   } | 51 | 
| 45 | 52 | 
| 46     // Called after registering locally, but while lock is still held. | 53 TEST(EmitterBasic, SingleThread) { | 
| 47   void ChainRegisterEventInfo(const ScopedEventInfo& event) { | 54   EventListenerTester listener_a; | 
| 48     event_cnt_++; | 55   EventListenerTester listener_b; | 
| 49   } | 56   EventEmitter emitter; | 
| 50 | 57 | 
| 51   // Called before unregistering locally, but while lock is still held. | 58   emitter.RegisterListener(&listener_a, POLLIN | POLLOUT | POLLERR); | 
| 52   void ChainUnregisterEventInfo(const ScopedEventInfo& event) { | 59   emitter.RegisterListener(&listener_b, POLLIN | POLLOUT | POLLERR); | 
| 53     event_cnt_--; | 60 | 
|  | 61   EXPECT_EQ(0, emitter.GetEventStatus()); | 
|  | 62   EXPECT_EQ(0, listener_a.Events()); | 
|  | 63 | 
|  | 64   { | 
|  | 65     AUTO_LOCK(emitter.GetLock()) | 
|  | 66     emitter.RaiseEvents_Locked(POLLIN); | 
|  | 67   } | 
|  | 68   EXPECT_EQ(POLLIN, listener_a.Events()); | 
|  | 69 | 
|  | 70   listener_a.Clear(); | 
|  | 71 | 
|  | 72   { | 
|  | 73     AUTO_LOCK(emitter.GetLock()) | 
|  | 74     emitter.RaiseEvents_Locked(POLLOUT); | 
|  | 75   } | 
|  | 76   EXPECT_EQ(POLLOUT, listener_a.Events()); | 
|  | 77   EXPECT_EQ(POLLIN | POLLOUT, listener_b.Events()); | 
|  | 78 } | 
|  | 79 | 
|  | 80 class EmitterTest : public ::testing::Test { | 
|  | 81  public: | 
|  | 82   void SetUp() { | 
|  | 83     pthread_cond_init(&multi_cond_, NULL); | 
|  | 84     waiting_ = 0; | 
|  | 85     signaled_ = 0; | 
|  | 86   } | 
|  | 87 | 
|  | 88   void TearDown() { | 
|  | 89     pthread_cond_destroy(&multi_cond_); | 
|  | 90   } | 
|  | 91 | 
|  | 92   void CreateThread() { | 
|  | 93     pthread_t id; | 
|  | 94     EXPECT_EQ(0, pthread_create(&id, NULL, ThreadThunk, this)); | 
|  | 95   } | 
|  | 96 | 
|  | 97   static void* ThreadThunk(void *ptr) { | 
|  | 98     return static_cast<EmitterTest*>(ptr)->ThreadEntry(); | 
|  | 99   } | 
|  | 100 | 
|  | 101   void* ThreadEntry() { | 
|  | 102     EventListenerLock listener(&emitter_); | 
|  | 103 | 
|  | 104     pthread_cond_signal(&multi_cond_); | 
|  | 105     waiting_++; | 
|  | 106     EXPECT_EQ(0, listener.WaitOnEvent(POLLIN, -1)); | 
|  | 107     emitter_.ClearEvents_Locked(POLLIN); | 
|  | 108     signaled_ ++; | 
|  | 109     return NULL; | 
| 54   } | 110   } | 
| 55 | 111 | 
| 56  protected: | 112  protected: | 
| 57   uint32_t event_status_; | 113   pthread_cond_t multi_cond_; | 
| 58   uint32_t event_cnt_; | 114   EventEmitter emitter_; | 
| 59 }; | 115 | 
| 60 | 116   uint32_t waiting_; | 
| 61 | 117   uint32_t signaled_; | 
| 62 const int MAX_EVENTS = 8; | 118 }; | 
| 63 | 119 | 
| 64 // IDs for Emitters | 120 | 
| 65 const int ID_EMITTER = 5; | 121 const int NUM_THREADS = 10; | 
| 66 const int ID_LISTENER = 6; | 122 TEST_F(EmitterTest, MultiThread) { | 
| 67 const int ID_EMITTER_DUP = 7; | 123   for (int a=0; a <NUM_THREADS; a++) | 
| 68 | 124     CreateThread(); | 
| 69 // Kernel Event values | 125 | 
| 70 const uint32_t KE_EXPECTED = 4; | 126   sleep(1); | 
| 71 const uint32_t KE_FILTERED = 2; | 127   EXPECT_EQ(0, signaled_); | 
| 72 const uint32_t KE_NONE = 0; | 128 | 
| 73 | 129   { | 
| 74 // User Data values | 130     // Wait for all threads to wait | 
| 75 const uint64_t USER_DATA_A = 1; | 131     while(waiting_ < NUM_THREADS) | 
| 76 const uint64_t USER_DATA_B = 5; | 132       pthread_cond_wait(&multi_cond_, emitter_.GetLock().mutex()); | 
| 77 | 133 | 
| 78 // Timeout durations | 134     emitter_.RaiseEvents_Locked(POLLIN); | 
| 79 const int TIMEOUT_IMMEDIATE = 0; | 135   } | 
| 80 const int TIMEOUT_SHORT = 100; | 136 | 
| 81 const int TIMEOUT_LONG = 500; | 137   sleep(1); | 
| 82 const int TIMEOUT_NEVER = -1; | 138   EXPECT_EQ(1, signaled_); | 
| 83 const int TIMEOUT_VERY_LONG = 1000; | 139 | 
| 84 | 140   { | 
| 85 // We subtract TIMEOUT_SLOP from the expected minimum time due to rounding | 141     AUTO_LOCK(emitter_.GetLock()); | 
| 86 // and clock drift converting between absolute and relative time.  This should | 142     emitter_.RaiseEvents_Locked(POLLIN); | 
| 87 // only be 1 for Less Than, and 1 for rounding, but we use 10 since we don't | 143   } | 
| 88 // care about real precision, aren't testing the underlying | 144 | 
| 89 // implementations and don't want flakiness. | 145   sleep(1); | 
| 90 const int TIMEOUT_SLOP = 10; | 146   EXPECT_EQ(2, signaled_); | 
| 91 | 147 | 
| 92 TEST(EventTest, EmitterBasic) { | 148   // Clean up remaining threads. | 
| 93   ScopedRef<EventEmitterTester> emitter(new EventEmitterTester()); | 149   while (signaled_ < waiting_) { | 
| 94   ScopedRef<EventEmitter> null_emitter; | 150     AUTO_LOCK(emitter_.GetLock()); | 
| 95 | 151 | 
| 96   ScopedEventListener listener(new EventListener); | 152     pthread_cond_wait(&multi_cond_, emitter_.GetLock().mutex()); | 
| 97 | 153     emitter_.RaiseEvents_Locked(POLLIN); | 
| 98   // Verify construction | 154   } | 
| 99   EXPECT_EQ(0, emitter->NumEvents()); | 155 } | 
| 100   EXPECT_EQ(0, emitter->GetEventStatus()); | 156 | 
| 101 | 157 | 
| 102   // Verify status | 158 TEST(PipeTest, Listener) { | 
| 103   emitter->SetEventStatus(KE_EXPECTED); | 159   const char hello[] = "Hello World."; | 
| 104   EXPECT_EQ(KE_EXPECTED, emitter->GetEventStatus()); | 160   char tmp[64] = "Goodbye"; | 
| 105 | 161 | 
| 106   // Fail to update or free an ID not in the set | 162   EventEmitterPipe pipe(32); | 
| 107   EXPECT_EQ(ENOENT, listener->Update(ID_EMITTER, KE_EXPECTED, USER_DATA_A)); | 163 | 
| 108   EXPECT_EQ(ENOENT, listener->Free(ID_EMITTER)); | 164   // Expect to time out on input. | 
| 109 | 165   { | 
| 110   // Fail to Track self | 166     EventListenerLock locker(&pipe); | 
| 111   EXPECT_EQ(EINVAL, listener->Track(ID_LISTENER, | 167     EXPECT_EQ(ETIMEDOUT, locker.WaitOnEvent(POLLIN, 0)); | 
| 112                                     listener, | 168   } | 
| 113                                     KE_EXPECTED, | 169 | 
| 114                                     USER_DATA_A)); | 170   // Output should be ready to go. | 
| 115 | 171   { | 
| 116   // Set the emitter filter and data | 172     EventListenerLock locker(&pipe); | 
| 117   EXPECT_EQ(0, listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A)); | 173     EXPECT_EQ(0, locker.WaitOnEvent(POLLOUT, 0)); | 
| 118   EXPECT_EQ(1, emitter->NumEvents()); | 174     EXPECT_EQ(sizeof(hello), pipe.Write_Locked(hello, sizeof(hello))); | 
| 119 | 175   } | 
| 120   // Fail to add the same ID | 176 | 
| 121   EXPECT_EQ(EEXIST, | 177   // We should now be able to poll | 
| 122             listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A)); | 178   { | 
| 123   EXPECT_EQ(1, emitter->NumEvents()); | 179     EventListenerLock locker(&pipe); | 
| 124 | 180     EXPECT_EQ(0, locker.WaitOnEvent(POLLIN, 0)); | 
| 125   int event_cnt = 0; | 181     EXPECT_EQ(sizeof(hello), pipe.Read_Locked(tmp, sizeof(tmp))); | 
| 126   EventData ev[MAX_EVENTS]; | 182   } | 
| 127 | 183 | 
| 128   // Do not allow a wait with a zero events count. | 184   // Verify we can read it correctly. | 
| 129   EXPECT_EQ(EINVAL, listener->Wait(ev, 0, TIMEOUT_IMMEDIATE, &event_cnt)); | 185   EXPECT_EQ(0, strcmp(hello, tmp)); | 
| 130 | 186 } | 
| 131   // Do not allow a wait with a negative events count. | 187 | 
| 132   EXPECT_EQ(EINVAL, listener->Wait(ev, -1, TIMEOUT_IMMEDIATE, &event_cnt)); | 188 | 
| 133 | 189 class TestMountStream : public MountStream { | 
| 134   // Do not allow a wait with a NULL EventData pointer | 190  public: | 
| 135   EXPECT_EQ(EFAULT, | 191   TestMountStream() {} | 
| 136             listener->Wait(NULL, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); | 192 }; | 
| 137 | 193 | 
| 138   // Return with no events if the Emitter has no signals set. | 194 TEST(PipeNodeTest, Basic) { | 
| 139   memset(ev, 0, sizeof(ev)); | 195   ScopedMount mnt(new TestMountStream()); | 
| 140   event_cnt = 100; | 196 | 
| 141   emitter->SetEventStatus(KE_NONE); | 197   MountNodePipe* pipe_node = new MountNodePipe(mnt.get()); | 
| 142   EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); | 198   ScopedRef<MountNodePipe> pipe(pipe_node); | 
| 143   EXPECT_EQ(0, event_cnt); | 199 | 
| 144 | 200   EXPECT_EQ(POLLOUT, pipe_node->GetEventStatus()); | 
| 145   // Return with no events if the Emitter has only filtered signals set. | 201 } | 
| 146   memset(ev, 0, sizeof(ev)); | 202 | 
| 147   event_cnt = 100; | 203 const int MAX_FDS = 32; | 
| 148   emitter->SetEventStatus(KE_FILTERED); | 204 class SelectPollTest : public ::testing::Test { | 
| 149   EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); | 205  public: | 
| 150   EXPECT_EQ(0, event_cnt); | 206   void SetUp() { | 
| 151 | 207     kp = new KernelProxy(); | 
| 152   // Return with one event if the Emitter has the expected signal set. | 208     kp->Init(NULL); | 
| 153   memset(ev, 0, sizeof(ev)); | 209     EXPECT_EQ(0, kp->umount("/")); | 
| 154   event_cnt = 100; | 210     EXPECT_EQ(0, kp->mount("", "/", "memfs", 0, NULL)); | 
| 155   emitter->SetEventStatus(KE_EXPECTED); | 211 | 
| 156   EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); | 212     memset(&tv, 0, sizeof(tv)); | 
| 157   EXPECT_EQ(1, event_cnt); | 213   } | 
| 158   EXPECT_EQ(USER_DATA_A, ev[0].user_data); | 214 | 
| 159   EXPECT_EQ(KE_EXPECTED, ev[0].events); | 215   void TearDown() { | 
| 160 | 216     delete kp; | 
| 161   // Return with one event containing only the expected signal. | 217   } | 
| 162   memset(ev, 0, sizeof(ev)); | 218 | 
| 163   event_cnt = 100; | 219   void SetFDs(int* fds, int cnt) { | 
| 164   emitter->SetEventStatus(KE_EXPECTED | KE_FILTERED); | 220     FD_ZERO(&rd_set); | 
| 165   EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); | 221     FD_ZERO(&wr_set); | 
| 166   EXPECT_EQ(1, event_cnt); | 222     FD_ZERO(&ex_set); | 
| 167   EXPECT_EQ(USER_DATA_A, ev[0].user_data); | 223 | 
| 168   EXPECT_EQ(KE_EXPECTED, ev[0].events); | 224     for (int index = 0; index < cnt; index++) { | 
| 169 | 225       EXPECT_NE(-1, fds[index]); | 
| 170   // Change the USER_DATA on an existing event | 226       FD_SET(fds[index], &rd_set); | 
| 171   EXPECT_EQ(0, listener->Update(ID_EMITTER, KE_EXPECTED, USER_DATA_B)); | 227       FD_SET(fds[index], &wr_set); | 
| 172 | 228       FD_SET(fds[index], &ex_set); | 
| 173   // Return with one event signaled with the alternate USER DATA | 229 | 
| 174   memset(ev, 0, sizeof(ev)); | 230       pollfds[index].fd = fds[index]; | 
| 175   event_cnt = 100; | 231       pollfds[index].events = POLLIN | POLLOUT; | 
| 176   emitter->SetEventStatus(KE_EXPECTED | KE_FILTERED); | 232       pollfds[index].revents = -1; | 
| 177   EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, 0, &event_cnt)); |  | 
| 178   EXPECT_EQ(1, event_cnt); |  | 
| 179   EXPECT_EQ(USER_DATA_B, ev[0].user_data); |  | 
| 180   EXPECT_EQ(KE_EXPECTED, ev[0].events); |  | 
| 181 |  | 
| 182   // Reset the USER_DATA. |  | 
| 183   EXPECT_EQ(0, listener->Update(ID_EMITTER, KE_EXPECTED, USER_DATA_A)); |  | 
| 184 |  | 
| 185   // Support adding a DUP. |  | 
| 186   EXPECT_EQ(0, listener->Track(ID_EMITTER_DUP, |  | 
| 187                                emitter, |  | 
| 188                                KE_EXPECTED, |  | 
| 189                                USER_DATA_A)); |  | 
| 190   EXPECT_EQ(2, emitter->NumEvents()); |  | 
| 191 |  | 
| 192   // Return unsignaled. |  | 
| 193   memset(ev, 0, sizeof(ev)); |  | 
| 194   emitter->SetEventStatus(KE_NONE); |  | 
| 195   event_cnt = 100; |  | 
| 196   EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); |  | 
| 197   EXPECT_EQ(0, event_cnt); |  | 
| 198 |  | 
| 199   // Return with two events signaled with expected data. |  | 
| 200   memset(ev, 0, sizeof(ev)); |  | 
| 201   emitter->SetEventStatus(KE_EXPECTED); |  | 
| 202   event_cnt = 100; |  | 
| 203   EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); |  | 
| 204   EXPECT_EQ(2, event_cnt); |  | 
| 205   EXPECT_EQ(USER_DATA_A, ev[0].user_data); |  | 
| 206   EXPECT_EQ(KE_EXPECTED, ev[0].events); |  | 
| 207   EXPECT_EQ(USER_DATA_A, ev[1].user_data); |  | 
| 208   EXPECT_EQ(KE_EXPECTED, ev[1].events); |  | 
| 209 } |  | 
| 210 |  | 
| 211 long Duration(struct timeval* start, struct timeval* end) { |  | 
| 212   if (start->tv_usec > end->tv_usec) { |  | 
| 213     end->tv_sec -= 1; |  | 
| 214     end->tv_usec += 1000000; |  | 
| 215   } |  | 
| 216   long cur_time = 1000 * (end->tv_sec - start->tv_sec); |  | 
| 217   cur_time += (end->tv_usec - start->tv_usec) / 1000; |  | 
| 218   return cur_time; |  | 
| 219 } |  | 
| 220 |  | 
| 221 |  | 
| 222 // Run a timed wait, and return the average of 8 iterations to reduce |  | 
| 223 // chance of false negative on outlier. |  | 
| 224 const int TRIES_TO_AVERAGE = 8; |  | 
| 225 bool TimedListen(ScopedEventListener& listen, |  | 
| 226                  EventData* ev, |  | 
| 227                  int ev_max, |  | 
| 228                  int ev_expect, |  | 
| 229                  int ms_wait, |  | 
| 230                  long* duration) { |  | 
| 231 |  | 
| 232   struct timeval start; |  | 
| 233   struct timeval end; |  | 
| 234   long total_time = 0; |  | 
| 235 |  | 
| 236   for (int a=0; a < TRIES_TO_AVERAGE; a++) { |  | 
| 237     gettimeofday(&start, NULL); |  | 
| 238 |  | 
| 239     int signaled; |  | 
| 240 |  | 
| 241     EXPECT_EQ(0, listen->Wait(ev, ev_max, ms_wait, &signaled)); |  | 
| 242     EXPECT_EQ(signaled, ev_expect); |  | 
| 243 |  | 
| 244     if (signaled != ev_expect) { |  | 
| 245       return false; |  | 
| 246     } | 233     } | 
| 247 | 234   } | 
| 248     gettimeofday(&end, NULL); | 235 | 
| 249 | 236   void CloseFDs(int* fds, int cnt) { | 
| 250     long cur_time = Duration(&start, &end); | 237     for (int index = 0; index < cnt; index++) | 
| 251     total_time += cur_time; | 238       kp->close(fds[index]); | 
| 252   } |  | 
| 253 |  | 
| 254   *duration = total_time / TRIES_TO_AVERAGE; |  | 
| 255   return true; |  | 
| 256 } |  | 
| 257 |  | 
| 258 |  | 
| 259 // NOTE:  These timing tests are potentially flaky; the real test for |  | 
| 260 // the zero timeout should be: has the ConditionVariable been waited on? |  | 
| 261 // Once we provide a debuggable SimpleCond and SimpleLock we can actually test |  | 
| 262 // the correct thing. |  | 
| 263 |  | 
| 264 // Normal scheduling would expect us to see ~10ms accuracy, but we'll |  | 
| 265 // use a much bigger number (yet smaller than the MAX_MS_TIMEOUT). |  | 
| 266 const int SCHEDULING_GRANULARITY = 100; |  | 
| 267 |  | 
| 268 const int EXPECT_ONE_EVENT = 1; |  | 
| 269 const int EXPECT_NO_EVENT = 0; |  | 
| 270 |  | 
| 271 TEST(EventTest, EmitterTimeout) { |  | 
| 272   ScopedRef<EventEmitterTester> emitter(new EventEmitterTester()); |  | 
| 273   ScopedEventListener listener(new EventListener()); |  | 
| 274   long duration; |  | 
| 275 |  | 
| 276   EventData ev[MAX_EVENTS]; |  | 
| 277   memset(ev, 0, sizeof(ev)); |  | 
| 278   EXPECT_EQ(0, listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A)); |  | 
| 279 |  | 
| 280   // Return immediately when emitter is signaled, with no timeout |  | 
| 281   emitter->SetEventStatus(KE_EXPECTED); |  | 
| 282   memset(ev, 0, sizeof(ev)); |  | 
| 283   EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_ONE_EVENT, |  | 
| 284                           TIMEOUT_IMMEDIATE, &duration)); |  | 
| 285   EXPECT_EQ(USER_DATA_A, ev[0].user_data); |  | 
| 286   EXPECT_EQ(KE_EXPECTED, ev[0].events); |  | 
| 287   EXPECT_EQ(0, duration); |  | 
| 288 |  | 
| 289   // Return immediately when emitter is signaled, even with timeout |  | 
| 290   emitter->SetEventStatus(KE_EXPECTED); |  | 
| 291   memset(ev, 0, sizeof(ev)); |  | 
| 292   EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_ONE_EVENT, |  | 
| 293                           TIMEOUT_LONG, &duration)); |  | 
| 294   EXPECT_EQ(USER_DATA_A, ev[0].user_data); |  | 
| 295   EXPECT_EQ(KE_EXPECTED, ev[0].events); |  | 
| 296   EXPECT_GT(SCHEDULING_GRANULARITY, duration); |  | 
| 297 |  | 
| 298   // Return immediately if Emitter is already signaled when blocking forever. |  | 
| 299   emitter->SetEventStatus(KE_EXPECTED); |  | 
| 300   memset(ev, 0, sizeof(ev)); |  | 
| 301   EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_ONE_EVENT, |  | 
| 302                           TIMEOUT_NEVER, &duration)); |  | 
| 303   EXPECT_EQ(USER_DATA_A, ev[0].user_data); |  | 
| 304   EXPECT_EQ(KE_EXPECTED, ev[0].events); |  | 
| 305   EXPECT_GT(SCHEDULING_GRANULARITY, duration); |  | 
| 306 |  | 
| 307   // Return immediately if Emitter is not signaled when not blocking. |  | 
| 308   emitter->SetEventStatus(KE_NONE); |  | 
| 309   memset(ev, 0, sizeof(ev)); |  | 
| 310   EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_NO_EVENT, |  | 
| 311                           TIMEOUT_IMMEDIATE, &duration)); |  | 
| 312   EXPECT_EQ(0, duration); |  | 
| 313 |  | 
| 314   // Wait TIMEOUT_LONG if the emitter is not in a signaled state. |  | 
| 315   emitter->SetEventStatus(KE_NONE); |  | 
| 316   memset(ev, 0, sizeof(ev)); |  | 
| 317   EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_NO_EVENT, |  | 
| 318                           TIMEOUT_LONG, &duration)); |  | 
| 319   EXPECT_LT(TIMEOUT_LONG - TIMEOUT_SLOP, duration); |  | 
| 320   EXPECT_GT(TIMEOUT_LONG + SCHEDULING_GRANULARITY, duration); |  | 
| 321 } |  | 
| 322 |  | 
| 323 struct SignalInfo { |  | 
| 324   EventEmitterTester* em; |  | 
| 325   unsigned int ms_wait; |  | 
| 326   uint32_t events; |  | 
| 327 }; |  | 
| 328 |  | 
| 329 static void *SignalEmitterThread(void *ptr) { |  | 
| 330   SignalInfo* info = (SignalInfo*) ptr; |  | 
| 331   struct timespec ts; |  | 
| 332   ts.tv_sec = 0; |  | 
| 333   ts.tv_nsec = info->ms_wait * 1000000; |  | 
| 334 |  | 
| 335   nanosleep(&ts, NULL); |  | 
| 336 |  | 
| 337   info->em->RaiseEvent(info->events); |  | 
| 338   return NULL; |  | 
| 339 } |  | 
| 340 |  | 
| 341 TEST(EventTest, EmitterSignalling) { |  | 
| 342   ScopedRef<EventEmitterTester> emitter(new EventEmitterTester()); |  | 
| 343   ScopedEventListener listener(new EventListener); |  | 
| 344 |  | 
| 345   SignalInfo siginfo; |  | 
| 346   struct timeval start; |  | 
| 347   struct timeval end; |  | 
| 348   long duration; |  | 
| 349 |  | 
| 350   EventData ev[MAX_EVENTS]; |  | 
| 351   memset(ev, 0, sizeof(ev)); |  | 
| 352   EXPECT_EQ(0, listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A)); |  | 
| 353 |  | 
| 354   // Setup another thread to wait 1/4 of the max time, and signal both |  | 
| 355   // an expected, and unexpected value. |  | 
| 356   siginfo.em = emitter.get(); |  | 
| 357   siginfo.ms_wait = TIMEOUT_SHORT; |  | 
| 358   siginfo.events = KE_EXPECTED | KE_FILTERED; |  | 
| 359   pthread_t tid; |  | 
| 360   pthread_create(&tid, NULL, SignalEmitterThread, &siginfo); |  | 
| 361 |  | 
| 362   // Wait for the signal from the other thread and time it. |  | 
| 363   gettimeofday(&start, NULL); |  | 
| 364   int cnt = 0; |  | 
| 365   EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_VERY_LONG, &cnt)); |  | 
| 366   EXPECT_EQ(1, cnt); |  | 
| 367   gettimeofday(&end, NULL); |  | 
| 368 |  | 
| 369   // Verify the wait duration, and that we only received the expected signal. |  | 
| 370   duration = Duration(&start, &end); |  | 
| 371   EXPECT_GT(TIMEOUT_SHORT + SCHEDULING_GRANULARITY, duration); |  | 
| 372   EXPECT_LT(TIMEOUT_SHORT - TIMEOUT_SLOP, duration); |  | 
| 373   EXPECT_EQ(USER_DATA_A, ev[0].user_data); |  | 
| 374   EXPECT_EQ(KE_EXPECTED, ev[0].events); |  | 
| 375 } |  | 
| 376 |  | 
| 377 |  | 
| 378 namespace { |  | 
| 379 |  | 
| 380 class KernelProxyPolling : public KernelProxy { |  | 
| 381  public: |  | 
| 382   virtual int socket(int domain, int type, int protocol) { |  | 
| 383     ScopedMount mnt; |  | 
| 384     ScopedMountNode node(new EventEmitterTester()); |  | 
| 385     ScopedKernelHandle handle(new KernelHandle(mnt, node)); |  | 
| 386 |  | 
| 387     Error error = handle->Init(0); |  | 
| 388     if (error) { |  | 
| 389       errno = error; |  | 
| 390       return -1; |  | 
| 391     } |  | 
| 392 |  | 
| 393     return AllocateFD(handle); |  | 
| 394   } |  | 
| 395 }; |  | 
| 396 |  | 
| 397 class KernelProxyPollingTest : public ::testing::Test { |  | 
| 398  public: |  | 
| 399   void SetUp() { |  | 
| 400     ki_init(&kp_); |  | 
| 401   } |  | 
| 402 |  | 
| 403   void TearDown() { |  | 
| 404     ki_uninit(); |  | 
| 405   } | 239   } | 
| 406 | 240 | 
| 407  protected: | 241  protected: | 
| 408   KernelProxyPolling kp_; | 242   KernelProxy* kp; | 
| 409 }; | 243 | 
| 410 | 244   timeval tv; | 
| 411 }  // namespace |  | 
| 412 |  | 
| 413 |  | 
| 414 #define SOCKET_CNT 4 |  | 
| 415 void SetFDs(fd_set* set, int* fds) { |  | 
| 416   FD_ZERO(set); |  | 
| 417 |  | 
| 418   FD_SET(0, set); |  | 
| 419   FD_SET(1, set); |  | 
| 420   FD_SET(2, set); |  | 
| 421 |  | 
| 422   for (int index = 0; index < SOCKET_CNT; index++) |  | 
| 423     FD_SET(fds[index], set); |  | 
| 424 } |  | 
| 425 |  | 
| 426 TEST_F(KernelProxyPollingTest, Select) { |  | 
| 427   int fds[SOCKET_CNT]; |  | 
| 428 |  | 
| 429   fd_set rd_set; | 245   fd_set rd_set; | 
| 430   fd_set wr_set; | 246   fd_set wr_set; | 
| 431 | 247   fd_set ex_set; | 
| 432   FD_ZERO(&rd_set); | 248   struct pollfd pollfds[MAX_FDS]; | 
| 433   FD_ZERO(&wr_set); | 249 }; | 
| 434 | 250 | 
| 435   FD_SET(0, &rd_set); | 251 TEST_F(SelectPollTest, PollMemPipe) { | 
| 436   FD_SET(1, &rd_set); | 252   int fds[2]; | 
| 437   FD_SET(2, &rd_set); | 253 | 
| 438 | 254   // Both FDs for regular files should be read/write but not exception. | 
| 439   FD_SET(0, &wr_set); | 255   fds[0] = kp->open("/test.txt", O_CREAT | O_WRONLY); | 
| 440   FD_SET(1, &wr_set); | 256   fds[1] = kp->open("/test.txt", O_RDONLY); | 
| 441   FD_SET(2, &wr_set); | 257 | 
| 442 | 258   SetFDs(fds, 2); | 
| 443   // Expect normal files to select as read, write, and error | 259 | 
| 444   int cnt = select(4, &rd_set, &rd_set, &rd_set, NULL); | 260   EXPECT_EQ(2, kp->poll(pollfds, 2, 0)); | 
| 445   EXPECT_EQ(3 * 3, cnt); | 261   EXPECT_EQ(POLLIN | POLLOUT, pollfds[0].revents); | 
| 446   EXPECT_NE(0, FD_ISSET(0, &rd_set)); | 262   EXPECT_EQ(POLLIN | POLLOUT, pollfds[1].revents); | 
| 447   EXPECT_NE(0, FD_ISSET(1, &rd_set)); | 263   CloseFDs(fds, 2); | 
| 448   EXPECT_NE(0, FD_ISSET(2, &rd_set)); | 264 | 
| 449 | 265   // The write FD should select for write-only, read FD should not select | 
| 450   for (int index = 0 ; index < SOCKET_CNT; index++) { | 266   EXPECT_EQ(0, kp->pipe(fds)); | 
| 451     fds[index] = socket(0, 0, 0); | 267   SetFDs(fds, 2); | 
| 452     EXPECT_NE(-1, fds[index]); | 268 | 
| 453   } | 269   EXPECT_EQ(2, kp->poll(pollfds, 2, 0)); | 
| 454 | 270   // TODO(noelallen) fix poll based on open mode | 
| 455   // Highest numbered fd | 271   // EXPECT_EQ(0, pollfds[0].revents); | 
| 456   const int fdnum = fds[SOCKET_CNT - 1] + 1; | 272   // Bug 291018 | 
| 457 | 273   EXPECT_EQ(POLLOUT, pollfds[1].revents); | 
| 458   // Expect only the normal files to select | 274 | 
| 459   SetFDs(&rd_set, fds); | 275   CloseFDs(fds, 2); | 
| 460   cnt = select(fds[SOCKET_CNT-1] + 1, &rd_set, NULL, NULL, NULL); | 276 } | 
| 461   EXPECT_EQ(3, cnt); | 277 | 
| 462   EXPECT_NE(0, FD_ISSET(0, &rd_set)); | 278 TEST_F(SelectPollTest, SelectMemPipe) { | 
| 463   EXPECT_NE(0, FD_ISSET(1, &rd_set)); | 279   int fds[2]; | 
| 464   EXPECT_NE(0, FD_ISSET(2, &rd_set)); | 280 | 
| 465   for (int index = 0 ; index < SOCKET_CNT; index++) { | 281   // Both FDs for regular files should be read/write but not exception. | 
| 466     EXPECT_EQ(0, FD_ISSET(fds[index], &rd_set)); | 282   fds[0] = kp->open("/test.txt", O_CREAT | O_WRONLY); | 
| 467   } | 283   fds[1] = kp->open("/test.txt", O_RDONLY); | 
| 468 | 284   SetFDs(fds, 2); | 
| 469   // Poke one of the pollable nodes to be READ ready | 285 | 
| 470   ioctl(fds[0], POLLIN, NULL); | 286   EXPECT_EQ(4, kp->select(fds[1] + 1, &rd_set, &wr_set, &ex_set, &tv)); | 
| 471 |  | 
| 472   // Expect normal files to be read/write and one pollable node to be read. |  | 
| 473   SetFDs(&rd_set, fds); |  | 
| 474   SetFDs(&wr_set, fds); |  | 
| 475   cnt = select(fdnum, &rd_set, &wr_set, NULL, NULL); |  | 
| 476   EXPECT_EQ(7, cnt); |  | 
| 477   EXPECT_NE(0, FD_ISSET(fds[0], &rd_set)); | 287   EXPECT_NE(0, FD_ISSET(fds[0], &rd_set)); | 
| 478   EXPECT_EQ(0, FD_ISSET(fds[0], &wr_set)); | 288   EXPECT_NE(0, FD_ISSET(fds[1], &rd_set)); | 
| 479 } | 289   EXPECT_NE(0, FD_ISSET(fds[0], &wr_set)); | 
| 480 | 290   EXPECT_NE(0, FD_ISSET(fds[1], &wr_set)); | 
| 481 | 291   EXPECT_EQ(0, FD_ISSET(fds[0], &ex_set)); | 
|  | 292   EXPECT_EQ(0, FD_ISSET(fds[1], &ex_set)); | 
|  | 293 | 
|  | 294   CloseFDs(fds, 2); | 
|  | 295 | 
|  | 296   // The write FD should select for write-only, read FD should not select | 
|  | 297   EXPECT_EQ(0, kp->pipe(fds)); | 
|  | 298   SetFDs(fds, 2); | 
|  | 299 | 
|  | 300   EXPECT_EQ(2, kp->select(fds[1] + 1, &rd_set, &wr_set, &ex_set, &tv)); | 
|  | 301   EXPECT_EQ(0, FD_ISSET(fds[0], &rd_set)); | 
|  | 302   EXPECT_EQ(0, FD_ISSET(fds[1], &rd_set)); | 
|  | 303   // TODO(noelallen) fix poll based on open mode | 
|  | 304   // EXPECT_EQ(0, FD_ISSET(fds[0], &wr_set)); | 
|  | 305   // Bug 291018 | 
|  | 306   EXPECT_NE(0, FD_ISSET(fds[1], &wr_set)); | 
|  | 307   EXPECT_EQ(0, FD_ISSET(fds[0], &ex_set)); | 
|  | 308   EXPECT_EQ(0, FD_ISSET(fds[1], &ex_set)); | 
|  | 309 } | 
|  | 310 | 
|  | 311 | 
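
For reference, the waiter/signaller pattern exercised by the new `EmitterTest::ThreadEntry` and `EmitterTest.MultiThread` tests reduces to the sketch below. It is not part of the patch; it only restates calls that appear in the new tests (`EventListenerLock::WaitOnEvent`, `ClearEvents_Locked`, and `RaiseEvents_Locked` under `AUTO_LOCK`) and assumes the same nacl_io/sdk_util headers and `using` directives as the test file.

```cpp
// Minimal sketch of the emitter/listener handshake shown in the tests above.
// Assumption: the nacl_io headers added by this patch are available; only API
// calls that appear in the new tests are used here.
#include <poll.h>

#include "nacl_io/event_emitter.h"
#include "nacl_io/event_listener.h"

using namespace nacl_io;
using namespace sdk_util;  // for AUTO_LOCK, as in the test file above

// Consumer side: block until POLLIN is raised, then consume it.
// EventListenerLock holds the emitter's lock for the scope, so WaitOnEvent()
// and ClearEvents_Locked() cannot race with a concurrent RaiseEvents_Locked().
void WaitForInput(EventEmitter* emitter) {
  EventListenerLock listener(emitter);
  if (listener.WaitOnEvent(POLLIN, -1) == 0)  // -1 waits forever, as in ThreadEntry()
    emitter->ClearEvents_Locked(POLLIN);
}

// Producer side: signal POLLIN while holding the emitter's lock, as
// EmitterTest.MultiThread does with AUTO_LOCK(emitter_.GetLock()).
void SignalInput(EventEmitter* emitter) {
  AUTO_LOCK(emitter->GetLock());
  emitter->RaiseEvents_Locked(POLLIN);
}
```

As the MultiThread test expects, each `RaiseEvents_Locked(POLLIN)` releases only one waiter: the woken thread clears POLLIN under the emitter's lock before the other waiters re-check, so they keep blocking until the next raise.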