Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(303)

Side by Side Diff: native_client_sdk/src/tests/nacl_io_test/event_test.cc

Issue 23498015: [NaCl SDK] Support non blocking TCP/UDP (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Added Fifo Tests Created 7 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 /* Copyright (c) 2013 The Chromium Authors. All rights reserved. 1 /* Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 * Use of this source code is governed by a BSD-style license that can be 2 * Use of this source code is governed by a BSD-style license that can be
3 * found in the LICENSE file. 3 * found in the LICENSE file.
4 */ 4 */
5 5
6 #include <errno.h> 6 #include <errno.h>
7 #include <fcntl.h> 7 #include <fcntl.h>
8 #include <pthread.h>
8 #include <stdio.h> 9 #include <stdio.h>
9 #include <sys/ioctl.h> 10 #include <sys/ioctl.h>
10 #include <sys/stat.h> 11 #include <sys/stat.h>
11 #include <sys/time.h> 12 #include <sys/time.h>
12 13
13 #include "gtest/gtest.h" 14 #include "gtest/gtest.h"
14 15
15 #include "nacl_io/event_emitter.h" 16 #include "nacl_io/event_emitter.h"
16 #include "nacl_io/event_listener.h" 17 #include "nacl_io/event_listener.h"
18 #include "nacl_io/event_listener.h"
19 #include "nacl_io/event_listener.h"
17 #include "nacl_io/kernel_intercept.h" 20 #include "nacl_io/kernel_intercept.h"
18 #include "nacl_io/kernel_proxy.h" 21 #include "nacl_io/kernel_proxy.h"
19 #include "nacl_io/kernel_wrap.h" 22 #include "nacl_io/kernel_wrap.h"
23 #include "nacl_io/mount_node_pipe.h"
24 #include "nacl_io/mount_stream.h"
25
26 #include "ppapi_simple/ps.h"
20 27
21 28
22 using namespace nacl_io; 29 using namespace nacl_io;
23 using namespace sdk_util; 30 using namespace sdk_util;
24 31
25 class EventEmitterTester : public MountNode { 32
26 public: 33 class EventListenerTester : public EventListener {
27 EventEmitterTester() : MountNode(NULL), event_status_(0), event_cnt_(0) {} 34 public:
28 35 EventListenerTester() : EventListener(), events_(0) {};
29 void SetEventStatus(uint32_t bits) { event_status_ = bits; } 36
30 uint32_t GetEventStatus() { return event_status_; } 37 virtual void ReceiveEvents(EventEmitter* emitter, uint32_t events) {
31 38 events_ |= events;
32 Error Ioctl(int request, char* arg) { 39 }
33 event_status_ = static_cast<uint32_t>(request); 40
34 return 0; 41 uint32_t Events() {
35 } 42 return events_;
36 43 }
37 int GetType() { return S_IFSOCK; } 44
38 int NumEvents() { return event_cnt_; } 45 void Clear() {
39 46 events_ = 0;
40 public: 47 }
41 // Make this function public for testing 48
42 void RaiseEvent(uint32_t events) { 49 uint32_t events_;
43 EventEmitter::RaiseEvent(events); 50 };
44 } 51
45 52
46 // Called after registering locally, but while lock is still held. 53 TEST(EmitterBasic, SingleThread) {
47 void ChainRegisterEventInfo(const ScopedEventInfo& event) { 54 EventListenerTester listener_a;
48 event_cnt_++; 55 EventListenerTester listener_b;
49 } 56 EventEmitter emitter;
50 57
51 // Called before unregistering locally, but while lock is still held. 58 emitter.RegisterListener(&listener_a, POLLIN | POLLOUT | POLLERR);
52 void ChainUnregisterEventInfo(const ScopedEventInfo& event) { 59 emitter.RegisterListener(&listener_b, POLLIN | POLLOUT | POLLERR);
53 event_cnt_--; 60
61 EXPECT_EQ(0, emitter.GetEventStatus());
62 EXPECT_EQ(0, listener_a.Events());
63
64 {
65 AUTO_LOCK(emitter.GetLock())
66 emitter.RaiseEvents_Locked(POLLIN);
67 }
68 EXPECT_EQ(POLLIN, listener_a.Events());
69
70 listener_a.Clear();
71
72 {
73 AUTO_LOCK(emitter.GetLock())
74 emitter.RaiseEvents_Locked(POLLOUT);
75 }
76 EXPECT_EQ(POLLOUT, listener_a.Events());
77 EXPECT_EQ(POLLIN | POLLOUT, listener_b.Events());
78 }
79
80 class EmitterTest : public ::testing::Test {
81 public:
82 void SetUp() {
83 pthread_cond_destroy(&multi_cond_);
binji 2013/09/15 22:18:58 pthread_cond_init
noelallen1 2013/09/17 21:21:54 Done.
84 waiting_ = 0;
85 signaled_ = 0;
86 }
87
88 void TearDown() {
89 pthread_cond_destroy(&multi_cond_);
90 }
91
92 static void* ThreadThunk(void *ptr) {
93 return static_cast<EmitterTest*>(ptr)->ThreadEntry();
94 }
95
96 void CreateThread() {
binji 2013/09/15 22:18:58 It would be clearer if the function order were: Cr
noelallen1 2013/09/17 21:21:54 Done.
97 pthread_t id;
98 EXPECT_EQ(0, pthread_create(&id, NULL, ThreadThunk, this));
99 }
100
101 void* ThreadEntry() {
102 EventListenerLock listener(&emitter_);
103
104 pthread_cond_signal(&multi_cond_);
105 waiting_++;
binji 2013/09/15 22:18:58 atomic increment?
noelallen1 2013/09/17 21:21:54 I'm holding the emitter lock.
106 EXPECT_EQ(0, listener.WaitOnEvent(POLLIN, -1));
107 emitter_.ClearEvents_Locked(POLLIN);
108 signaled_ ++;
109 return NULL;
54 } 110 }
55 111
56 protected: 112 protected:
57 uint32_t event_status_; 113 pthread_cond_t multi_cond_;
58 uint32_t event_cnt_; 114 EventEmitter emitter_;
59 }; 115
60 116 uint32_t waiting_;
61 117 uint32_t signaled_;
62 const int MAX_EVENTS = 8; 118 };
63 119
64 // IDs for Emitters 120
65 const int ID_EMITTER = 5; 121 const int NUM_THREADS = 10;
66 const int ID_LISTENER = 6; 122 TEST_F(EmitterTest, MultiThread) {
67 const int ID_EMITTER_DUP = 7; 123 for (int a=0; a <NUM_THREADS; a++)
68 124 CreateThread();
69 // Kernel Event values 125
70 const uint32_t KE_EXPECTED = 4; 126 sleep(1);
binji 2013/09/15 22:18:58 why?
noelallen1 2013/09/17 21:21:54 Tests are written such that threads that should ma
71 const uint32_t KE_FILTERED = 2; 127 EXPECT_EQ(0, signaled_);
72 const uint32_t KE_NONE = 0; 128
73 129 {
74 // User Data values 130 AUTO_LOCK(emitter_.GetLock());
75 const uint64_t USER_DATA_A = 1; 131 // Wait for all threads to wait
76 const uint64_t USER_DATA_B = 5; 132 while(waiting_ < NUM_THREADS)
77 133 pthread_cond_wait(&multi_cond_, emitter_.GetLock().mutex());
78 // Timeout durations 134
79 const int TIMEOUT_IMMEDIATE = 0; 135 emitter_.RaiseEvents_Locked(POLLIN);
80 const int TIMEOUT_SHORT= 100; 136 }
81 const int TIMEOUT_LONG = 500; 137
82 const int TIMEOUT_NEVER = -1; 138 sleep(1);
binji 2013/09/15 22:18:58 can we do better than sleeping here?
noelallen1 2013/09/17 21:21:54 Not without serious changes to pthread_cond, and/o
83 const int TIMEOUT_VERY_LONG = 1000; 139 EXPECT_EQ(1, signaled_);
84 140
85 // We subtract TIMEOUT_SLOP from the expected minimum time due to rounding 141 {
86 // and clock drift converting between absolute and relative time. This should 142 AUTO_LOCK(emitter_.GetLock());
87 // only be 1 for Less Than, and 1 for rounding, but we use 10 since we don't 143 emitter_.RaiseEvents_Locked(POLLIN);
88 // care about real precision, aren't testing the underlying 144 }
89 // implementations and don't want flakiness. 145
90 const int TIMEOUT_SLOP = 10; 146 sleep(1);
91 147 EXPECT_EQ(2, signaled_);
binji 2013/09/15 22:18:58 clean up other threads?
noelallen1 2013/09/17 21:21:54 The other threads just return after running the te
binji 2013/09/19 00:48:54 Aren't they all blocked on WaitOnEvent?
noelallen1 2013/09/19 21:29:27 Done.
92 TEST(EventTest, EmitterBasic) { 148 }
93 ScopedRef<EventEmitterTester> emitter(new EventEmitterTester()); 149
94 ScopedRef<EventEmitter> null_emitter; 150
95 151 TEST(PipeTest, Listener) {
96 ScopedEventListener listener(new EventListener); 152 const char hello[] = "Hello World.";
97 153 char tmp[64] = "Goodbye";
98 // Verify construction 154
99 EXPECT_EQ(0, emitter->NumEvents()); 155 EventEmitterPipe pipe(32);
100 EXPECT_EQ(0, emitter->GetEventStatus()); 156
101 157 // Expect to time out on input.
102 // Verify status 158 {
103 emitter->SetEventStatus(KE_EXPECTED); 159 EventListenerLock locker(&pipe);
104 EXPECT_EQ(KE_EXPECTED, emitter->GetEventStatus()); 160 EXPECT_EQ(ETIMEDOUT, locker.WaitOnEvent(POLLIN, 0));
105 161 }
106 // Fail to update or free an ID not in the set 162
107 EXPECT_EQ(ENOENT, listener->Update(ID_EMITTER, KE_EXPECTED, USER_DATA_A)); 163 // Output should be ready to go.
108 EXPECT_EQ(ENOENT, listener->Free(ID_EMITTER)); 164 {
109 165 EventListenerLock locker(&pipe);
110 // Fail to Track self 166 EXPECT_EQ(0, locker.WaitOnEvent(POLLOUT, 0));
111 EXPECT_EQ(EINVAL, listener->Track(ID_LISTENER, 167 EXPECT_EQ(sizeof(hello), pipe.Write_Locked(hello, sizeof(hello)));
112 listener, 168 }
113 KE_EXPECTED, 169
114 USER_DATA_A)); 170 // We should now be able to poll
115 171 {
116 // Set the emitter filter and data 172 EventListenerLock locker(&pipe);
117 EXPECT_EQ(0, listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A)); 173 EXPECT_EQ(0, locker.WaitOnEvent(POLLIN, 0));
118 EXPECT_EQ(1, emitter->NumEvents()); 174 EXPECT_EQ(sizeof(hello), pipe.Read_Locked(tmp, sizeof(tmp)));
119 175 }
120 // Fail to add the same ID 176
121 EXPECT_EQ(EEXIST, 177 // Verify we can read it correctly.
122 listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A)); 178 EXPECT_EQ(0, strcmp(hello, tmp));
123 EXPECT_EQ(1, emitter->NumEvents()); 179 }
124 180
125 int event_cnt = 0; 181
126 EventData ev[MAX_EVENTS]; 182 class TestMountStream : public MountStream {
127 183 public:
128 // Do not allow a wait with a zero events count. 184 TestMountStream() {}
129 EXPECT_EQ(EINVAL, listener->Wait(ev, 0, TIMEOUT_IMMEDIATE, &event_cnt)); 185 };
130 186
131 // Do not allow a wait with a negative events count. 187 TEST(PipeNodeTest, Basic) {
132 EXPECT_EQ(EINVAL, listener->Wait(ev, -1, TIMEOUT_IMMEDIATE, &event_cnt)); 188 ScopedMount mnt(new TestMountStream());
133 189
134 // Do not allow a wait with a NULL EventData pointer 190 MountNodePipe* pipe_node = new MountNodePipe(mnt.get());
135 EXPECT_EQ(EFAULT, 191 ScopedRef<MountNodePipe> pipe(pipe_node);
136 listener->Wait(NULL, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); 192
137 193 EXPECT_EQ(POLLOUT, pipe_node->GetEventStatus());
138 // Return with no events if the Emitter has no signals set. 194 }
139 memset(ev, 0, sizeof(ev)); 195
140 event_cnt = 100; 196 const int MAX_FDS = 32;
141 emitter->SetEventStatus(KE_NONE); 197 class SelectPollTest : public ::testing::Test {
142 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); 198 public:
143 EXPECT_EQ(0, event_cnt); 199 void SetUp() {
144 200 kp = new KernelProxy();
145 // Return with no events if the Emitter has a filtered signals set. 201 kp->Init(NULL);
146 memset(ev, 0, sizeof(ev)); 202 EXPECT_EQ(0, kp->umount("/"));
147 event_cnt = 100; 203 EXPECT_EQ(0, kp->mount("", "/", "memfs", 0, NULL));
148 emitter->SetEventStatus(KE_FILTERED); 204
149 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); 205 memset(&tv, 0, sizeof(tv));
150 EXPECT_EQ(0, event_cnt); 206 }
151 207
152 // Return with one event if the Emitter has the expected signal set. 208 void TearDown() {
153 memset(ev, 0, sizeof(ev)); 209 delete kp;
154 event_cnt = 100; 210 }
155 emitter->SetEventStatus(KE_EXPECTED); 211
156 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); 212 void SetFDs(int* fds, int cnt) {
157 EXPECT_EQ(1, event_cnt); 213 FD_ZERO(&rd_set);
158 EXPECT_EQ(USER_DATA_A, ev[0].user_data); 214 FD_ZERO(&wr_set);
159 EXPECT_EQ(KE_EXPECTED, ev[0].events); 215 FD_ZERO(&ex_set);
160 216
161 // Return with one event containing only the expected signal. 217 for (int index = 0; index < cnt; index++) {
162 memset(ev, 0, sizeof(ev)); 218 EXPECT_NE(-1, fds[index]);
163 event_cnt = 100; 219 FD_SET(fds[index], &rd_set);
164 emitter->SetEventStatus(KE_EXPECTED | KE_FILTERED); 220 FD_SET(fds[index], &wr_set);
165 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt)); 221 FD_SET(fds[index], &ex_set);
166 EXPECT_EQ(1, event_cnt); 222
167 EXPECT_EQ(USER_DATA_A, ev[0].user_data); 223 pollfds[index].fd = fds[index];
168 EXPECT_EQ(KE_EXPECTED, ev[0].events); 224 pollfds[index].events = POLLIN | POLLOUT;
169 225 pollfds[index].revents = -1;
170 // Change the USER_DATA on an existing event
171 EXPECT_EQ(0, listener->Update(ID_EMITTER, KE_EXPECTED, USER_DATA_B));
172
173 // Return with one event signaled with the alternate USER DATA
174 memset(ev, 0, sizeof(ev));
175 event_cnt = 100;
176 emitter->SetEventStatus(KE_EXPECTED | KE_FILTERED);
177 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, 0, &event_cnt));
178 EXPECT_EQ(1, event_cnt);
179 EXPECT_EQ(USER_DATA_B, ev[0].user_data);
180 EXPECT_EQ(KE_EXPECTED, ev[0].events);
181
182 // Reset the USER_DATA.
183 EXPECT_EQ(0, listener->Update(ID_EMITTER, KE_EXPECTED, USER_DATA_A));
184
185 // Support adding a DUP.
186 EXPECT_EQ(0, listener->Track(ID_EMITTER_DUP,
187 emitter,
188 KE_EXPECTED,
189 USER_DATA_A));
190 EXPECT_EQ(2, emitter->NumEvents());
191
192 // Return unsignaled.
193 memset(ev, 0, sizeof(ev));
194 emitter->SetEventStatus(KE_NONE);
195 event_cnt = 100;
196 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt));
197 EXPECT_EQ(0, event_cnt);
198
199 // Return with two event signaled with expected data.
200 memset(ev, 0, sizeof(ev));
201 emitter->SetEventStatus(KE_EXPECTED);
202 event_cnt = 100;
203 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt));
204 EXPECT_EQ(2, event_cnt);
205 EXPECT_EQ(USER_DATA_A, ev[0].user_data);
206 EXPECT_EQ(KE_EXPECTED, ev[0].events);
207 EXPECT_EQ(USER_DATA_A, ev[1].user_data);
208 EXPECT_EQ(KE_EXPECTED, ev[1].events);
209 }
210
211 long Duration(struct timeval* start, struct timeval* end) {
212 if (start->tv_usec > end->tv_usec) {
213 end->tv_sec -= 1;
214 end->tv_usec += 1000000;
215 }
216 long cur_time = 1000 * (end->tv_sec - start->tv_sec);
217 cur_time += (end->tv_usec - start->tv_usec) / 1000;
218 return cur_time;
219 }
220
221
222 // Run a timed wait, and return the average of 8 iterations to reduce
223 // chance of false negative on outlier.
224 const int TRIES_TO_AVERAGE = 8;
225 bool TimedListen(ScopedEventListener& listen,
226 EventData* ev,
227 int ev_max,
228 int ev_expect,
229 int ms_wait,
230 long* duration) {
231
232 struct timeval start;
233 struct timeval end;
234 long total_time = 0;
235
236 for (int a=0; a < TRIES_TO_AVERAGE; a++) {
237 gettimeofday(&start, NULL);
238
239 int signaled;
240
241 EXPECT_EQ(0, listen->Wait(ev, ev_max, ms_wait, &signaled));
242 EXPECT_EQ(signaled, ev_expect);
243
244 if (signaled != ev_expect) {
245 return false;
246 } 226 }
247 227 }
248 gettimeofday(&end, NULL); 228
249 229 void CloseFDs(int* fds, int cnt) {
250 long cur_time = Duration(&start, &end); 230 for (int index = 0; index < cnt; index++)
251 total_time += cur_time; 231 kp->close(fds[index]);
252 }
253
254 *duration = total_time / TRIES_TO_AVERAGE;
255 return true;
256 }
257
258
259 // NOTE: These timing tests are potentially flaky, the real test is
260 // for the zero timeout should be, has the ConditionVariable been waited on?
261 // Once we provide a debuggable SimpleCond and SimpleLock we can actually test
262 // the correct thing.
263
264 // Normal scheduling would expect us to see ~10ms accuracy, but we'll
265 // use a much bigger number (yet smaller than the MAX_MS_TIMEOUT).
266 const int SCHEDULING_GRANULARITY = 100;
267
268 const int EXPECT_ONE_EVENT = 1;
269 const int EXPECT_NO_EVENT = 0;
270
271 TEST(EventTest, EmitterTimeout) {
272 ScopedRef<EventEmitterTester> emitter(new EventEmitterTester());
273 ScopedEventListener listener(new EventListener());
274 long duration;
275
276 EventData ev[MAX_EVENTS];
277 memset(ev, 0, sizeof(ev));
278 EXPECT_EQ(0, listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A));
279
280 // Return immediately when emitter is signaled, with no timeout
281 emitter->SetEventStatus(KE_EXPECTED);
282 memset(ev, 0, sizeof(ev));
283 EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_ONE_EVENT,
284 TIMEOUT_IMMEDIATE, &duration));
285 EXPECT_EQ(USER_DATA_A, ev[0].user_data);
286 EXPECT_EQ(KE_EXPECTED, ev[0].events);
287 EXPECT_EQ(0, duration);
288
289 // Return immediately when emitter is signaled, even with timeout
290 emitter->SetEventStatus(KE_EXPECTED);
291 memset(ev, 0, sizeof(ev));
292 EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_ONE_EVENT,
293 TIMEOUT_LONG, &duration));
294 EXPECT_EQ(USER_DATA_A, ev[0].user_data);
295 EXPECT_EQ(KE_EXPECTED, ev[0].events);
296 EXPECT_GT(SCHEDULING_GRANULARITY, duration);
297
298 // Return immediately if Emitter is already signaled when blocking forever.
299 emitter->SetEventStatus(KE_EXPECTED);
300 memset(ev, 0, sizeof(ev));
301 EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_ONE_EVENT,
302 TIMEOUT_NEVER, &duration));
303 EXPECT_EQ(USER_DATA_A, ev[0].user_data);
304 EXPECT_EQ(KE_EXPECTED, ev[0].events);
305 EXPECT_GT(SCHEDULING_GRANULARITY, duration);
306
307 // Return immediately if Emitter is not signaled when not blocking.
308 emitter->SetEventStatus(KE_NONE);
309 memset(ev, 0, sizeof(ev));
310 EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_NO_EVENT,
311 TIMEOUT_IMMEDIATE, &duration));
312 EXPECT_EQ(0, duration);
313
314 // Wait TIMEOUT_LONG if the emitter is not in a signaled state.
315 emitter->SetEventStatus(KE_NONE);
316 memset(ev, 0, sizeof(ev));
317 EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_NO_EVENT,
318 TIMEOUT_LONG, &duration));
319 EXPECT_LT(TIMEOUT_LONG - TIMEOUT_SLOP, duration);
320 EXPECT_GT(TIMEOUT_LONG + SCHEDULING_GRANULARITY, duration);
321 }
322
323 struct SignalInfo {
324 EventEmitterTester* em;
325 unsigned int ms_wait;
326 uint32_t events;
327 };
328
329 static void *SignalEmitterThread(void *ptr) {
330 SignalInfo* info = (SignalInfo*) ptr;
331 struct timespec ts;
332 ts.tv_sec = 0;
333 ts.tv_nsec = info->ms_wait * 1000000;
334
335 nanosleep(&ts, NULL);
336
337 info->em->RaiseEvent(info->events);
338 return NULL;
339 }
340
341 TEST(EventTest, EmitterSignalling) {
342 ScopedRef<EventEmitterTester> emitter(new EventEmitterTester());
343 ScopedEventListener listener(new EventListener);
344
345 SignalInfo siginfo;
346 struct timeval start;
347 struct timeval end;
348 long duration;
349
350 EventData ev[MAX_EVENTS];
351 memset(ev, 0, sizeof(ev));
352 EXPECT_EQ(0, listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A));
353
354 // Setup another thread to wait 1/4 of the max time, and signal both
355 // an expected, and unexpected value.
356 siginfo.em = emitter.get();
357 siginfo.ms_wait = TIMEOUT_SHORT;
358 siginfo.events = KE_EXPECTED | KE_FILTERED;
359 pthread_t tid;
360 pthread_create(&tid, NULL, SignalEmitterThread, &siginfo);
361
362 // Wait for the signal from the other thread and time it.
363 gettimeofday(&start, NULL);
364 int cnt = 0;
365 EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_VERY_LONG, &cnt));
366 EXPECT_EQ(1, cnt);
367 gettimeofday(&end, NULL);
368
369 // Verify the wait duration, and that we only received the expected signal.
370 duration = Duration(&start, &end);
371 EXPECT_GT(TIMEOUT_SHORT + SCHEDULING_GRANULARITY, duration);
372 EXPECT_LT(TIMEOUT_SHORT - TIMEOUT_SLOP, duration);
373 EXPECT_EQ(USER_DATA_A, ev[0].user_data);
374 EXPECT_EQ(KE_EXPECTED, ev[0].events);
375 }
376
377
378 namespace {
379
380 class KernelProxyPolling : public KernelProxy {
381 public:
382 virtual int socket(int domain, int type, int protocol) {
383 ScopedMount mnt;
384 ScopedMountNode node(new EventEmitterTester());
385 ScopedKernelHandle handle(new KernelHandle(mnt, node));
386
387 Error error = handle->Init(0);
388 if (error) {
389 errno = error;
390 return -1;
391 }
392
393 return AllocateFD(handle);
394 }
395 };
396
397 class KernelProxyPollingTest : public ::testing::Test {
398 public:
399 void SetUp() {
400 ki_init(&kp_);
401 }
402
403 void TearDown() {
404 ki_uninit();
405 } 232 }
406 233
407 protected: 234 protected:
408 KernelProxyPolling kp_; 235 KernelProxy* kp;
409 }; 236
410 237 timeval tv;
411 } // namespace
412
413
414 #define SOCKET_CNT 4
415 void SetFDs(fd_set* set, int* fds) {
416 FD_ZERO(set);
417
418 FD_SET(0, set);
419 FD_SET(1, set);
420 FD_SET(2, set);
421
422 for (int index = 0; index < SOCKET_CNT; index++)
423 FD_SET(fds[index], set);
424 }
425
426 TEST_F(KernelProxyPollingTest, Select) {
427 int fds[SOCKET_CNT];
428
429 fd_set rd_set; 238 fd_set rd_set;
430 fd_set wr_set; 239 fd_set wr_set;
431 240 fd_set ex_set;
432 FD_ZERO(&rd_set); 241 struct pollfd pollfds[MAX_FDS];
433 FD_ZERO(&wr_set); 242 };
434 243
435 FD_SET(0, &rd_set); 244 TEST_F(SelectPollTest, PollMemPipe) {
436 FD_SET(1, &rd_set); 245 int fds[2];
437 FD_SET(2, &rd_set); 246
438 247 // Both FDs for regular files should be read/write but not exception.
439 FD_SET(0, &wr_set); 248 fds[0] = kp->open("/test.txt", O_CREAT | O_WRONLY);
440 FD_SET(1, &wr_set); 249 fds[1] = kp->open("/test.txt", O_RDONLY);
441 FD_SET(2, &wr_set); 250
442 251 SetFDs(fds, 2);
443 // Expect normal files to select as read, write, and error 252
444 int cnt = select(4, &rd_set, &rd_set, &rd_set, NULL); 253 EXPECT_EQ(2, kp->poll(pollfds, 2, 0));
445 EXPECT_EQ(3 * 3, cnt); 254 EXPECT_EQ(POLLIN | POLLOUT, pollfds[0].revents);
446 EXPECT_NE(0, FD_ISSET(0, &rd_set)); 255 EXPECT_EQ(POLLIN | POLLOUT, pollfds[1].revents);
447 EXPECT_NE(0, FD_ISSET(1, &rd_set)); 256 CloseFDs(fds, 2);
448 EXPECT_NE(0, FD_ISSET(2, &rd_set)); 257
449 258 // The write FD should select for write-only, read FD should not select
450 for (int index = 0 ; index < SOCKET_CNT; index++) { 259 EXPECT_EQ(0, kp->pipe(fds));
451 fds[index] = socket(0, 0, 0); 260 SetFDs(fds, 2);
452 EXPECT_NE(-1, fds[index]); 261
453 } 262 EXPECT_EQ(2, kp->poll(pollfds, 2, 0));
454 263 // TODO(noelallen) fix poll based on open mode
455 // Highest numbered fd 264 // EXPECT_EQ(0, pollfds[0].revents);
456 const int fdnum = fds[SOCKET_CNT - 1] + 1; 265 // Bug 291018
457 266 EXPECT_EQ(POLLOUT, pollfds[1].revents);
458 // Expect only the normal files to select 267
459 SetFDs(&rd_set, fds); 268 CloseFDs(fds, 2);
460 cnt = select(fds[SOCKET_CNT-1] + 1, &rd_set, NULL, NULL, NULL); 269 }
461 EXPECT_EQ(3, cnt); 270
462 EXPECT_NE(0, FD_ISSET(0, &rd_set)); 271 TEST_F(SelectPollTest, SelectMemPipe) {
463 EXPECT_NE(0, FD_ISSET(1, &rd_set)); 272 int fds[2];
464 EXPECT_NE(0, FD_ISSET(2, &rd_set)); 273
465 for (int index = 0 ; index < SOCKET_CNT; index++) { 274 // Both FDs for regular files should be read/write but not exception.
466 EXPECT_EQ(0, FD_ISSET(fds[index], &rd_set)); 275 fds[0] = kp->open("/test.txt", O_CREAT | O_WRONLY);
467 } 276 fds[1] = kp->open("/test.txt", O_RDONLY);
468 277 SetFDs(fds, 2);
469 // Poke one of the pollable nodes to be READ ready 278
470 ioctl(fds[0], POLLIN, NULL); 279 EXPECT_EQ(4, kp->select(fds[1] + 1, &rd_set, &wr_set, &ex_set, &tv));
471
472 // Expect normal files to be read/write and one pollable node to be read.
473 SetFDs(&rd_set, fds);
474 SetFDs(&wr_set, fds);
475 cnt = select(fdnum, &rd_set, &wr_set, NULL, NULL);
476 EXPECT_EQ(7, cnt);
477 EXPECT_NE(0, FD_ISSET(fds[0], &rd_set)); 280 EXPECT_NE(0, FD_ISSET(fds[0], &rd_set));
478 EXPECT_EQ(0, FD_ISSET(fds[0], &wr_set)); 281 EXPECT_NE(0, FD_ISSET(fds[1], &rd_set));
479 } 282 EXPECT_NE(0, FD_ISSET(fds[0], &wr_set));
480 283 EXPECT_NE(0, FD_ISSET(fds[1], &wr_set));
481 284 EXPECT_EQ(0, FD_ISSET(fds[0], &ex_set));
285 EXPECT_EQ(0, FD_ISSET(fds[1], &ex_set));
286
287 CloseFDs(fds, 2);
288
289 // The write FD should select for write-only, read FD should not select
290 EXPECT_EQ(0, kp->pipe(fds));
291 SetFDs(fds, 2);
292
293 EXPECT_EQ(2, kp->select(fds[1] + 1, &rd_set, &wr_set, &ex_set, &tv));
294 EXPECT_EQ(0, FD_ISSET(fds[0], &rd_set));
295 EXPECT_EQ(0, FD_ISSET(fds[1], &rd_set));
296 // TODO(noelallen) fix poll based on open mode
297 // EXPECT_EQ(0, FD_ISSET(fds[0], &wr_set));
298 // Bug 291018
299 EXPECT_NE(0, FD_ISSET(fds[1], &wr_set));
300 EXPECT_EQ(0, FD_ISSET(fds[0], &ex_set));
301 EXPECT_EQ(0, FD_ISSET(fds[1], &ex_set));
302 }
303
304
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698