/* Copyright (c) 2013 The Chromium Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <time.h>

#include "gtest/gtest.h"

#include "nacl_io/event_emitter.h"
#include "nacl_io/event_listener.h"
#include "nacl_io/kernel_intercept.h"
#include "nacl_io/kernel_proxy.h"
#include "nacl_io/kernel_wrap.h"


using namespace nacl_io;
using namespace sdk_util;

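// A MountNode that exposes EventEmitter's protected RaiseEvent() for testing
// and counts how many EventInfos are chained to it, so the tests below can
// drive and observe the emitter directly.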
class EventEmitterTester : public MountNode {
 public:
  EventEmitterTester() : MountNode(NULL), event_status_(0), event_cnt_(0) {}

  void SetEventStatus(uint32_t bits) { event_status_ = bits; }
  uint32_t GetEventStatus() { return event_status_; }

  Error Ioctl(int request, char* arg) {
    event_status_ = static_cast<uint32_t>(request);
    return 0;
  }

  int GetType() { return S_IFSOCK; }
  int NumEvents() { return event_cnt_; }

 public:
  // Make this function public for testing
  void RaiseEvent(uint32_t events) {
    EventEmitter::RaiseEvent(events);
  }

  // Called after registering locally, but while lock is still held.
  void ChainRegisterEventInfo(const ScopedEventInfo& event) {
    event_cnt_++;
  }

  // Called before unregistering locally, but while lock is still held.
  void ChainUnregisterEventInfo(const ScopedEventInfo& event) {
    event_cnt_--;
  }

 protected:
  uint32_t event_status_;
  uint32_t event_cnt_;
};


const int MAX_EVENTS = 8;

// IDs for Emitters
const int ID_EMITTER = 5;
const int ID_LISTENER = 6;
const int ID_EMITTER_DUP = 7;

// Kernel Event values
const uint32_t KE_EXPECTED = 4;
const uint32_t KE_FILTERED = 2;
const uint32_t KE_NONE = 0;

// User Data values
const uint64_t USER_DATA_A = 1;
const uint64_t USER_DATA_B = 5;

// Timeout durations
const int TIMEOUT_IMMEDIATE = 0;
const int TIMEOUT_SHORT = 100;
const int TIMEOUT_LONG = 500;
const int TIMEOUT_NEVER = -1;
const int TIMEOUT_VERY_LONG = 1000;

// We subtract TIMEOUT_SLOP from the expected minimum time to account for
// rounding and clock drift when converting between absolute and relative
// time. This should only be 1 for Less Than and 1 for rounding, but we use 10
// since we don't care about real precision, aren't testing the underlying
// implementation, and don't want flakiness.
const int TIMEOUT_SLOP = 10;

TEST(EventTest, EmitterBasic) {
  ScopedRef<EventEmitterTester> emitter(new EventEmitterTester());
  ScopedRef<EventEmitter> null_emitter;

  ScopedEventListener listener(new EventListener);

  // Verify construction
  EXPECT_EQ(0, emitter->NumEvents());
  EXPECT_EQ(0, emitter->GetEventStatus());

  // Verify status
  emitter->SetEventStatus(KE_EXPECTED);
  EXPECT_EQ(KE_EXPECTED, emitter->GetEventStatus());

  // Fail to update or free an ID not in the set
  EXPECT_EQ(ENOENT, listener->Update(ID_EMITTER, KE_EXPECTED, USER_DATA_A));
  EXPECT_EQ(ENOENT, listener->Free(ID_EMITTER));

  // Fail to Track self
  EXPECT_EQ(EINVAL, listener->Track(ID_LISTENER,
                                    listener,
                                    KE_EXPECTED,
                                    USER_DATA_A));

  // Set the emitter filter and data
  EXPECT_EQ(0, listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A));
  EXPECT_EQ(1, emitter->NumEvents());

  // Fail to add the same ID
  EXPECT_EQ(EEXIST,
            listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A));
  EXPECT_EQ(1, emitter->NumEvents());

  int event_cnt = 0;
  EventData ev[MAX_EVENTS];

  // Do not allow a wait with a zero events count.
  EXPECT_EQ(EINVAL, listener->Wait(ev, 0, TIMEOUT_IMMEDIATE, &event_cnt));

  // Do not allow a wait with a negative events count.
  EXPECT_EQ(EINVAL, listener->Wait(ev, -1, TIMEOUT_IMMEDIATE, &event_cnt));

  // Do not allow a wait with a NULL EventData pointer
  EXPECT_EQ(EFAULT,
            listener->Wait(NULL, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt));

  // Return with no events if the Emitter has no signals set.
  memset(ev, 0, sizeof(ev));
  event_cnt = 100;
  emitter->SetEventStatus(KE_NONE);
  EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt));
  EXPECT_EQ(0, event_cnt);

  // Return with no events if the Emitter has only filtered signals set.
  memset(ev, 0, sizeof(ev));
  event_cnt = 100;
  emitter->SetEventStatus(KE_FILTERED);
  EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt));
  EXPECT_EQ(0, event_cnt);

  // Return with one event if the Emitter has the expected signal set.
  memset(ev, 0, sizeof(ev));
  event_cnt = 100;
  emitter->SetEventStatus(KE_EXPECTED);
  EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt));
  EXPECT_EQ(1, event_cnt);
  EXPECT_EQ(USER_DATA_A, ev[0].user_data);
  EXPECT_EQ(KE_EXPECTED, ev[0].events);

  // Return with one event containing only the expected signal.
  memset(ev, 0, sizeof(ev));
  event_cnt = 100;
  emitter->SetEventStatus(KE_EXPECTED | KE_FILTERED);
  EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt));
  EXPECT_EQ(1, event_cnt);
  EXPECT_EQ(USER_DATA_A, ev[0].user_data);
  EXPECT_EQ(KE_EXPECTED, ev[0].events);

  // Change the USER_DATA on an existing event
  EXPECT_EQ(0, listener->Update(ID_EMITTER, KE_EXPECTED, USER_DATA_B));

  // Return with one event signaled with the alternate USER_DATA.
  memset(ev, 0, sizeof(ev));
  event_cnt = 100;
  emitter->SetEventStatus(KE_EXPECTED | KE_FILTERED);
  EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt));
  EXPECT_EQ(1, event_cnt);
  EXPECT_EQ(USER_DATA_B, ev[0].user_data);
  EXPECT_EQ(KE_EXPECTED, ev[0].events);

  // Reset the USER_DATA.
  EXPECT_EQ(0, listener->Update(ID_EMITTER, KE_EXPECTED, USER_DATA_A));

  // Support adding a DUP.
  EXPECT_EQ(0, listener->Track(ID_EMITTER_DUP,
                               emitter,
                               KE_EXPECTED,
                               USER_DATA_A));
  EXPECT_EQ(2, emitter->NumEvents());

  // Return unsignaled.
  memset(ev, 0, sizeof(ev));
  emitter->SetEventStatus(KE_NONE);
  event_cnt = 100;
  EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt));
  EXPECT_EQ(0, event_cnt);

  // Return with two events signaled with the expected data.
  memset(ev, 0, sizeof(ev));
  emitter->SetEventStatus(KE_EXPECTED);
  event_cnt = 100;
  EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_IMMEDIATE, &event_cnt));
  EXPECT_EQ(2, event_cnt);
  EXPECT_EQ(USER_DATA_A, ev[0].user_data);
  EXPECT_EQ(KE_EXPECTED, ev[0].events);
  EXPECT_EQ(USER_DATA_A, ev[1].user_data);
  EXPECT_EQ(KE_EXPECTED, ev[1].events);
}

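// Compute the elapsed wall-clock time between |start| and |end| in
// milliseconds.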
long Duration(struct timeval* start, struct timeval* end) {
  if (start->tv_usec > end->tv_usec) {
    end->tv_sec -= 1;
    end->tv_usec += 1000000;
  }
  long cur_time = 1000 * (end->tv_sec - start->tv_sec);
  cur_time += (end->tv_usec - start->tv_usec) / 1000;
  return cur_time;
}


// Run a timed wait, and return the average of 8 iterations to reduce the
// chance of a false negative from an outlier.
const int TRIES_TO_AVERAGE = 8;
bool TimedListen(ScopedEventListener& listen,
                 EventData* ev,
                 int ev_max,
                 int ev_expect,
                 int ms_wait,
                 long* duration) {
  struct timeval start;
  struct timeval end;
  long total_time = 0;

  for (int a = 0; a < TRIES_TO_AVERAGE; a++) {
    gettimeofday(&start, NULL);

    int signaled;

    EXPECT_EQ(0, listen->Wait(ev, ev_max, ms_wait, &signaled));
    EXPECT_EQ(signaled, ev_expect);

    if (signaled != ev_expect) {
      return false;
    }

    gettimeofday(&end, NULL);

    long cur_time = Duration(&start, &end);
    total_time += cur_time;
  }

  *duration = total_time / TRIES_TO_AVERAGE;
  return true;
}


// NOTE: These timing tests are potentially flaky. The real test for the
// zero-timeout case should be: has the ConditionVariable been waited on?
// Once we provide a debuggable SimpleCond and SimpleLock we can actually test
// the correct thing.

// Normal scheduling would expect us to see ~10ms accuracy, but we'll
// use a much bigger number (yet still smaller than the long timeouts used
// below).
const int SCHEDULING_GRANULARITY = 100;

const int EXPECT_ONE_EVENT = 1;
const int EXPECT_NO_EVENT = 0;

TEST(EventTest, EmitterTimeout) {
  ScopedRef<EventEmitterTester> emitter(new EventEmitterTester());
  ScopedEventListener listener(new EventListener());
  long duration;

  EventData ev[MAX_EVENTS];
  memset(ev, 0, sizeof(ev));
  EXPECT_EQ(0, listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A));

  // Return immediately when emitter is signaled, with no timeout
  emitter->SetEventStatus(KE_EXPECTED);
  memset(ev, 0, sizeof(ev));
  EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_ONE_EVENT,
                          TIMEOUT_IMMEDIATE, &duration));
  EXPECT_EQ(USER_DATA_A, ev[0].user_data);
  EXPECT_EQ(KE_EXPECTED, ev[0].events);
  EXPECT_EQ(0, duration);

  // Return immediately when emitter is signaled, even with timeout
  emitter->SetEventStatus(KE_EXPECTED);
  memset(ev, 0, sizeof(ev));
  EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_ONE_EVENT,
                          TIMEOUT_LONG, &duration));
  EXPECT_EQ(USER_DATA_A, ev[0].user_data);
  EXPECT_EQ(KE_EXPECTED, ev[0].events);
  EXPECT_GT(SCHEDULING_GRANULARITY, duration);

  // Return immediately if the Emitter is already signaled, even when blocking
  // forever.
  emitter->SetEventStatus(KE_EXPECTED);
  memset(ev, 0, sizeof(ev));
  EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_ONE_EVENT,
                          TIMEOUT_NEVER, &duration));
  EXPECT_EQ(USER_DATA_A, ev[0].user_data);
  EXPECT_EQ(KE_EXPECTED, ev[0].events);
  EXPECT_GT(SCHEDULING_GRANULARITY, duration);

  // Return immediately if the Emitter is not signaled when not blocking.
  emitter->SetEventStatus(KE_NONE);
  memset(ev, 0, sizeof(ev));
  EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_NO_EVENT,
                          TIMEOUT_IMMEDIATE, &duration));
  EXPECT_EQ(0, duration);

  // Wait TIMEOUT_LONG if the emitter is not in a signaled state.
  emitter->SetEventStatus(KE_NONE);
  memset(ev, 0, sizeof(ev));
  EXPECT_TRUE(TimedListen(listener, ev, MAX_EVENTS, EXPECT_NO_EVENT,
                          TIMEOUT_LONG, &duration));
  EXPECT_LT(TIMEOUT_LONG - TIMEOUT_SLOP, duration);
  EXPECT_GT(TIMEOUT_LONG + SCHEDULING_GRANULARITY, duration);
}

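// Parameters handed to the signalling thread: which emitter to poke, how long
// to sleep first, and which event bits to raise.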
struct SignalInfo {
  EventEmitterTester* em;
  unsigned int ms_wait;
  uint32_t events;
};

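// Thread body: sleep for |ms_wait| milliseconds, then raise |events| on the
// emitter so a blocked listener wakes up.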
static void* SignalEmitterThread(void* ptr) {
  SignalInfo* info = (SignalInfo*) ptr;
  struct timespec ts;
  ts.tv_sec = 0;
  ts.tv_nsec = info->ms_wait * 1000000;

  nanosleep(&ts, NULL);

  info->em->RaiseEvent(info->events);
  return NULL;
}

TEST(EventTest, EmitterSignalling) {
  ScopedRef<EventEmitterTester> emitter(new EventEmitterTester());
  ScopedEventListener listener(new EventListener);

  SignalInfo siginfo;
  struct timeval start;
  struct timeval end;
  long duration;

  EventData ev[MAX_EVENTS];
  memset(ev, 0, sizeof(ev));
  EXPECT_EQ(0, listener->Track(ID_EMITTER, emitter, KE_EXPECTED, USER_DATA_A));

  // Set up another thread that waits TIMEOUT_SHORT (well under the listener's
  // timeout below), then signals both an expected and an unexpected value.
  siginfo.em = emitter.get();
  siginfo.ms_wait = TIMEOUT_SHORT;
  siginfo.events = KE_EXPECTED | KE_FILTERED;
  pthread_t tid;
  pthread_create(&tid, NULL, SignalEmitterThread, &siginfo);

  // Wait for the signal from the other thread and time it.
  gettimeofday(&start, NULL);
  int cnt = 0;
  EXPECT_EQ(0, listener->Wait(ev, MAX_EVENTS, TIMEOUT_VERY_LONG, &cnt));
  EXPECT_EQ(1, cnt);
  gettimeofday(&end, NULL);

  // Verify the wait duration, and that we only received the expected signal.
  duration = Duration(&start, &end);
  EXPECT_GT(TIMEOUT_SHORT + SCHEDULING_GRANULARITY, duration);
  EXPECT_LT(TIMEOUT_SHORT - TIMEOUT_SLOP, duration);
  EXPECT_EQ(USER_DATA_A, ev[0].user_data);
  EXPECT_EQ(KE_EXPECTED, ev[0].events);

  // Reap the signalling thread before tearing down the test objects.
  pthread_join(tid, NULL);
}


namespace {

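// A KernelProxy whose socket() call hands back file descriptors backed by
// EventEmitterTester nodes, so the select() test below has pollable fds to
// work with.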
class KernelProxyPolling : public KernelProxy {
 public:
  virtual int socket(int domain, int type, int protocol) {
    ScopedMount mnt;
    ScopedMountNode node(new EventEmitterTester());
    ScopedKernelHandle handle(new KernelHandle(mnt, node));

    Error error = handle->Init(0);
    if (error) {
      errno = error;
      return -1;
    }

    return AllocateFD(handle);
  }
};

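// Test fixture that installs KernelProxyPolling as the active kernel proxy
// for the duration of each test via ki_init()/ki_uninit().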
class KernelProxyPollingTest : public ::testing::Test {
 public:
  KernelProxyPollingTest() : kp_(new KernelProxyPolling) {
    ki_init(kp_);
  }

  ~KernelProxyPollingTest() {
    ki_uninit();
    delete kp_;
  }

  KernelProxyPolling* kp_;
};

}  // namespace


#define SOCKET_CNT 4
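// Reset |set| and mark stdin/stdout/stderr plus all of the test sockets.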
void SetFDs(fd_set* set, int* fds) {
  FD_ZERO(set);

  FD_SET(0, set);
  FD_SET(1, set);
  FD_SET(2, set);

  for (int index = 0; index < SOCKET_CNT; index++)
    FD_SET(fds[index], set);
}

TEST_F(KernelProxyPollingTest, Select) {
  int fds[SOCKET_CNT];

  fd_set rd_set;
  fd_set wr_set;

  FD_ZERO(&rd_set);
  FD_ZERO(&wr_set);

  FD_SET(0, &rd_set);
  FD_SET(1, &rd_set);
  FD_SET(2, &rd_set);

  FD_SET(0, &wr_set);
  FD_SET(1, &wr_set);
  FD_SET(2, &wr_set);

  // Expect the normal files (stdin, stdout, stderr) to select as read, write,
  // and error. The same set is passed for all three conditions, so each fd
  // contributes three times to the count.
  int cnt = select(4, &rd_set, &rd_set, &rd_set, NULL);
  EXPECT_EQ(3 * 3, cnt);
  EXPECT_NE(0, FD_ISSET(0, &rd_set));
  EXPECT_NE(0, FD_ISSET(1, &rd_set));
  EXPECT_NE(0, FD_ISSET(2, &rd_set));

  for (int index = 0; index < SOCKET_CNT; index++) {
    fds[index] = socket(0, 0, 0);
    EXPECT_NE(-1, fds[index]);
  }

  // One past the highest-numbered fd (the nfds argument for select).
  const int fdnum = fds[SOCKET_CNT - 1] + 1;

  // Expect only the normal files to select
  SetFDs(&rd_set, fds);
  cnt = select(fdnum, &rd_set, NULL, NULL, NULL);
  EXPECT_EQ(3, cnt);
  EXPECT_NE(0, FD_ISSET(0, &rd_set));
  EXPECT_NE(0, FD_ISSET(1, &rd_set));
  EXPECT_NE(0, FD_ISSET(2, &rd_set));
  for (int index = 0; index < SOCKET_CNT; index++) {
    EXPECT_EQ(0, FD_ISSET(fds[index], &rd_set));
  }

  // Poke one of the pollable nodes to be READ ready
  ioctl(fds[0], POLLIN, NULL);

  // Expect normal files to be read/write and one pollable node to be read.
  SetFDs(&rd_set, fds);
  SetFDs(&wr_set, fds);
  cnt = select(fdnum, &rd_set, &wr_set, NULL, NULL);
  EXPECT_EQ(7, cnt);
  EXPECT_NE(0, FD_ISSET(fds[0], &rd_set));
  EXPECT_EQ(0, FD_ISSET(fds[0], &wr_set));
}