/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_libevent_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>

#include "event.h"
#include "event-internal.h"
#include "evutil.h"
#include "log.h"

#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif

/* In order of preference */
static const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef HAVE_EPOLL
	&epollops,
#endif
#ifdef HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef HAVE_POLL
	&pollops,
#endif
#ifdef HAVE_SELECT
	&selectops,
#endif
#ifdef WIN32
	&win32ops,
#endif
	NULL
};

/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic = 1;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig;	/* Set in signal handler */

/* Prototypes */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);
static void	timeout_correct(struct event_base *, struct timeval *);

static int
gettime(struct event_base *base, struct timeval *tp)
{
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	struct timespec ts;

	if (use_monotonic &&
	    clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
		tp->tv_sec = ts.tv_sec;
		tp->tv_usec = ts.tv_nsec / 1000;
		return (0);
	}
#endif

	use_monotonic = 0;

	return (evutil_gettimeofday(tp, NULL));
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new();

	if (base != NULL)
		current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	int i;
	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	event_sigcb = NULL;
	event_gotsig = 0;

	gettime(base, &base->event_tv);

	min_heap_ctor(&base->timeheap);
	TAILQ_INIT(&base->eventqueue);
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

	base->evbase = NULL;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (evutil_getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s\n",
		    base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);

	return (base);
}
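
/*
 * Illustrative usage sketch (not part of this file): a minimal caller of
 * the single-base API set up above.  The descriptor `fd` and the callback
 * `on_read` are hypothetical application code.
 *
 *	static void
 *	on_read(int fd, short events, void *arg)
 *	{
 *		char buf[512];
 *		read(fd, buf, sizeof(buf));	// handle the data
 *	}
 *
 *	int
 *	run(int fd)
 *	{
 *		struct event ev;
 *
 *		event_init();			// creates the global current_base
 *		event_set(&ev, fd, EV_READ|EV_PERSIST, on_read, NULL);
 *		event_add(&ev, NULL);		// no timeout: wait only for I/O
 *		return (event_dispatch());	// runs until no events are left
 *	}
 */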

void
event_base_free(struct event_base *base)
{
	int i, n_deleted = 0;
	struct event *ev;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	/* XXX(niels) - check for internal events first */
	assert(base);
	/* Delete all non-internal events. */
	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
		struct event *next = TAILQ_NEXT(ev, ev_next);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del(ev);
			++n_deleted;
		}
		ev = next;
	}
	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
			struct event *next = TAILQ_NEXT(ev, ev_active_next);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
		    __func__, n_deleted));

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);

	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(min_heap_empty(&base->timeheap));
	min_heap_dtor(&base->timeheap);

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));

	free(base);
}

/* Reinitialize the event base after a fork. */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;
	struct event *ev;

#if 0
	/* Right now, reinit always takes effect, since even if the
	   backend doesn't require it, the signal socketpair code does.
	 */
	/* check if this event mechanism requires reinit */
	if (!evsel->need_reinit)
		return (0);
#endif

	/* prevent internal delete */
	if (base->sig.ev_signal_added) {
		/* we cannot call event_del here because the base has
		 * not been reinitialized yet. */
		event_queue_remove(base, &base->sig.ev_signal,
		    EVLIST_INSERTED);
		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
			event_queue_remove(base, &base->sig.ev_signal,
			    EVLIST_ACTIVE);
		base->sig.ev_signal_added = 0;
	}

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	evbase = base->evbase = evsel->init(base);
	if (base->evbase == NULL)
		event_errx(1, "%s: could not reinitialize event mechanism",
		    __func__);

	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (evsel->add(evbase, ev) == -1)
			res = -1;
	}

	return (res);
}
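
/*
 * Illustrative sketch (not part of this file): a process that forks after
 * setting up events would typically call event_reinit() in the child so
 * that the backend and the signal socketpair are rebuilt for the child's
 * copy of the base.  `base` here is whatever event_base_new() returned.
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		// child: re-create backend state before dispatching again
 *		if (event_reinit(base) == -1)
 *			exit(1);
 *		event_base_dispatch(base);
 *		exit(0);
 *	}
 */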

int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (npriorities == base->nactivequeues)
		return (0);

	if (base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)
	    calloc(base->nactivequeues, sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}

int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}

/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */

static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		if (ev->ev_events & EV_PERSIST)
			event_queue_remove(base, ev, EVLIST_ACTIVE);
		else
			event_del(ev);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (event_gotsig || base->event_break)
				return;
		}
	}
}

/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(struct event_base *base)
{
	assert(base);
	return (base->evsel->name);
}

static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
		    event_base, tv));
}

/* not thread safe */
int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	if (event_base == NULL)
		return (-1);

	event_base->event_break = 1;
	return (0);
}
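
/*
 * Illustrative sketch (not part of this file): the difference between the
 * two exit paths above.  event_base_loopexit() schedules a one-shot timeout
 * that sets event_gotterm, so the loop finishes the current pass and then
 * stops; event_base_loopbreak() sets event_break, which is also checked
 * after each callback in event_process_active().  `base` is hypothetical
 * caller state.
 *
 *	struct timeval five = { 5, 0 };
 *
 *	event_base_loopexit(base, &five);	// stop dispatching ~5s from now
 *	event_base_loopbreak(base);		// stop as soon as possible
 */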

/* not thread safe */

int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done;

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	if (base->sig.ev_signal_added)
		evsignal_base = base;
	done = 0;
	while (!done) {
		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		if (base->event_break) {
			base->event_break = 0;
			break;
		}

		/* You cannot use this interface for multi-threaded apps */
		while (event_gotsig) {
			event_gotsig = 0;
			if (event_sigcb) {
				res = (*event_sigcb)();
				if (res == -1) {
					errno = EINTR;
					return (-1);
				}
			}
		}

		timeout_correct(base, &tv);

		tv_p = &tv;
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		/* update last old time */
		gettime(base, &base->event_tv);

		/* clear time cache */
		base->tv_cache.tv_sec = 0;

		res = evsel->dispatch(base, evbase, tv_p);

		if (res == -1)
			return (-1);
		gettime(base, &base->tv_cache);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}

/* Sets up an event for processing once */

struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback; it deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}

/* not threadsafe, event scheduled once. */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			evutil_timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_base_set(base, &eonce->ev);
	if (res == 0)
		res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}
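
/*
 * Illustrative sketch (not part of this file): scheduling a one-shot
 * timeout without managing a struct event yourself.  The callback
 * `on_timeout` is hypothetical application code; the event_once structure
 * allocated above is freed automatically after the callback runs.
 *
 *	static void
 *	on_timeout(int fd, short events, void *arg)
 *	{
 *		// events is EV_TIMEOUT here; fd is -1
 *	}
 *
 *	struct timeval tv = { 2, 0 };
 *	event_once(-1, EV_TIMEOUT, on_timeout, NULL, &tv);
 */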

void
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	min_heap_elem_init(ev);

	/* by default, we put new events into the middle priority */
	if (current_base)
		ev->ev_pri = current_base->nactivequeues / 2;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues / 2;

	return (0);
}

/*
 * Sets the priority of an event - if the event is already active,
 * changing its priority will fail.
 */

int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}
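
/*
 * Illustrative sketch (not part of this file): using multiple priority
 * queues.  Lower numbers are dispatched first, and a flood of low-numbered
 * events can starve higher-numbered ones, as noted above
 * event_process_active().  `base`, `urgent`, `bulk`, `cb` and the
 * descriptors are hypothetical caller state.
 *
 *	event_base_priority_init(base, 3);	// priorities 0..2
 *
 *	event_set(&urgent, fd1, EV_READ|EV_PERSIST, cb, NULL);
 *	event_base_set(base, &urgent);
 *	event_priority_set(&urgent, 0);		// served before priorities 1 and 2
 *
 *	event_set(&bulk, fd2, EV_READ|EV_PERSIST, cb, NULL);
 *	event_base_set(base, &bulk);
 *	event_priority_set(&bulk, 2);
 */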

/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(ev->ev_base, &now);
		evutil_timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		evutil_gettimeofday(&now, NULL);
		evutil_timeradd(&now, &res, tv);
	}

	return (flags & event);
}

int
event_add(struct event *ev, const struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;

	event_debug((
		 "event_add: event: %p, %s%s%scall %p",
		 ev,
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve(&base->timeheap,
			1 + min_heap_size(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		res = evsel->add(evbase, ev);
		if (res != -1)
			event_queue_insert(base, ev, EVLIST_INSERTED);
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;

		/*
		 * we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
		 */
		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(base, &now);
		evutil_timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
			 "event_add: timeout in %ld seconds, call %p",
			 tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	return (res);
}
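
/*
 * Illustrative sketch (not part of this file): the timeval passed to
 * event_add() is relative, and is added to the current time above, so
 * re-adding an event simply re-arms its timeout.  `ev`, `on_read` and
 * `fd` are hypothetical application code.
 *
 *	struct timeval timeout = { 30, 0 };
 *
 *	event_set(&ev, fd, EV_READ, on_read, NULL);
 *	event_add(&ev, &timeout);	// fire on_read on data or after ~30s
 *
 *	// later, e.g. inside on_read for this non-persistent event:
 *	event_add(&ev, &timeout);	// schedule it again with a fresh timeout
 */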

int
event_del(struct event *ev)
{
	struct event_base *base;

	event_debug(("event_del: %p, callback %p",
		 ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (base->evsel->del(base->evbase, ev));
	}

	return (0);
}

void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}

static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;

	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		return (0);
	}

	if (gettime(base, &now) == -1)
		return (-1);

	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
		evutil_timerclear(tv);
		return (0);
	}

	evutil_timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %ld seconds", tv->tv_sec));
	return (0);
}

/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked.  Not needed when using a
 * monotonic clock.
 */

static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
	struct event **pev;
	unsigned int size;
	struct timeval off;

	if (use_monotonic)
		return;

	/* Check if time is running backwards */
	gettime(base, tv);
	if (evutil_timercmp(tv, &base->event_tv, >=)) {
		base->event_tv = *tv;
		return;
	}

	event_debug(("%s: time is running backwards, corrected",
		    __func__));
	evutil_timersub(&base->event_tv, tv, &off);

	/*
	 * We can modify the key element of the node without destroying
	 * the key, because we apply it to all in the right order.
	 */
	pev = base->timeheap.p;
	size = base->timeheap.n;
	for (; size-- > 0; ++pev) {
		struct timeval *ev_tv = &(**pev).ev_timeout;
		evutil_timersub(ev_tv, &off, ev_tv);
	}
	/* Now remember what the new time turned out to be. */
	base->event_tv = *tv;
}

void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev;

	if (min_heap_empty(&base->timeheap))
		return;

	gettime(base, &now);

	while ((ev = min_heap_top(&base->timeheap))) {
		if (evutil_timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
			 ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}

void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
		    ev, ev->ev_fd, queue);

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		min_heap_erase(&base->timeheap, ev);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
		    ev, ev->ev_fd, queue);
	}

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT: {
		min_heap_push(&base->timeheap, ev);
		break;
	}
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

/* Functions for debugging */

const char *
event_get_version(void)
{
	return (VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}