| OLD | NEW |
| (Empty) |
| 1 /* $OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $ */ | |
| 2 | |
| 3 /* | |
| 4 * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu> | |
| 5 * All rights reserved. | |
| 6 * | |
| 7 * Redistribution and use in source and binary forms, with or without | |
| 8 * modification, are permitted provided that the following conditions | |
| 9 * are met: | |
| 10 * 1. Redistributions of source code must retain the above copyright | |
| 11 * notice, this list of conditions and the following disclaimer. | |
| 12 * 2. Redistributions in binary form must reproduce the above copyright | |
| 13 * notice, this list of conditions and the following disclaimer in the | |
| 14 * documentation and/or other materials provided with the distribution. | |
| 15 * 3. The name of the author may not be used to endorse or promote products | |
| 16 * derived from this software without specific prior written permission. | |
| 17 * | |
| 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | |
| 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | |
| 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. | |
| 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | |
| 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | |
| 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
| 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
| 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
| 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | |
| 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
| 28 */ | |
| 29 #ifdef HAVE_CONFIG_H | |
| 30 #include "config.h" | |
| 31 #endif | |
| 32 | |
| 33 #define _GNU_SOURCE 1 | |
| 34 | |
| 35 #include <sys/types.h> | |
| 36 #ifdef HAVE_SYS_TIME_H | |
| 37 #include <sys/time.h> | |
| 38 #else | |
| 39 #include <sys/_libevent_time.h> | |
| 40 #endif | |
| 41 #include <sys/queue.h> | |
| 42 #include <sys/event.h> | |
| 43 #include <signal.h> | |
| 44 #include <stdio.h> | |
| 45 #include <stdlib.h> | |
| 46 #include <string.h> | |
| 47 #include <unistd.h> | |
| 48 #include <errno.h> | |
| 49 #include <assert.h> | |
| 50 #ifdef HAVE_INTTYPES_H | |
| 51 #include <inttypes.h> | |
| 52 #endif | |
| 53 | |
/* Some platforms apparently define the udata field of struct kevent as
 * intptr_t, whereas others define it as void*.  There doesn't seem to be an
 * easy way to tell them apart via autoconf, so we need to use OS macros.
 * PTR_TO_UDATA(x) casts a pointer to whatever type this platform's
 * kevent.udata field expects. */
#if defined(HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__)
#define PTR_TO_UDATA(x) ((intptr_t)(x))
#else
#define PTR_TO_UDATA(x) (x)
#endif
| 62 | |
| 63 #include "event.h" | |
| 64 #include "event-internal.h" | |
| 65 #include "log.h" | |
| 66 #include "evsignal.h" | |
| 67 | |
/* Private flag stored in an event's ev_flags marking that the event is
 * currently registered with the kernel queue. */
#define EVLIST_X_KQINKERNEL 0x1000

/* Initial capacity of the changes/events arrays; doubled on demand by
 * kq_insert(). */
#define NEVENT 64

/* Per-event_base state for the kqueue backend. */
struct kqop {
	struct kevent *changes;		/* changes queued for the next kevent() call */
	int nchanges;			/* number of valid entries in changes */
	struct kevent *events;		/* buffer for results returned by kevent() */
	struct event_list evsigevents[NSIG];	/* registered events, per signal number */
	int nevents;			/* capacity of the changes and events arrays */
	int kq;				/* the kqueue descriptor */
	pid_t pid;			/* pid that created kq; kq_dealloc only closes
					 * the descriptor in that process */
};

static void *kq_init	(struct event_base *);
static int kq_add	(void *, struct event *);
static int kq_del	(void *, struct event *);
static int kq_dispatch	(struct event_base *, void *, struct timeval *);
static int kq_insert	(struct kqop *, struct kevent *);
static void kq_dealloc	(struct event_base *, void *);

/* Backend method table for the kqueue backend. */
const struct eventop kqops = {
	"kqueue",
	kq_init,
	kq_add,
	kq_del,
	kq_dispatch,
	kq_dealloc,
	1 /* need reinit */
};
| 98 | |
/*
 * Allocate and initialize the kqueue backend state for a base.
 * Returns a struct kqop * on success, or NULL on failure: kqueue
 * disabled via the EVENT_NOKQUEUE environment variable, allocation
 * failure, kqueue() failure, or a broken kqueue implementation.
 */
static void *
kq_init(struct event_base *base)
{
	int i, kq;
	struct kqop *kqueueop;

	/* Disable kqueue when this environment variable is set */
	if (evutil_getenv("EVENT_NOKQUEUE"))
		return (NULL);

	if (!(kqueueop = calloc(1, sizeof(struct kqop))))
		return (NULL);

	/* Initialize the kernel queue */

	if ((kq = kqueue()) == -1) {
		event_warn("kqueue");
		free (kqueueop);
		return (NULL);
	}

	kqueueop->kq = kq;

	/* Remember which process created the kqueue; kq_dealloc only
	 * closes the descriptor when still running in that process. */
	kqueueop->pid = getpid();

	/* Initialize fields */
	kqueueop->changes = malloc(NEVENT * sizeof(struct kevent));
	if (kqueueop->changes == NULL) {
		free (kqueueop);
		return (NULL);
	}
	kqueueop->events = malloc(NEVENT * sizeof(struct kevent));
	if (kqueueop->events == NULL) {
		free (kqueueop->changes);
		free (kqueueop);
		return (NULL);
	}
	kqueueop->nevents = NEVENT;

	/* we need to keep track of multiple events per signal */
	for (i = 0; i < NSIG; ++i) {
		TAILQ_INIT(&kqueueop->evsigevents[i]);
	}

	/* Check for Mac OS X kqueue bug: submit a bogus EV_ADD and make
	 * sure the kernel reports it back as an EV_ERROR result. */
	memset(&kqueueop->changes[0], 0, sizeof kqueueop->changes[0]);
	kqueueop->changes[0].ident = -1;
	kqueueop->changes[0].filter = EVFILT_READ;
	kqueueop->changes[0].flags = EV_ADD;
	/*
	 * If kqueue works, then kevent will succeed, and it will
	 * stick an error in events[0].  If kqueue is broken, then
	 * kevent will fail.
	 */
	if (kevent(kq,
		kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 ||
	    kqueueop->events[0].ident != -1 ||
	    kqueueop->events[0].flags != EV_ERROR) {
		event_warn("%s: detected broken kqueue; not using.", __func__);
		free(kqueueop->changes);
		free(kqueueop->events);
		free(kqueueop);
		close(kq);
		return (NULL);
	}

	return (kqueueop);
}
| 167 | |
| 168 static int | |
| 169 kq_insert(struct kqop *kqop, struct kevent *kev) | |
| 170 { | |
| 171 int nevents = kqop->nevents; | |
| 172 | |
| 173 if (kqop->nchanges == nevents) { | |
| 174 struct kevent *newchange; | |
| 175 struct kevent *newresult; | |
| 176 | |
| 177 nevents *= 2; | |
| 178 | |
| 179 newchange = realloc(kqop->changes, | |
| 180 nevents * sizeof(struct kevent)); | |
| 181 if (newchange == NULL) { | |
| 182 event_warn("%s: malloc", __func__); | |
| 183 return (-1); | |
| 184 } | |
| 185 kqop->changes = newchange; | |
| 186 | |
| 187 newresult = realloc(kqop->events, | |
| 188 nevents * sizeof(struct kevent)); | |
| 189 | |
| 190 /* | |
| 191 * If we fail, we don't have to worry about freeing, | |
| 192 * the next realloc will pick it up. | |
| 193 */ | |
| 194 if (newresult == NULL) { | |
| 195 event_warn("%s: malloc", __func__); | |
| 196 return (-1); | |
| 197 } | |
| 198 kqop->events = newresult; | |
| 199 | |
| 200 kqop->nevents = nevents; | |
| 201 } | |
| 202 | |
| 203 memcpy(&kqop->changes[kqop->nchanges++], kev, sizeof(struct kevent)); | |
| 204 | |
| 205 event_debug(("%s: fd %d %s%s", | |
| 206 __func__, (int)kev->ident, | |
| 207 kev->filter == EVFILT_READ ? "EVFILT_READ" : "EVFILT_WRITE", | |
| 208 kev->flags == EV_DELETE ? " (del)" : "")); | |
| 209 | |
| 210 return (0); | |
| 211 } | |
| 212 | |
/*
 * Placeholder signal handler.  The actual notification is delivered
 * through kqueue's EVFILT_SIGNAL filter, so the handler body is
 * intentionally empty; it exists only so the signal is not ignored.
 */
static void
kq_sighandler(int sig)
{
	(void)sig;	/* unused */
}
| 218 | |
/*
 * Submit all pending changes and wait for events, up to the timeout in
 * tv (forever if tv is NULL).  Triggered events are activated via
 * event_active().  Returns 0 on success, -1 on a fatal kevent error.
 */
static int
kq_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
	struct kqop *kqop = arg;
	struct kevent *changes = kqop->changes;
	struct kevent *events = kqop->events;
	struct event *ev;
	struct timespec ts, *ts_p = NULL;
	int i, res;

	if (tv != NULL) {
		TIMEVAL_TO_TIMESPEC(tv, &ts);
		ts_p = &ts;
	}

	/* One kevent() call both submits the queued changes and collects
	 * triggered events. */
	res = kevent(kqop->kq, changes, kqop->nchanges,
	    events, kqop->nevents, ts_p);
	kqop->nchanges = 0;
	if (res == -1) {
		if (errno != EINTR) {
			event_warn("kevent");
			return (-1);
		}

		/* Interrupted by a signal: not a fatal error. */
		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			/*
			 * Error messages that can happen, when a delete fails.
			 * EBADF happens when the file descriptor has been
			 * closed,
			 * ENOENT when the file descriptor was closed and
			 * then reopened.
			 * EINVAL for some reasons not understood; EINVAL
			 * should not be returned ever; but FreeBSD does :-\
			 * An error is also indicated when a callback deletes
			 * an event we are still processing.  In that case
			 * the data field is set to ENOENT.
			 */
			if (events[i].data == EBADF ||
			    events[i].data == EINVAL ||
			    events[i].data == ENOENT)
				continue;
			errno = events[i].data;
			return (-1);
		}

		/* Translate the kqueue filter into libevent event flags. */
		if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
		} else if (events[i].filter == EVFILT_SIGNAL) {
			which |= EV_SIGNAL;
		}

		/* Unrecognized filter: ignore. */
		if (!which)
			continue;

		if (events[i].filter == EVFILT_SIGNAL) {
			/* For signals, udata points at the per-signal event
			 * list (set in kq_add); activate every event on it,
			 * passing events[i].data through as the count. */
			struct event_list *head =
			    (struct event_list *)events[i].udata;
			TAILQ_FOREACH(ev, head, ev_signal_next) {
				event_active(ev, which, events[i].data);
			}
		} else {
			/* For fd events, udata is the event itself. */
			ev = (struct event *)events[i].udata;

			/* Non-persistent events were registered EV_ONESHOT,
			 * so they are no longer in the kernel after firing. */
			if (!(ev->ev_events & EV_PERSIST))
				ev->ev_flags &= ~EVLIST_X_KQINKERNEL;

			event_active(ev, which, 1);
		}
	}

	return (0);
}
| 301 | |
| 302 | |
| 303 static int | |
| 304 kq_add(void *arg, struct event *ev) | |
| 305 { | |
| 306 struct kqop *kqop = arg; | |
| 307 struct kevent kev; | |
| 308 | |
| 309 if (ev->ev_events & EV_SIGNAL) { | |
| 310 int nsignal = EVENT_SIGNAL(ev); | |
| 311 | |
| 312 assert(nsignal >= 0 && nsignal < NSIG); | |
| 313 if (TAILQ_EMPTY(&kqop->evsigevents[nsignal])) { | |
| 314 struct timespec timeout = { 0, 0 }; | |
| 315 | |
| 316 memset(&kev, 0, sizeof(kev)); | |
| 317 kev.ident = nsignal; | |
| 318 kev.filter = EVFILT_SIGNAL; | |
| 319 kev.flags = EV_ADD; | |
| 320 kev.udata = PTR_TO_UDATA(&kqop->evsigevents[nsignal]); | |
| 321 | |
| 322 /* Be ready for the signal if it is sent any | |
| 323 * time between now and the next call to | |
| 324 * kq_dispatch. */ | |
| 325 if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) | |
| 326 return (-1); | |
| 327 | |
| 328 if (_evsignal_set_handler(ev->ev_base, nsignal, | |
| 329 kq_sighandler) == -1) | |
| 330 return (-1); | |
| 331 } | |
| 332 | |
| 333 TAILQ_INSERT_TAIL(&kqop->evsigevents[nsignal], ev, | |
| 334 ev_signal_next); | |
| 335 ev->ev_flags |= EVLIST_X_KQINKERNEL; | |
| 336 return (0); | |
| 337 } | |
| 338 | |
| 339 if (ev->ev_events & EV_READ) { | |
| 340 memset(&kev, 0, sizeof(kev)); | |
| 341 kev.ident = ev->ev_fd; | |
| 342 kev.filter = EVFILT_READ; | |
| 343 #ifdef NOTE_EOF | |
| 344 /* Make it behave like select() and poll() */ | |
| 345 kev.fflags = NOTE_EOF; | |
| 346 #endif | |
| 347 kev.flags = EV_ADD; | |
| 348 if (!(ev->ev_events & EV_PERSIST)) | |
| 349 kev.flags |= EV_ONESHOT; | |
| 350 kev.udata = PTR_TO_UDATA(ev); | |
| 351 | |
| 352 if (kq_insert(kqop, &kev) == -1) | |
| 353 return (-1); | |
| 354 | |
| 355 ev->ev_flags |= EVLIST_X_KQINKERNEL; | |
| 356 } | |
| 357 | |
| 358 if (ev->ev_events & EV_WRITE) { | |
| 359 memset(&kev, 0, sizeof(kev)); | |
| 360 kev.ident = ev->ev_fd; | |
| 361 kev.filter = EVFILT_WRITE; | |
| 362 kev.flags = EV_ADD; | |
| 363 if (!(ev->ev_events & EV_PERSIST)) | |
| 364 kev.flags |= EV_ONESHOT; | |
| 365 kev.udata = PTR_TO_UDATA(ev); | |
| 366 | |
| 367 if (kq_insert(kqop, &kev) == -1) | |
| 368 return (-1); | |
| 369 | |
| 370 ev->ev_flags |= EVLIST_X_KQINKERNEL; | |
| 371 } | |
| 372 | |
| 373 return (0); | |
| 374 } | |
| 375 | |
| 376 static int | |
| 377 kq_del(void *arg, struct event *ev) | |
| 378 { | |
| 379 struct kqop *kqop = arg; | |
| 380 struct kevent kev; | |
| 381 | |
| 382 if (!(ev->ev_flags & EVLIST_X_KQINKERNEL)) | |
| 383 return (0); | |
| 384 | |
| 385 if (ev->ev_events & EV_SIGNAL) { | |
| 386 int nsignal = EVENT_SIGNAL(ev); | |
| 387 struct timespec timeout = { 0, 0 }; | |
| 388 | |
| 389 assert(nsignal >= 0 && nsignal < NSIG); | |
| 390 TAILQ_REMOVE(&kqop->evsigevents[nsignal], ev, ev_signal_next); | |
| 391 if (TAILQ_EMPTY(&kqop->evsigevents[nsignal])) { | |
| 392 memset(&kev, 0, sizeof(kev)); | |
| 393 kev.ident = nsignal; | |
| 394 kev.filter = EVFILT_SIGNAL; | |
| 395 kev.flags = EV_DELETE; | |
| 396 | |
| 397 /* Because we insert signal events | |
| 398 * immediately, we need to delete them | |
| 399 * immediately, too */ | |
| 400 if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) | |
| 401 return (-1); | |
| 402 | |
| 403 if (_evsignal_restore_handler(ev->ev_base, | |
| 404 nsignal) == -1) | |
| 405 return (-1); | |
| 406 } | |
| 407 | |
| 408 ev->ev_flags &= ~EVLIST_X_KQINKERNEL; | |
| 409 return (0); | |
| 410 } | |
| 411 | |
| 412 if (ev->ev_events & EV_READ) { | |
| 413 memset(&kev, 0, sizeof(kev)); | |
| 414 kev.ident = ev->ev_fd; | |
| 415 kev.filter = EVFILT_READ; | |
| 416 kev.flags = EV_DELETE; | |
| 417 | |
| 418 if (kq_insert(kqop, &kev) == -1) | |
| 419 return (-1); | |
| 420 | |
| 421 ev->ev_flags &= ~EVLIST_X_KQINKERNEL; | |
| 422 } | |
| 423 | |
| 424 if (ev->ev_events & EV_WRITE) { | |
| 425 memset(&kev, 0, sizeof(kev)); | |
| 426 kev.ident = ev->ev_fd; | |
| 427 kev.filter = EVFILT_WRITE; | |
| 428 kev.flags = EV_DELETE; | |
| 429 | |
| 430 if (kq_insert(kqop, &kev) == -1) | |
| 431 return (-1); | |
| 432 | |
| 433 ev->ev_flags &= ~EVLIST_X_KQINKERNEL; | |
| 434 } | |
| 435 | |
| 436 return (0); | |
| 437 } | |
| 438 | |
| 439 static void | |
| 440 kq_dealloc(struct event_base *base, void *arg) | |
| 441 { | |
| 442 struct kqop *kqop = arg; | |
| 443 | |
| 444 evsignal_dealloc(base); | |
| 445 | |
| 446 if (kqop->changes) | |
| 447 free(kqop->changes); | |
| 448 if (kqop->events) | |
| 449 free(kqop->events); | |
| 450 if (kqop->kq >= 0 && kqop->pid == getpid()) | |
| 451 close(kqop->kq); | |
| 452 | |
| 453 memset(kqop, 0, sizeof(struct kqop)); | |
| 454 free(kqop); | |
| 455 } | |
| OLD | NEW |