OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu> | 2 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu> |
3 * All rights reserved. | 3 * All rights reserved. |
4 * | 4 * |
5 * Redistribution and use in source and binary forms, with or without | 5 * Redistribution and use in source and binary forms, with or without |
6 * modification, are permitted provided that the following conditions | 6 * modification, are permitted provided that the following conditions |
7 * are met: | 7 * are met: |
8 * 1. Redistributions of source code must retain the above copyright | 8 * 1. Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * 2. Redistributions in binary form must reproduce the above copyright | 10 * 2. Redistributions in binary form must reproduce the above copyright |
(...skipping 19 matching lines...) Expand all Loading... |
30 | 30 |
31 #ifdef WIN32 | 31 #ifdef WIN32 |
32 #define WIN32_LEAN_AND_MEAN | 32 #define WIN32_LEAN_AND_MEAN |
33 #include <windows.h> | 33 #include <windows.h> |
34 #undef WIN32_LEAN_AND_MEAN | 34 #undef WIN32_LEAN_AND_MEAN |
35 #endif | 35 #endif |
36 #include <sys/types.h> | 36 #include <sys/types.h> |
37 #ifdef HAVE_SYS_TIME_H | 37 #ifdef HAVE_SYS_TIME_H |
38 #include <sys/time.h> | 38 #include <sys/time.h> |
39 #else | 39 #else |
40 #include <sys/_time.h> | 40 #include <sys/_libevent_time.h> |
41 #endif | 41 #endif |
42 #include <sys/queue.h> | 42 #include <sys/queue.h> |
43 #include <stdio.h> | 43 #include <stdio.h> |
44 #include <stdlib.h> | 44 #include <stdlib.h> |
45 #ifndef WIN32 | 45 #ifndef WIN32 |
46 #include <unistd.h> | 46 #include <unistd.h> |
47 #endif | 47 #endif |
48 #include <errno.h> | 48 #include <errno.h> |
49 #include <signal.h> | 49 #include <signal.h> |
50 #include <string.h> | 50 #include <string.h> |
(...skipping 21 matching lines...) Expand all Loading... |
72 extern const struct eventop kqops; | 72 extern const struct eventop kqops; |
73 #endif | 73 #endif |
74 #ifdef HAVE_DEVPOLL | 74 #ifdef HAVE_DEVPOLL |
75 extern const struct eventop devpollops; | 75 extern const struct eventop devpollops; |
76 #endif | 76 #endif |
77 #ifdef WIN32 | 77 #ifdef WIN32 |
78 extern const struct eventop win32ops; | 78 extern const struct eventop win32ops; |
79 #endif | 79 #endif |
80 | 80 |
81 /* In order of preference */ | 81 /* In order of preference */ |
82 const struct eventop *eventops[] = { | 82 static const struct eventop *eventops[] = { |
83 #ifdef HAVE_EVENT_PORTS | 83 #ifdef HAVE_EVENT_PORTS |
84 &evportops, | 84 &evportops, |
85 #endif | 85 #endif |
86 #ifdef HAVE_WORKING_KQUEUE | 86 #ifdef HAVE_WORKING_KQUEUE |
87 &kqops, | 87 &kqops, |
88 #endif | 88 #endif |
89 #ifdef HAVE_EPOLL | 89 #ifdef HAVE_EPOLL |
90 &epollops, | 90 &epollops, |
91 #endif | 91 #endif |
92 #ifdef HAVE_DEVPOLL | 92 #ifdef HAVE_DEVPOLL |
93 &devpollops, | 93 &devpollops, |
94 #endif | 94 #endif |
95 #ifdef HAVE_POLL | 95 #ifdef HAVE_POLL |
96 &pollops, | 96 &pollops, |
97 #endif | 97 #endif |
98 #ifdef HAVE_SELECT | 98 #ifdef HAVE_SELECT |
99 &selectops, | 99 &selectops, |
100 #endif | 100 #endif |
101 #ifdef WIN32 | 101 #ifdef WIN32 |
102 &win32ops, | 102 &win32ops, |
103 #endif | 103 #endif |
104 NULL | 104 NULL |
105 }; | 105 }; |
106 | 106 |
107 /* Global state */ | 107 /* Global state */ |
108 struct event_base *current_base = NULL; | 108 struct event_base *current_base = NULL; |
109 extern struct event_base *evsignal_base; | 109 extern struct event_base *evsignal_base; |
110 static int use_monotonic; | 110 static int use_monotonic; |
111 static int use_monotonic_initialized; | |
112 | 111 |
113 /* Prototypes */ | 112 /* Prototypes */ |
114 static void event_queue_insert(struct event_base *, struct event *, int); | 113 static void event_queue_insert(struct event_base *, struct event *, int); |
115 static void event_queue_remove(struct event_base *, struct event *, int); | 114 static void event_queue_remove(struct event_base *, struct event *, int); |
116 static int event_haveevents(struct event_base *); | 115 static int event_haveevents(struct event_base *); |
117 | 116 |
118 static void event_process_active(struct event_base *); | 117 static void event_process_active(struct event_base *); |
119 | 118 |
120 static int timeout_next(struct event_base *, struct timeval **); | 119 static int timeout_next(struct event_base *, struct timeval **); |
121 static void timeout_process(struct event_base *); | 120 static void timeout_process(struct event_base *); |
122 static void timeout_correct(struct event_base *, struct timeval *); | 121 static void timeout_correct(struct event_base *, struct timeval *); |
123 | 122 |
124 static void | 123 static void |
125 detect_monotonic(void) | 124 detect_monotonic(void) |
126 { | 125 { |
127 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC) | 126 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC) |
128 if (use_monotonic_initialized) | |
129 return; | |
130 | |
131 struct timespec ts; | 127 struct timespec ts; |
132 | 128 |
133 if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) | 129 if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) |
134 use_monotonic = 1; | 130 use_monotonic = 1; |
135 use_monotonic_initialized = 1; | |
136 #endif | 131 #endif |
137 } | 132 } |
138 | 133 |
139 static int | 134 static int |
140 gettime(struct event_base *base, struct timeval *tp) | 135 gettime(struct event_base *base, struct timeval *tp) |
141 { | 136 { |
142 if (base->tv_cache.tv_sec) { | 137 if (base->tv_cache.tv_sec) { |
143 *tp = base->tv_cache; | 138 *tp = base->tv_cache; |
144 return (0); | 139 return (0); |
145 } | 140 } |
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
191 base->evbase = NULL; | 186 base->evbase = NULL; |
192 for (i = 0; eventops[i] && !base->evbase; i++) { | 187 for (i = 0; eventops[i] && !base->evbase; i++) { |
193 base->evsel = eventops[i]; | 188 base->evsel = eventops[i]; |
194 | 189 |
195 base->evbase = base->evsel->init(base); | 190 base->evbase = base->evsel->init(base); |
196 } | 191 } |
197 | 192 |
198 if (base->evbase == NULL) | 193 if (base->evbase == NULL) |
199 event_errx(1, "%s: no event mechanism available", __func__); | 194 event_errx(1, "%s: no event mechanism available", __func__); |
200 | 195 |
201 » if (getenv("EVENT_SHOW_METHOD")) | 196 » if (evutil_getenv("EVENT_SHOW_METHOD")) |
202 event_msgx("libevent using: %s\n", | 197 event_msgx("libevent using: %s\n", |
203 base->evsel->name); | 198 base->evsel->name); |
204 | 199 |
205 /* allocate a single active event queue */ | 200 /* allocate a single active event queue */ |
206 event_base_priority_init(base, 1); | 201 event_base_priority_init(base, 1); |
207 | 202 |
208 return (base); | 203 return (base); |
209 } | 204 } |
210 | 205 |
211 void | 206 void |
(...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
276 void *evbase = base->evbase; | 271 void *evbase = base->evbase; |
277 int res = 0; | 272 int res = 0; |
278 struct event *ev; | 273 struct event *ev; |
279 | 274 |
280 /* check if this event mechanism requires reinit */ | 275 /* check if this event mechanism requires reinit */ |
281 if (!evsel->need_reinit) | 276 if (!evsel->need_reinit) |
282 return (0); | 277 return (0); |
283 | 278 |
284 /* prevent internal delete */ | 279 /* prevent internal delete */ |
285 if (base->sig.ev_signal_added) { | 280 if (base->sig.ev_signal_added) { |
| 281 /* we cannot call event_del here because the base has |
| 282 * not been reinitialized yet. */ |
286 event_queue_remove(base, &base->sig.ev_signal, | 283 event_queue_remove(base, &base->sig.ev_signal, |
287 EVLIST_INSERTED); | 284 EVLIST_INSERTED); |
| 285 if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE) |
| 286 event_queue_remove(base, &base->sig.ev_signal, |
| 287 EVLIST_ACTIVE); |
288 base->sig.ev_signal_added = 0; | 288 base->sig.ev_signal_added = 0; |
289 } | 289 } |
290 | 290 |
291 if (base->evsel->dealloc != NULL) | 291 if (base->evsel->dealloc != NULL) |
292 base->evsel->dealloc(base, base->evbase); | 292 base->evsel->dealloc(base, base->evbase); |
293 evbase = base->evbase = evsel->init(base); | 293 evbase = base->evbase = evsel->init(base); |
294 if (base->evbase == NULL) | 294 if (base->evbase == NULL) |
295 event_errx(1, "%s: could not reinitialize event mechanism", | 295 event_errx(1, "%s: could not reinitialize event mechanism", |
296 __func__); | 296 __func__); |
297 | 297 |
(...skipping 21 matching lines...) Expand all Loading... |
319 | 319 |
320 if (base->nactivequeues && npriorities != base->nactivequeues) { | 320 if (base->nactivequeues && npriorities != base->nactivequeues) { |
321 for (i = 0; i < base->nactivequeues; ++i) { | 321 for (i = 0; i < base->nactivequeues; ++i) { |
322 free(base->activequeues[i]); | 322 free(base->activequeues[i]); |
323 } | 323 } |
324 free(base->activequeues); | 324 free(base->activequeues); |
325 } | 325 } |
326 | 326 |
327 /* Allocate our priority queues */ | 327 /* Allocate our priority queues */ |
328 base->nactivequeues = npriorities; | 328 base->nactivequeues = npriorities; |
329 » base->activequeues = (struct event_list **)calloc(base->nactivequeues, | 329 » base->activequeues = (struct event_list **) |
330 » npriorities * sizeof(struct event_list *)); | 330 » calloc(base->nactivequeues, sizeof(struct event_list *)); |
331 if (base->activequeues == NULL) | 331 if (base->activequeues == NULL) |
332 event_err(1, "%s: calloc", __func__); | 332 event_err(1, "%s: calloc", __func__); |
333 | 333 |
334 for (i = 0; i < base->nactivequeues; ++i) { | 334 for (i = 0; i < base->nactivequeues; ++i) { |
335 base->activequeues[i] = malloc(sizeof(struct event_list)); | 335 base->activequeues[i] = malloc(sizeof(struct event_list)); |
336 if (base->activequeues[i] == NULL) | 336 if (base->activequeues[i] == NULL) |
337 event_err(1, "%s: malloc", __func__); | 337 event_err(1, "%s: malloc", __func__); |
338 TAILQ_INIT(base->activequeues[i]); | 338 TAILQ_INIT(base->activequeues[i]); |
339 } | 339 } |
340 | 340 |
(...skipping 122 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
463 | 463 |
464 int | 464 int |
465 event_base_loop(struct event_base *base, int flags) | 465 event_base_loop(struct event_base *base, int flags) |
466 { | 466 { |
467 const struct eventop *evsel = base->evsel; | 467 const struct eventop *evsel = base->evsel; |
468 void *evbase = base->evbase; | 468 void *evbase = base->evbase; |
469 struct timeval tv; | 469 struct timeval tv; |
470 struct timeval *tv_p; | 470 struct timeval *tv_p; |
471 int res, done; | 471 int res, done; |
472 | 472 |
| 473 /* clear time cache */ |
| 474 base->tv_cache.tv_sec = 0; |
| 475 |
473 if (base->sig.ev_signal_added) | 476 if (base->sig.ev_signal_added) |
474 evsignal_base = base; | 477 evsignal_base = base; |
475 done = 0; | 478 done = 0; |
476 while (!done) { | 479 while (!done) { |
477 /* Terminate the loop if we have been asked to */ | 480 /* Terminate the loop if we have been asked to */ |
478 if (base->event_gotterm) { | 481 if (base->event_gotterm) { |
479 base->event_gotterm = 0; | 482 base->event_gotterm = 0; |
480 break; | 483 break; |
481 } | 484 } |
482 | 485 |
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
519 timeout_process(base); | 522 timeout_process(base); |
520 | 523 |
521 if (base->event_count_active) { | 524 if (base->event_count_active) { |
522 event_process_active(base); | 525 event_process_active(base); |
523 if (!base->event_count_active && (flags & EVLOOP_ONCE)) | 526 if (!base->event_count_active && (flags & EVLOOP_ONCE)) |
524 done = 1; | 527 done = 1; |
525 } else if (flags & EVLOOP_NONBLOCK) | 528 } else if (flags & EVLOOP_NONBLOCK) |
526 done = 1; | 529 done = 1; |
527 } | 530 } |
528 | 531 |
| 532 /* clear time cache */ |
| 533 base->tv_cache.tv_sec = 0; |
| 534 |
529 event_debug(("%s: asked to terminate loop.", __func__)); | 535 event_debug(("%s: asked to terminate loop.", __func__)); |
530 return (0); | 536 return (0); |
531 } | 537 } |
532 | 538 |
533 /* Sets up an event for processing once */ | 539 /* Sets up an event for processing once */ |
534 | 540 |
535 struct event_once { | 541 struct event_once { |
536 struct event ev; | 542 struct event ev; |
537 | 543 |
538 void (*cb)(int, short, void *); | 544 void (*cb)(int, short, void *); |
(...skipping 219 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
758 gettime(base, &now); | 764 gettime(base, &now); |
759 evutil_timeradd(&now, tv, &ev->ev_timeout); | 765 evutil_timeradd(&now, tv, &ev->ev_timeout); |
760 | 766 |
761 event_debug(( | 767 event_debug(( |
762 "event_add: timeout in %ld seconds, call %p", | 768 "event_add: timeout in %ld seconds, call %p", |
763 tv->tv_sec, ev->ev_callback)); | 769 tv->tv_sec, ev->ev_callback)); |
764 | 770 |
765 event_queue_insert(base, ev, EVLIST_TIMEOUT); | 771 event_queue_insert(base, ev, EVLIST_TIMEOUT); |
766 } | 772 } |
767 | 773 |
768 » return (0); | 774 » return (res); |
769 } | 775 } |
770 | 776 |
771 int | 777 int |
772 event_del(struct event *ev) | 778 event_del(struct event *ev) |
773 { | 779 { |
774 struct event_base *base; | 780 struct event_base *base; |
775 const struct eventop *evsel; | 781 const struct eventop *evsel; |
776 void *evbase; | 782 void *evbase; |
777 | 783 |
778 event_debug(("event_del: %p, callback %p", | 784 event_debug(("event_del: %p, callback %p", |
(...skipping 104 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
883 /* | 889 /* |
884 * We can modify the key element of the node without destroying | 890 * We can modify the key element of the node without destroying |
885 * the key, because we apply it to all in the right order. | 891 * the key, because we apply it to all in the right order. |
886 */ | 892 */ |
887 pev = base->timeheap.p; | 893 pev = base->timeheap.p; |
888 size = base->timeheap.n; | 894 size = base->timeheap.n; |
889 for (; size-- > 0; ++pev) { | 895 for (; size-- > 0; ++pev) { |
890 struct timeval *ev_tv = &(**pev).ev_timeout; | 896 struct timeval *ev_tv = &(**pev).ev_timeout; |
891 evutil_timersub(ev_tv, &off, ev_tv); | 897 evutil_timersub(ev_tv, &off, ev_tv); |
892 } | 898 } |
| 899 /* Now remember what the new time turned out to be. */ |
| 900 base->event_tv = *tv; |
893 } | 901 } |
894 | 902 |
895 void | 903 void |
896 timeout_process(struct event_base *base) | 904 timeout_process(struct event_base *base) |
897 { | 905 { |
898 struct timeval now; | 906 struct timeval now; |
899 struct event *ev; | 907 struct event *ev; |
900 | 908 |
901 if (min_heap_empty(&base->timeheap)) | 909 if (min_heap_empty(&base->timeheap)) |
902 return; | 910 return; |
(...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
989 /* | 997 /* |
990 * No thread-safe interface needed - the information should be the same | 998 * No thread-safe interface needed - the information should be the same |
991 * for all threads. | 999 * for all threads. |
992 */ | 1000 */ |
993 | 1001 |
994 const char * | 1002 const char * |
995 event_get_method(void) | 1003 event_get_method(void) |
996 { | 1004 { |
997 return (current_base->evsel->name); | 1005 return (current_base->evsel->name); |
998 } | 1006 } |
OLD | NEW |