OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu> | 2 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu> |
3 * All rights reserved. | 3 * All rights reserved. |
4 * | 4 * |
5 * Redistribution and use in source and binary forms, with or without | 5 * Redistribution and use in source and binary forms, with or without |
6 * modification, are permitted provided that the following conditions | 6 * modification, are permitted provided that the following conditions |
7 * are met: | 7 * are met: |
8 * 1. Redistributions of source code must retain the above copyright | 8 * 1. Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * 2. Redistributions in binary form must reproduce the above copyright | 10 * 2. Redistributions in binary form must reproduce the above copyright |
(...skipping 89 matching lines...) |
100 #endif | 100 #endif |
101 #ifdef WIN32 | 101 #ifdef WIN32 |
102 &win32ops, | 102 &win32ops, |
103 #endif | 103 #endif |
104 NULL | 104 NULL |
105 }; | 105 }; |
106 | 106 |
107 /* Global state */ | 107 /* Global state */ |
108 struct event_base *current_base = NULL; | 108 struct event_base *current_base = NULL; |
109 extern struct event_base *evsignal_base; | 109 extern struct event_base *evsignal_base; |
110 static int use_monotonic; | 110 static int use_monotonic = 1; |
111 | 111 |
112 /* Prototypes */ | 112 /* Prototypes */ |
113 static void event_queue_insert(struct event_base *, struct event *, int); | 113 static void event_queue_insert(struct event_base *, struct event *, int); |
114 static void event_queue_remove(struct event_base *, struct event *, int); | 114 static void event_queue_remove(struct event_base *, struct event *, int); |
115 static int event_haveevents(struct event_base *); | 115 static int event_haveevents(struct event_base *); |
116 | 116 |
117 static void event_process_active(struct event_base *); | 117 static void event_process_active(struct event_base *); |
118 | 118 |
119 static int timeout_next(struct event_base *, struct timeval **); | 119 static int timeout_next(struct event_base *, struct timeval **); |
120 static void timeout_process(struct event_base *); | 120 static void timeout_process(struct event_base *); |
121 static void timeout_correct(struct event_base *, struct timeval *); | 121 static void timeout_correct(struct event_base *, struct timeval *); |
122 | 122 |
123 static void | |
124 detect_monotonic(void) | |
125 { | |
126 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC) | |
127 struct timespec ts; | |
128 | |
129 if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) | |
130 use_monotonic = 1; | |
131 #endif | |
132 } | |
133 | |
134 static int | 123 static int |
135 gettime(struct event_base *base, struct timeval *tp) | 124 gettime(struct event_base *base, struct timeval *tp) |
136 { | 125 { |
137 if (base->tv_cache.tv_sec) { | 126 if (base->tv_cache.tv_sec) { |
138 *tp = base->tv_cache; | 127 *tp = base->tv_cache; |
139 return (0); | 128 return (0); |
140 } | 129 } |
141 | 130 |
142 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC) | 131 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC) |
143 if (use_monotonic) { | 132 struct timespec ts; |
144 struct timespec ts; | |
145 | 133 |
146 if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1) | 134 if (use_monotonic && |
147 return (-1); | 135 clock_gettime(CLOCK_MONOTONIC, &ts) == 0) { |
148 | |
149 tp->tv_sec = ts.tv_sec; | 136 tp->tv_sec = ts.tv_sec; |
150 tp->tv_usec = ts.tv_nsec / 1000; | 137 tp->tv_usec = ts.tv_nsec / 1000; |
151 return (0); | 138 return (0); |
152 } | 139 } |
153 #endif | 140 #endif |
154 | 141 |
| 142 use_monotonic = 0; |
| 143 |
155 return (evutil_gettimeofday(tp, NULL)); | 144 return (evutil_gettimeofday(tp, NULL)); |
156 } | 145 } |
157 | 146 |
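For reference, the new gettime() control flow can be exercised outside of libevent. Below is a minimal standalone sketch of the same pattern; the names my_gettime and my_use_monotonic are hypothetical and not part of this change, and it assumes a platform that defines CLOCK_MONOTONIC:

/* Sketch of the lazy monotonic-clock fallback shown above: start
 * optimistic, clear the flag the first time clock_gettime() fails,
 * and fall back to the wall clock from then on. */
#include <stdio.h>
#include <time.h>
#include <sys/time.h>

static int my_use_monotonic = 1;

static int
my_gettime(struct timeval *tp)
{
#if defined(CLOCK_MONOTONIC)
        struct timespec ts;

        if (my_use_monotonic &&
            clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
                tp->tv_sec = ts.tv_sec;
                tp->tv_usec = ts.tv_nsec / 1000;
                return (0);
        }
#endif
        my_use_monotonic = 0;
        return (gettimeofday(tp, NULL));
}

int
main(void)
{
        struct timeval tv;

        if (my_gettime(&tv) == 0)
                printf("%ld.%06ld (monotonic=%d)\n",
                    (long)tv.tv_sec, (long)tv.tv_usec, my_use_monotonic);
        return (0);
}

Unlike the removed detect_monotonic(), nothing probes the clock up front; the first clock_gettime() failure on an uncached read clears the flag, and every later call goes straight to gettimeofday().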
158 struct event_base * | 147 struct event_base * |
159 event_init(void) | 148 event_init(void) |
160 { | 149 { |
161 struct event_base *base = event_base_new(); | 150 struct event_base *base = event_base_new(); |
162 | 151 |
163 if (base != NULL) | 152 if (base != NULL) |
164 current_base = base; | 153 current_base = base; |
165 | 154 |
166 return (base); | 155 return (base); |
167 } | 156 } |
168 | 157 |
169 struct event_base * | 158 struct event_base * |
170 event_base_new(void) | 159 event_base_new(void) |
171 { | 160 { |
172 int i; | 161 int i; |
173 struct event_base *base; | 162 struct event_base *base; |
174 | 163 |
175 if ((base = calloc(1, sizeof(struct event_base))) == NULL) | 164 if ((base = calloc(1, sizeof(struct event_base))) == NULL) |
176 event_err(1, "%s: calloc", __func__); | 165 event_err(1, "%s: calloc", __func__); |
177 | 166 |
178 detect_monotonic(); | |
179 gettime(base, &base->event_tv); | 167 gettime(base, &base->event_tv); |
180 | 168 |
181 min_heap_ctor(&base->timeheap); | 169 min_heap_ctor(&base->timeheap); |
182 TAILQ_INIT(&base->eventqueue); | 170 TAILQ_INIT(&base->eventqueue); |
183 base->sig.ev_signal_pair[0] = -1; | 171 base->sig.ev_signal_pair[0] = -1; |
184 base->sig.ev_signal_pair[1] = -1; | 172 base->sig.ev_signal_pair[1] = -1; |
185 | 173 |
186 base->evbase = NULL; | 174 base->evbase = NULL; |
187 for (i = 0; eventops[i] && !base->evbase; i++) { | 175 for (i = 0; eventops[i] && !base->evbase; i++) { |
188 base->evsel = eventops[i]; | 176 base->evsel = eventops[i]; |
(...skipping 804 matching lines...) |
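The eventops loop above continues into the elided lines; from the visible loop condition it keeps trying entries of eventops[] until one sets base->evbase. A hypothetical standalone illustration of that "first init() that succeeds wins" selection (backend_ops, fake_fail, fake_ok and friends are made-up names, not libevent API):

/* Illustrative backend selection mirroring the eventops[] loop in
 * event_base_new(): walk the table in order, keep the first backend
 * whose init() returns a non-NULL handle. */
#include <stdio.h>

struct backend_ops {
        const char *name;
        void *(*init)(void);
};

static void *fake_fail(void) { return (NULL); }
static void *fake_ok(void) { static int token; return (&token); }

static const struct backend_ops epoll_like = { "epoll-like", fake_fail };
static const struct backend_ops poll_like = { "poll-like", fake_ok };
static const struct backend_ops select_like = { "select-like", fake_ok };

static const struct backend_ops *backends[] = {
        &epoll_like,
        &poll_like,
        &select_like,
        NULL
};

int
main(void)
{
        const struct backend_ops *sel = NULL;
        void *handle = NULL;
        int i;

        for (i = 0; backends[i] && handle == NULL; i++) {
                sel = backends[i];
                handle = sel->init();
        }
        printf("selected backend: %s\n", handle != NULL ? sel->name : "(none)");
        return (0);
}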
993 /* | 981 /* |
994 * No thread-safe interface needed - the information should be the same | 982 * No thread-safe interface needed - the information should be the same |
995 * for all threads. | 983 * for all threads. |
996 */ | 984 */ |
997 | 985 |
998 const char * | 986 const char * |
999 event_get_method(void) | 987 event_get_method(void) |
1000 { | 988 { |
1001 return (current_base->evsel->name); | 989 return (current_base->evsel->name); |
1002 } | 990 } |
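For completeness, event_get_method() is only meaningful once the current base exists; a minimal hypothetical caller (not part of this change, typically built with -levent against the 1.4 headers) that prints the selected backend:

/* Prints the backend name (e.g. "epoll", "kqueue", "win32") chosen for
 * the base created by event_init(). */
#include <stdio.h>
#include <event.h>

int
main(void)
{
        struct event_base *base = event_init();

        if (base == NULL)
                return (1);
        printf("libevent backend: %s\n", event_get_method());
        event_base_free(base);
        return (0);
}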