Chromium Code Reviews

Side by Side Diff: fusl/src/thread/pthread_create.c

Issue 1714623002: [fusl] clang-format fusl (Closed)
Base URL: git@github.com:domokit/mojo.git@master
Patch Set: headers too
Created: 4 years, 10 months ago
fusl/src/thread/pthread_create.c after clang-format (right-hand side of the diff):

#define _GNU_SOURCE
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include <sys/mman.h>
#include <string.h>
#include <stddef.h>

void* __mmap(void*, size_t, int, int, int, off_t);
int __munmap(void*, size_t);
int __mprotect(void*, size_t, int);

static void dummy_0() {}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
weak_alias(dummy_0, __dl_thread_cleanup);

_Noreturn void __pthread_exit(void* result) {
  pthread_t self = __pthread_self();
  sigset_t set;

  self->canceldisable = 1;
  self->cancelasync = 0;
  self->result = result;

  while (self->cancelbuf) {
    void (*f)(void*) = self->cancelbuf->__f;
    void* x = self->cancelbuf->__x;
    self->cancelbuf = self->cancelbuf->__next;
    f(x);
  }

  __pthread_tsd_run_dtors();

  __lock(self->exitlock);

  /* Mark this thread dead before decrementing count */
  __lock(self->killlock);
  self->dead = 1;

  /* Block all signals before decrementing the live thread count.
   * This is important to ensure that dynamically allocated TLS
   * is not under-allocated/over-committed, and possibly for other
   * reasons as well. */
  __block_all_sigs(&set);

  /* Wait to unlock the kill lock, which governs functions like
   * pthread_kill which target a thread id, until signals have
   * been blocked. This precludes observation of the thread id
   * as a live thread (with application code running in it) after
   * the thread was reported dead by ESRCH being returned. */
  __unlock(self->killlock);

  /* It's impossible to determine whether this is "the last thread"
   * until performing the atomic decrement, since multiple threads
   * could exit at the same time. For the last thread, revert the
   * decrement and unblock signals to give the atexit handlers and
   * stdio cleanup code a consistent state. */
  if (a_fetch_add(&libc.threads_minus_1, -1) == 0) {
    libc.threads_minus_1 = 0;
    __restore_sigs(&set);
    exit(0);
  }

  /* Process robust list in userspace to handle non-pshared mutexes
   * and the detached thread case where the robust list head will
   * be invalid when the kernel would process it. */
  __vm_lock();
  volatile void* volatile* rp;
  while ((rp = self->robust_list.head) && rp != &self->robust_list.head) {
    pthread_mutex_t* m =
        (void*)((char*)rp - offsetof(pthread_mutex_t, _m_next));
    int waiters = m->_m_waiters;
    int priv = (m->_m_type & 128) ^ 128;
    self->robust_list.pending = rp;
    self->robust_list.head = *rp;
    int cont = a_swap(&m->_m_lock, self->tid | 0x40000000);
    self->robust_list.pending = 0;
    if (cont < 0 || waiters)
      __wake(&m->_m_lock, 1, priv);
  }
  __vm_unlock();

  __do_orphaned_stdio_locks();
  __dl_thread_cleanup();

  if (self->detached && self->map_base) {
    /* Detached threads must avoid the kernel clear_child_tid
     * feature, since the virtual address will have been
     * unmapped and possibly already reused by a new mapping
     * at the time the kernel would perform the write. In
     * the case of threads that started out detached, the
     * initial clone flags are correct, but if the thread was
     * detached later (== 2), we need to clear it here. */
    if (self->detached == 2)
      __syscall(SYS_set_tid_address, 0);

    /* Robust list will no longer be valid, and was already
     * processed above, so unregister it with the kernel. */
    if (self->robust_list.off)
      __syscall(SYS_set_robust_list, 0, 3 * sizeof(long));

    /* Since __unmapself bypasses the normal munmap code path,
     * explicitly wait for vmlock holders first. */
    __vm_wait();

    /* The following call unmaps the thread's stack mapping
     * and then exits without touching the stack. */
    __unmapself(self->map_base, self->map_size);
  }

  for (;;)
    __syscall(SYS_exit, 0);
}

void __do_cleanup_push(struct __ptcb* cb) {
  struct pthread* self = __pthread_self();
  cb->__next = self->cancelbuf;
  self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb* cb) {
  __pthread_self()->cancelbuf = cb->__next;
}

static int start(void* p) {
  pthread_t self = p;
  if (self->startlock[0]) {
    __wait(self->startlock, 0, 1, 1);
    if (self->startlock[0]) {
      self->detached = 2;
      pthread_exit(0);
    }
    __restore_sigs(self->sigmask);
  }
  if (self->unblock_cancel)
    __syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG / 8);
  __pthread_exit(self->start(self->start_arg));
  return 0;
}

static int start_c11(void* p) {
  pthread_t self = p;
  int (*start)(void*) = (int (*)(void*))self->start;
  __pthread_exit((void*)(uintptr_t)start(self->start_arg));
  return 0;
}

#define ROUND(x) (((x) + PAGE_SIZE - 1) & -PAGE_SIZE)

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void* dummy_tsd[1] = {0};
weak_alias(dummy_tsd, __pthread_tsd_main);

volatile int __block_new_threads = 0;

static FILE* volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

static void init_file_lock(FILE* f) {
  if (f && f->lock < 0)
    f->lock = 0;
}

void* __copy_tls(unsigned char*);

int __pthread_create(pthread_t* restrict res,
                     const pthread_attr_t* restrict attrp,
                     void* (*entry)(void*),
                     void* restrict arg) {
  int ret, c11 = (attrp == __ATTRP_C11_THREAD);
  size_t size, guard;
  struct pthread *self, *new;
  unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
  unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND |
                   CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS |
                   CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
  int do_sched = 0;
  pthread_attr_t attr = {0};

  if (!libc.can_do_threads)
    return ENOSYS;
  self = __pthread_self();
  if (!libc.threaded) {
    for (FILE* f = *__ofl_lock(); f; f = f->next)
      init_file_lock(f);
    __ofl_unlock();
    init_file_lock(__stdin_used);
    init_file_lock(__stdout_used);
    init_file_lock(__stderr_used);
    __syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG / 8);
    self->tsd = (void**)__pthread_tsd_main;
    libc.threaded = 1;
  }
  if (attrp && !c11)
    attr = *attrp;

  __acquire_ptc();
  if (__block_new_threads)
    __wait(&__block_new_threads, 0, 1, 1);

  if (attr._a_stackaddr) {
    size_t need = libc.tls_size + __pthread_tsd_size;
    size = attr._a_stacksize + DEFAULT_STACK_SIZE;
    stack = (void*)(attr._a_stackaddr & -16);
    stack_limit = (void*)(attr._a_stackaddr - size);
    /* Use application-provided stack for TLS only when
     * it does not take more than ~12% or 2k of the
     * application's stack space. */
    if (need < size / 8 && need < 2048) {
      tsd = stack - __pthread_tsd_size;
      stack = tsd - libc.tls_size;
      memset(stack, 0, need);
    } else {
      size = ROUND(need);
      guard = 0;
    }
  } else {
    guard = ROUND(DEFAULT_GUARD_SIZE + attr._a_guardsize);
    size = guard + ROUND(DEFAULT_STACK_SIZE + attr._a_stacksize +
                         libc.tls_size + __pthread_tsd_size);
  }

  if (!tsd) {
    if (guard) {
      map = __mmap(0, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
      if (map == MAP_FAILED)
        goto fail;
      if (__mprotect(map + guard, size - guard, PROT_READ | PROT_WRITE) &&
          errno != ENOSYS) {
        __munmap(map, size);
        goto fail;
      }
    } else {
      map = __mmap(0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1,
                   0);
      if (map == MAP_FAILED)
        goto fail;
    }
    tsd = map + size - __pthread_tsd_size;
    if (!stack) {
      stack = tsd - libc.tls_size;
      stack_limit = map + guard;
    }
  }

  new = __copy_tls(tsd - libc.tls_size);
  new->map_base = map;
  new->map_size = size;
  new->stack = stack;
  new->stack_size = stack - stack_limit;
  new->start = entry;
  new->start_arg = arg;
  new->self = new;
  new->tsd = (void*)tsd;
  new->locale = &libc.global_locale;
  if (attr._a_detach) {
    new->detached = 1;
    flags -= CLONE_CHILD_CLEARTID;
  }
  if (attr._a_sched) {
    do_sched = new->startlock[0] = 1;
    __block_app_sigs(new->sigmask);
  }
  new->robust_list.head = &new->robust_list.head;
  new->unblock_cancel = self->cancel;
  new->CANARY = self->CANARY;

  a_inc(&libc.threads_minus_1);
  ret = __clone((c11 ? start_c11 : start), stack, flags, new, &new->tid,
                TP_ADJ(new), &new->tid);

  __release_ptc();

  if (do_sched) {
    __restore_sigs(new->sigmask);
  }

  if (ret < 0) {
    a_dec(&libc.threads_minus_1);
    if (map)
      __munmap(map, size);
    return EAGAIN;
  }

  if (do_sched) {
    ret = __syscall(SYS_sched_setscheduler, new->tid, attr._a_policy,
                    &attr._a_prio);
    a_store(new->startlock, ret < 0 ? 2 : 0);
    __wake(new->startlock, 1, 1);
    if (ret < 0)
      return -ret;
  }

  *res = new;
  return 0;
fail:
  __release_ptc();
  return EAGAIN;
}

weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);
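For reference, a minimal, self-contained caller of the interface this file implements (pthread_create, pthread_exit, and pthread_join, exported through the weak_alias declarations above). The worker function and its argument are illustrative assumptions only, not part of this patch:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical worker used only for illustration. */
static void* worker(void* arg) {
  int* n = arg;
  printf("worker received %d\n", *n);
  /* The value passed here is what pthread_join reports back. */
  pthread_exit((void*)1);
}

int main(void) {
  pthread_t t;
  int value = 42;
  void* result;

  /* pthread_create is the public alias of __pthread_create above. */
  if (pthread_create(&t, NULL, worker, &value) != 0)
    return 1;
  /* Join collects the value the thread passed to pthread_exit. */
  if (pthread_join(t, &result) != 0)
    return 1;
  printf("worker exited with %p\n", result);
  return 0;
}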
