| OLD | NEW |
| --- | --- |
| 1 #include "pthread_impl.h" | 1 #include "pthread_impl.h" |
| 2 #include <semaphore.h> | 2 #include <semaphore.h> |
| 3 #include <unistd.h> | 3 #include <unistd.h> |
| 4 #include <dirent.h> | 4 #include <dirent.h> |
| 5 #include <string.h> | 5 #include <string.h> |
| 6 #include <ctype.h> | 6 #include <ctype.h> |
| 7 #include "futex.h" | 7 #include "futex.h" |
| 8 #include "atomic.h" | 8 #include "atomic.h" |
| 9 #include "../dirent/__dirent.h" | 9 #include "../dirent/__dirent.h" |
| 10 | 10 |
| 11 static struct chain { | 11 static struct chain { |
| 12 » struct chain *next; | 12 struct chain* next; |
| 13 » int tid; | 13 int tid; |
| 14 » sem_t target_sem, caller_sem; | 14 sem_t target_sem, caller_sem; |
| 15 } *volatile head; | 15 } * volatile head; |
| 16 | 16 |
| 17 static volatile int synccall_lock[2]; | 17 static volatile int synccall_lock[2]; |
| 18 static volatile int target_tid; | 18 static volatile int target_tid; |
| 19 static void (*callback)(void *), *context; | 19 static void (*callback)(void*), *context; |
| 20 static volatile int dummy = 0; | 20 static volatile int dummy = 0; |
| 21 weak_alias(dummy, __block_new_threads); | 21 weak_alias(dummy, __block_new_threads); |
| 22 | 22 |
| 23 static void handler(int sig) | 23 static void handler(int sig) { |
| 24 { | 24 struct chain ch; |
| 25 » struct chain ch; | 25 int old_errno = errno; |
| 26 » int old_errno = errno; | |
| 27 | 26 |
| 28 » sem_init(&ch.target_sem, 0, 0); | 27 sem_init(&ch.target_sem, 0, 0); |
| 29 » sem_init(&ch.caller_sem, 0, 0); | 28 sem_init(&ch.caller_sem, 0, 0); |
| 30 | 29 |
| 31 » ch.tid = __syscall(SYS_gettid); | 30 ch.tid = __syscall(SYS_gettid); |
| 32 | 31 |
| 33 » do ch.next = head; | 32 do |
| 34 » while (a_cas_p(&head, ch.next, &ch) != ch.next); | 33 ch.next = head; |
| | 34 while (a_cas_p(&head, ch.next, &ch) != ch.next); |
| 35 | 35 |
| 36 » if (a_cas(&target_tid, ch.tid, 0) == (ch.tid | 0x80000000)) | 36 if (a_cas(&target_tid, ch.tid, 0) == (ch.tid | 0x80000000)) |
| 37 » » __syscall(SYS_futex, &target_tid, FUTEX_UNLOCK_PI|FUTEX_PRIVATE); | 37 __syscall(SYS_futex, &target_tid, FUTEX_UNLOCK_PI | FUTEX_PRIVATE); |
| 38 | 38 |
| 39 » sem_wait(&ch.target_sem); | 39 sem_wait(&ch.target_sem); |
| 40 » callback(context); | 40 callback(context); |
| 41 » sem_post(&ch.caller_sem); | 41 sem_post(&ch.caller_sem); |
| 42 » sem_wait(&ch.target_sem); | 42 sem_wait(&ch.target_sem); |
| 43 | 43 |
| 44 » errno = old_errno; | 44 errno = old_errno; |
| 45 } | 45 } |
| 46 | 46 |
| 47 void __synccall(void (*func)(void *), void *ctx) | 47 void __synccall(void (*func)(void*), void* ctx) { |
| 48 { | 48 sigset_t oldmask; |
| 49 » sigset_t oldmask; | 49 int cs, i, r, pid, self; |
| 50 » int cs, i, r, pid, self;; | 50 ; |
| 51 » DIR dir = {0}; | 51 DIR dir = {0}; |
| 52 » struct dirent *de; | 52 struct dirent* de; |
| 53 » struct sigaction sa = { .sa_flags = 0, .sa_handler = handler }; | 53 struct sigaction sa = {.sa_flags = 0, .sa_handler = handler}; |
| 54 » struct chain *cp, *next; | 54 struct chain *cp, *next; |
| 55 » struct timespec ts; | 55 struct timespec ts; |
| 56 | 56 |
| 57 » /* Blocking signals in two steps, first only app-level signals | 57 /* Blocking signals in two steps, first only app-level signals |
| 58 » * before taking the lock, then all signals after taking the lock, | 58 * before taking the lock, then all signals after taking the lock, |
| 59 » * is necessary to achieve AS-safety. Blocking them all first would | 59 * is necessary to achieve AS-safety. Blocking them all first would |
| 60 » * deadlock if multiple threads called __synccall. Waiting to block | 60 * deadlock if multiple threads called __synccall. Waiting to block |
| 61 » * any until after the lock would allow re-entry in the same thread | 61 * any until after the lock would allow re-entry in the same thread |
| 62 » * with the lock already held. */ | 62 * with the lock already held. */ |
| 63 » __block_app_sigs(&oldmask); | 63 __block_app_sigs(&oldmask); |
| 64 » LOCK(synccall_lock); | 64 LOCK(synccall_lock); |
| 65 » __block_all_sigs(0); | 65 __block_all_sigs(0); |
| 66 » pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs); | 66 pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs); |
| 67 | 67 |
| 68 » head = 0; | 68 head = 0; |
| 69 | 69 |
| 70 » if (!libc.threaded) goto single_threaded; | 70 if (!libc.threaded) |
| | 71 goto single_threaded; |
| 71 | 72 |
| 72 » callback = func; | 73 callback = func; |
| 73 » context = ctx; | 74 context = ctx; |
| 74 | 75 |
| 75 » /* This atomic store ensures that any signaled threads will see the | 76 /* This atomic store ensures that any signaled threads will see the |
| 76 » * above stores, and prevents more than a bounded number of threads, | 77 * above stores, and prevents more than a bounded number of threads, |
| 77 » * those already in pthread_create, from creating new threads until | 78 * those already in pthread_create, from creating new threads until |
| 78 » * the value is cleared to zero again. */ | 79 * the value is cleared to zero again. */ |
| 79 » a_store(&__block_new_threads, 1); | 80 a_store(&__block_new_threads, 1); |
| 80 | 81 |
| 81 » /* Block even implementation-internal signals, so that nothing | 82 /* Block even implementation-internal signals, so that nothing |
| 82 » * interrupts the SIGSYNCCALL handlers. The main possible source | 83 * interrupts the SIGSYNCCALL handlers. The main possible source |
| 83 » * of trouble is asynchronous cancellation. */ | 84 * of trouble is asynchronous cancellation. */ |
| 84 » memset(&sa.sa_mask, -1, sizeof sa.sa_mask); | 85 memset(&sa.sa_mask, -1, sizeof sa.sa_mask); |
| 85 » __libc_sigaction(SIGSYNCCALL, &sa, 0); | 86 __libc_sigaction(SIGSYNCCALL, &sa, 0); |
| 86 | 87 |
| 87 » pid = __syscall(SYS_getpid); | 88 pid = __syscall(SYS_getpid); |
| 88 » self = __syscall(SYS_gettid); | 89 self = __syscall(SYS_gettid); |
| 89 | 90 |
| 90 » /* Since opendir is not AS-safe, the DIR needs to be setup manually | 91 /* Since opendir is not AS-safe, the DIR needs to be setup manually |
| 91 » * in automatic storage. Thankfully this is easy. */ | 92 * in automatic storage. Thankfully this is easy. */ |
| 92 » dir.fd = open("/proc/self/task", O_RDONLY|O_DIRECTORY|O_CLOEXEC); | 93 dir.fd = open("/proc/self/task", O_RDONLY | O_DIRECTORY | O_CLOEXEC); |
| 93 » if (dir.fd < 0) goto out; | 94 if (dir.fd < 0) |
| | 95 goto out; |
| 94 | 96 |
| 95 » /* Initially send one signal per counted thread. But since we can't | 97 /* Initially send one signal per counted thread. But since we can't |
| 96 » * synchronize with thread creation/exit here, there could be too | 98 * synchronize with thread creation/exit here, there could be too |
| 97 » * few signals. This initial signaling is just an optimization, not | 99 * few signals. This initial signaling is just an optimization, not |
| 98 » * part of the logic. */ | 100 * part of the logic. */ |
| 99 » for (i=libc.threads_minus_1; i; i--) | 101 for (i = libc.threads_minus_1; i; i--) |
| 100 » » __syscall(SYS_kill, pid, SIGSYNCCALL); | 102 __syscall(SYS_kill, pid, SIGSYNCCALL); |
| 101 | 103 |
| 102 » /* Loop scanning the kernel-provided thread list until it shows no | 104 /* Loop scanning the kernel-provided thread list until it shows no |
| 103 » * threads that have not already replied to the signal. */ | 105 * threads that have not already replied to the signal. */ |
| 104 » for (;;) { | 106 for (;;) { |
| 105 » » int miss_cnt = 0; | 107 int miss_cnt = 0; |
| 106 » » while ((de = readdir(&dir))) { | 108 while ((de = readdir(&dir))) { |
| 107 » » » if (!isdigit(de->d_name[0])) continue; | 109 if (!isdigit(de->d_name[0])) |
| 108 » » » int tid = atoi(de->d_name); | 110 continue; |
| 109 » » » if (tid == self || !tid) continue; | 111 int tid = atoi(de->d_name); |
| | 112 if (tid == self || !tid) |
| | 113 continue; |
| 110 | 114 |
| 111 » » » /* Set the target thread as the PI futex owner before | 115 /* Set the target thread as the PI futex owner before |
| 112 » » » * checking if it's in the list of caught threads. If it | 116 * checking if it's in the list of caught threads. If it |
| 113 » » » * adds itself to the list after we check for it, then | 117 * adds itself to the list after we check for it, then |
| 114 » » » * it will see its own tid in the PI futex and perform | 118 * it will see its own tid in the PI futex and perform |
| 115 » » » * the unlock operation. */ | 119 * the unlock operation. */ |
| 116 » » » a_store(&target_tid, tid); | 120 a_store(&target_tid, tid); |
| 117 | 121 |
| 118 » » » /* Thread-already-caught is a success condition. */ | 122 /* Thread-already-caught is a success condition. */ |
| 119 » » » for (cp = head; cp && cp->tid != tid; cp=cp->next); | 123 for (cp = head; cp && cp->tid != tid; cp = cp->next) |
| 120 » » » if (cp) continue; | 124 ; |
| | 125 if (cp) |
| | 126 continue; |
| 121 | 127 |
| 122 » » » r = -__syscall(SYS_tgkill, pid, tid, SIGSYNCCALL); | 128 r = -__syscall(SYS_tgkill, pid, tid, SIGSYNCCALL); |
| 123 | 129 |
| 124 » » » /* Target thread exit is a success condition. */ | 130 /* Target thread exit is a success condition. */ |
| 125 » » » if (r == ESRCH) continue; | 131 if (r == ESRCH) |
| | 132 continue; |
| 126 | 133 |
| 127 » » » /* The FUTEX_LOCK_PI operation is used to loan priority | 134 /* The FUTEX_LOCK_PI operation is used to loan priority |
| 128 » » » * to the target thread, which otherwise may be unable | 135 * to the target thread, which otherwise may be unable |
| 129 » » » * to run. Timeout is necessary because there is a race | 136 * to run. Timeout is necessary because there is a race |
| 130 » » » * condition where the tid may be reused by a different | 137 * condition where the tid may be reused by a different |
| 131 » » » * process. */ | 138 * process. */ |
| 132 » » » clock_gettime(CLOCK_REALTIME, &ts); | 139 clock_gettime(CLOCK_REALTIME, &ts); |
| 133 » » » ts.tv_nsec += 10000000; | 140 ts.tv_nsec += 10000000; |
| 134 » » » if (ts.tv_nsec >= 1000000000) { | 141 if (ts.tv_nsec >= 1000000000) { |
| 135 » » » » ts.tv_sec++; | 142 ts.tv_sec++; |
| 136 » » » » ts.tv_nsec -= 1000000000; | 143 ts.tv_nsec -= 1000000000; |
| 137 » » » } | 144 } |
| 138 » » » r = -__syscall(SYS_futex, &target_tid, | 145 r = -__syscall(SYS_futex, &target_tid, FUTEX_LOCK_PI | FUTEX_PRIVATE, 0, |
| 139 » » » » FUTEX_LOCK_PI|FUTEX_PRIVATE, 0, &ts); | 146 &ts); |
| 140 | 147 |
| 141 » » » /* Obtaining the lock means the thread responded. ESRCH | 148 /* Obtaining the lock means the thread responded. ESRCH |
| 142 » » » * means the target thread exited, which is okay too. */ | 149 * means the target thread exited, which is okay too. */ |
| 143 » » » if (!r || r == ESRCH) continue; | 150 if (!r || r == ESRCH) |
| | 151 continue; |
| 144 | 152 |
| 145 » » » miss_cnt++; | 153 miss_cnt++; |
| 146 » » } | 154 } |
| 147 » » if (!miss_cnt) break; | 155 if (!miss_cnt) |
| 148 » » rewinddir(&dir); | 156 break; |
| 149 » } | 157 rewinddir(&dir); |
| 150 » close(dir.fd); | 158 } |
| | 159 close(dir.fd); |
| 151 | 160 |
| 152 » /* Serialize execution of callback in caught threads. */ | 161 /* Serialize execution of callback in caught threads. */ |
| 153 » for (cp=head; cp; cp=cp->next) { | 162 for (cp = head; cp; cp = cp->next) { |
| 154 » » sem_post(&cp->target_sem); | 163 sem_post(&cp->target_sem); |
| 155 » » sem_wait(&cp->caller_sem); | 164 sem_wait(&cp->caller_sem); |
| 156 » } | 165 } |
| 157 | 166 |
| 158 » sa.sa_handler = SIG_IGN; | 167 sa.sa_handler = SIG_IGN; |
| 159 » __libc_sigaction(SIGSYNCCALL, &sa, 0); | 168 __libc_sigaction(SIGSYNCCALL, &sa, 0); |
| 160 | 169 |
| 161 single_threaded: | 170 single_threaded: |
| 162 » func(ctx); | 171 func(ctx); |
| 163 | 172 |
| 164 » /* Only release the caught threads once all threads, including the | 173 /* Only release the caught threads once all threads, including the |
| 165 » * caller, have returned from the callback function. */ | 174 * caller, have returned from the callback function. */ |
| 166 » for (cp=head; cp; cp=next) { | 175 for (cp = head; cp; cp = next) { |
| 167 » » next = cp->next; | 176 next = cp->next; |
| 168 » » sem_post(&cp->target_sem); | 177 sem_post(&cp->target_sem); |
| 169 » } | 178 } |
| 170 | 179 |
| 171 out: | 180 out: |
| 172 » a_store(&__block_new_threads, 0); | 181 a_store(&__block_new_threads, 0); |
| 173 » __wake(&__block_new_threads, -1, 1); | 182 __wake(&__block_new_threads, -1, 1); |
| 174 | 183 |
| 175 » pthread_setcancelstate(cs, 0); | 184 pthread_setcancelstate(cs, 0); |
| 176 » UNLOCK(synccall_lock); | 185 UNLOCK(synccall_lock); |
| 177 » __restore_sigs(&oldmask); | 186 __restore_sigs(&oldmask); |
| 178 } | 187 } |
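
For reference, the signal handler above registers its stack-allocated `struct chain` node on the global list with a retrying compare-and-swap (`a_cas_p`). A minimal stand-alone sketch of that push idiom, written with C11 atomics rather than musl's internal primitives (the `node` and `push` names here are illustrative, not part of musl), might look like this:

```c
#include <stdatomic.h>
#include <stdio.h>

struct node {
	struct node *next;
	int tid;
};

static _Atomic(struct node *) head;

/* Lock-free push: re-read head and retry the compare-and-swap until no
 * other thread has modified it in between -- the same idiom handler()
 * uses with a_cas_p() to publish its on-stack chain node. */
static void push(struct node *n)
{
	struct node *old = atomic_load(&head);
	do n->next = old;
	while (!atomic_compare_exchange_weak(&head, &old, n));
}

int main(void)
{
	static struct node a = {.tid = 1}, b = {.tid = 2};
	push(&a);
	push(&b);
	for (struct node *p = atomic_load(&head); p; p = p->next)
		printf("caught tid %d\n", p->tid);
	return 0;
}
```

Because each node lives on the registering thread's stack, the caller only walks the list while those threads are parked in `sem_wait`, which is why `__synccall` releases them (the final `sem_post` loop) only after it has finished traversing `head`.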