Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(14)

Side by Side Diff: src/untrusted/pthread/nc_thread.c

Issue 11299315: Cleanup: Fix various coding style issues in src/untrusted/pthread (Closed) Base URL: svn://svn.chromium.org/native_client/trunk/src/native_client
Patch Set: Review Created 8 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/untrusted/pthread/nc_mutex.c ('k') | src/untrusted/pthread/pthread.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (c) 2012 The Native Client Authors. All rights reserved. 2 * Copyright (c) 2012 The Native Client Authors. All rights reserved.
3 * Use of this source code is governed by a BSD-style license that can be 3 * Use of this source code is governed by a BSD-style license that can be
4 * found in the LICENSE file. 4 * found in the LICENSE file.
5 */ 5 */
6 6
7 /* 7 /*
8 * Native Client threads library 8 * Native Client threads library
9 */ 9 */
10 10
(...skipping 15 matching lines...) Expand all
26 #include "native_client/src/untrusted/pthread/pthread.h" 26 #include "native_client/src/untrusted/pthread/pthread.h"
27 #include "native_client/src/untrusted/pthread/pthread_internal.h" 27 #include "native_client/src/untrusted/pthread/pthread_internal.h"
28 #include "native_client/src/untrusted/pthread/pthread_types.h" 28 #include "native_client/src/untrusted/pthread/pthread_types.h"
29 29
30 #include "native_client/src/untrusted/valgrind/dynamic_annotations.h" 30 #include "native_client/src/untrusted/valgrind/dynamic_annotations.h"
31 31
32 #if defined(NACL_IN_IRT) 32 #if defined(NACL_IN_IRT)
33 # include "native_client/src/untrusted/irt/irt_private.h" 33 # include "native_client/src/untrusted/irt/irt_private.h"
34 #endif 34 #endif
35 35
36 #define FUN_TO_VOID_PTR(a) ((void*)((uintptr_t) a)) 36 #define FUN_TO_VOID_PTR(a) ((void *) (uintptr_t) (a))
37 37
38 /* 38 /*
39  * ABI tables for underlying NaCl thread interfaces. 39  * ABI tables for underlying NaCl thread interfaces.
40 */ 40 */
41 static struct nacl_irt_thread irt_thread; 41 static struct nacl_irt_thread irt_thread;
42 42
43 /* 43 /*
44 * These days, the thread_create() syscall/IRT call will align the 44 * These days, the thread_create() syscall/IRT call will align the
45 * stack for us, but for compatibility with older, released x86 45 * stack for us, but for compatibility with older, released x86
46 * versions of NaCl where thread_create() does not align the stack, we 46 * versions of NaCl where thread_create() does not align the stack, we
47 * align the stack ourselves. 47 * align the stack ourselves.
48 */ 48 */
49 #if defined(__i386__) 49 #if defined(__i386__)
50 static const uint32_t kStackAlignment = 32; 50 static const uint32_t kStackAlignment = 32;
51 static const uint32_t kStackPadBelowAlign = 4; /* Return address size */ 51 static const uint32_t kStackPadBelowAlign = 4; /* Return address size */
52 #elif defined(__x86_64__) 52 #elif defined(__x86_64__)
53 static const uint32_t kStackAlignment = 32; 53 static const uint32_t kStackAlignment = 32;
54 static const uint32_t kStackPadBelowAlign = 8; /* Return address size */ 54 static const uint32_t kStackPadBelowAlign = 8; /* Return address size */
55 #else 55 #else
56 static const uint32_t kStackAlignment = 1; 56 static const uint32_t kStackAlignment = 1;
57 static const uint32_t kStackPadBelowAlign = 0; 57 static const uint32_t kStackPadBelowAlign = 0;
58 #endif 58 #endif
59 59
60 #define TDB_SIZE (sizeof(struct nc_combined_tdb)) 60 #define TDB_SIZE (sizeof(struct nc_combined_tdb))
61 61
/*
 * Round |offset| up to the next multiple of |alignment| and return it as a
 * pointer.  |alignment| must be a power of two (it is used as a bit mask).
 */
static inline char *align(uint32_t offset, uint32_t alignment) {
  uint32_t mask = alignment - 1;
  return (char *) ((offset + mask) & ~mask);
}
65 65
66 /* Thread management global variables */ 66 /* Thread management global variables. */
67 const int __nc_kMaxCachedMemoryBlocks = 50; 67 const int __nc_kMaxCachedMemoryBlocks = 50;
68 68
69 int __nc_thread_initialized; 69 int __nc_thread_initialized;
70 70
71 /* mutex used to synchronize thread management code */ 71 /* Mutex used to synchronize thread management code. */
72 pthread_mutex_t __nc_thread_management_lock; 72 pthread_mutex_t __nc_thread_management_lock;
73 73
74 /* condition variable that gets signaled when all the threads 74 /*
75 * except the main thread have terminated 75 * Condition variable that gets signaled when all the threads
76 * except the main thread have terminated.
76 */ 77 */
77 static pthread_cond_t __nc_last_thread_cond; 78 static pthread_cond_t __nc_last_thread_cond;
78 static pthread_t __nc_initial_thread_id; 79 static pthread_t __nc_initial_thread_id;
79 80
80 /* number of threads currently running in this NaCl module */ 81 /* Number of threads currently running in this NaCl module. */
81 int __nc_running_threads_counter = 1; 82 int __nc_running_threads_counter = 1;
82 83
83 /* we have two queues of memory blocks - one for each type */ 84 /* We have two queues of memory blocks - one for each type. */
84 STAILQ_HEAD(tailhead, entry) __nc_thread_memory_blocks[2]; 85 STAILQ_HEAD(tailhead, entry) __nc_thread_memory_blocks[2];
85 /* We need a counter for each queue to keep track of number of blocks */ 86 /* We need a counter for each queue to keep track of number of blocks. */
86 int __nc_memory_block_counter[2]; 87 int __nc_memory_block_counter[2];
87 88
88 #define NODE_TO_PAYLOAD(TlsNode) \ 89 #define NODE_TO_PAYLOAD(TlsNode) \
89 ((char*)(TlsNode) + sizeof(nc_thread_memory_block_t)) 90 ((char *) (TlsNode) + sizeof(nc_thread_memory_block_t))
90 91
91 /* Internal functions */ 92 /* Internal functions */
92 93
/* Deliberately fault the process: store through a null pointer forever. */
static inline void nc_abort(void) {
  for (;;)
    *(volatile int *) 0 = 0;  /* Crash. */
}
96 97
97 static inline nc_thread_descriptor_t *nc_get_tdb(void) { 98 static inline nc_thread_descriptor_t *nc_get_tdb(void) {
98 /* 99 /*
99 * Fetch the thread-specific data pointer. This is usually just 100 * Fetch the thread-specific data pointer. This is usually just
100 * a wrapper around __libnacl_irt_tls.tls_get() but we don't use 101 * a wrapper around __libnacl_irt_tls.tls_get() but we don't use
101 * that here so that the IRT build can override the definition. 102 * that here so that the IRT build can override the definition.
102 */ 103 */
103 return (void *) ((char *) __nacl_read_tp() + __nacl_tp_tdb_offset(TDB_SIZE)); 104 return (void *) ((char *) __nacl_read_tp() + __nacl_tp_tdb_offset(TDB_SIZE));
104 } 105 }
105 106
106 static void nc_thread_starter(void) { 107 static void nc_thread_starter(void) {
107 nc_thread_descriptor_t *tdb = nc_get_tdb(); 108 nc_thread_descriptor_t *tdb = nc_get_tdb();
108 __newlib_thread_init(); 109 __newlib_thread_init();
109 #if defined(NACL_IN_IRT) 110 #if defined(NACL_IN_IRT)
110 g_is_irt_internal_thread = 1; 111 g_is_irt_internal_thread = 1;
111 #endif 112 #endif
112 void *retval = tdb->start_func(tdb->state); 113 void *retval = tdb->start_func(tdb->state);
113 /* if the function returns, terminate the thread */ 114 /* If the function returns, terminate the thread. */
114 pthread_exit(retval); 115 pthread_exit(retval);
115 /* NOTREACHED */ 116 /* NOTREACHED */
116 /* TODO(gregoryd) - add assert */ 117 /* TODO(gregoryd) - add assert */
117 } 118 }
118 119
119 static nc_thread_memory_block_t* nc_allocate_memory_block_mu( 120 static nc_thread_memory_block_t *nc_allocate_memory_block_mu(
120 nc_thread_memory_block_type_t type, 121 nc_thread_memory_block_type_t type,
121 int required_size) { 122 int required_size) {
122 struct tailhead *head; 123 struct tailhead *head;
123 nc_thread_memory_block_t *node; 124 nc_thread_memory_block_t *node;
124 /* assume the lock is held!!! */ 125 /* Assume the lock is held!!! */
125 if (type >= MAX_MEMORY_TYPE) 126 if (type >= MAX_MEMORY_TYPE)
126 return NULL; 127 return NULL;
127 head = &__nc_thread_memory_blocks[type]; 128 head = &__nc_thread_memory_blocks[type];
128 129
129 /* We need to know the size even if we find a free node - to memset it to 0 */ 130 /* We need to know the size even if we find a free node - to memset it to 0 */
130 switch (type) { 131 switch (type) {
131 case THREAD_STACK_MEMORY: 132 case THREAD_STACK_MEMORY:
132 required_size = required_size + kStackAlignment - 1; 133 required_size = required_size + kStackAlignment - 1;
133 break; 134 break;
134 case TLS_AND_TDB_MEMORY: 135 case TLS_AND_TDB_MEMORY:
135 break; 136 break;
136 case MAX_MEMORY_TYPE: 137 case MAX_MEMORY_TYPE:
137 default: 138 default:
138 return NULL; 139 return NULL;
139 } 140 }
140 141
141 if (!STAILQ_EMPTY(head)) { 142 if (!STAILQ_EMPTY(head)) {
142 /* try to get one from queue */ 143 /* Try to get one from queue. */
143 nc_thread_memory_block_t *node = STAILQ_FIRST(head); 144 nc_thread_memory_block_t *node = STAILQ_FIRST(head);
144 145
145 /* 146 /*
146 * On average the memory blocks will be marked as not used in the same order 147 * On average the memory blocks will be marked as not used in the same order
147 * as they are added to the queue, therefore there is no need to check the 148 * as they are added to the queue, therefore there is no need to check the
148 * next queue entries if the first one is still in use. 149 * next queue entries if the first one is still in use.
149 */ 150 */
150 if (0 == node->is_used && node->size >= required_size) { 151 if (0 == node->is_used && node->size >= required_size) {
151 /* This will only re-use the first node possibly, and could be 152 /*
153 * This will only re-use the first node possibly, and could be
152 * improved to provide the stack with a best-fit algorithm if needed. 154 * improved to provide the stack with a best-fit algorithm if needed.
153 * TODO: we should scan all nodes to see if there is one that fits 155 * TODO: we should scan all nodes to see if there is one that fits
154 * before allocating another. 156 * before allocating another.
155 * http://code.google.com/p/nativeclient/issues/detail?id=1569 157 * http://code.google.com/p/nativeclient/issues/detail?id=1569
156 */ 158 */
157 int size = node->size; 159 int size = node->size;
158 STAILQ_REMOVE_HEAD(head, entries); 160 STAILQ_REMOVE_HEAD(head, entries);
159 --__nc_memory_block_counter[type]; 161 --__nc_memory_block_counter[type];
160 162
161 memset(node, 0,sizeof(*node)); 163 memset(node, 0,sizeof(*node));
(...skipping 14 matching lines...) Expand all
176 * support variable stack size. 178 * support variable stack size.
177 */ 179 */
178 nc_thread_memory_block_t *tmp = STAILQ_FIRST(head); 180 nc_thread_memory_block_t *tmp = STAILQ_FIRST(head);
179 if (0 == tmp->is_used) { 181 if (0 == tmp->is_used) {
180 STAILQ_REMOVE_HEAD(head, entries); 182 STAILQ_REMOVE_HEAD(head, entries);
181 --__nc_memory_block_counter[type]; 183 --__nc_memory_block_counter[type];
182 free(tmp); 184 free(tmp);
183 } else { 185 } else {
184 /* 186 /*
185 * Stop once we find a block that is still in use, 187 * Stop once we find a block that is still in use,
186 * since probably there is no point to continue 188 * since probably there is no point to continue.
187 */ 189 */
188 break; 190 break;
189 } 191 }
190 } 192 }
191 193
192 } 194 }
193 /* no available blocks of the required type/size - allocate one */ 195 /* No available blocks of the required type/size - allocate one. */
194 node = malloc(MEMORY_BLOCK_ALLOCATION_SIZE(required_size)); 196 node = malloc(MEMORY_BLOCK_ALLOCATION_SIZE(required_size));
195 if (NULL != node) { 197 if (NULL != node) {
196 memset(node, 0, sizeof(*node)); 198 memset(node, 0, sizeof(*node));
197 node->size = required_size; 199 node->size = required_size;
198 node->is_used = 1; 200 node->is_used = 1;
199 } 201 }
200 return node; 202 return node;
201 } 203 }
202 204
203 static void nc_free_memory_block_mu(nc_thread_memory_block_type_t type, 205 static void nc_free_memory_block_mu(nc_thread_memory_block_type_t type,
204 nc_thread_memory_block_t* node) { 206 nc_thread_memory_block_t *node) {
205 /* assume the lock is held !!! */ 207 /* Assume the lock is held!!! */
206 struct tailhead *head = &__nc_thread_memory_blocks[type]; 208 struct tailhead *head = &__nc_thread_memory_blocks[type];
207 STAILQ_INSERT_TAIL(head, node, entries); 209 STAILQ_INSERT_TAIL(head, node, entries);
208 ++__nc_memory_block_counter[type]; 210 ++__nc_memory_block_counter[type];
209 } 211 }
210 212
211 static void nc_release_basic_data_mu(nc_basic_thread_data_t *basic_data) { 213 static void nc_release_basic_data_mu(nc_basic_thread_data_t *basic_data) {
212 /* join_condvar can be initialized only if tls_node exists */ 214 /* join_condvar can be initialized only if tls_node exists. */
213 pthread_cond_destroy(&basic_data->join_condvar); 215 pthread_cond_destroy(&basic_data->join_condvar);
214 free(basic_data); 216 free(basic_data);
215 } 217 }
216 218
217 static void nc_release_tls_node(nc_thread_memory_block_t *block, 219 static void nc_release_tls_node(nc_thread_memory_block_t *block,
218 nc_thread_descriptor_t *tdb) { 220 nc_thread_descriptor_t *tdb) {
219 if (block) { 221 if (block) {
220 if (NULL != tdb->basic_data) { 222 if (NULL != tdb->basic_data) {
221 tdb->basic_data->tdb = NULL; 223 tdb->basic_data->tdb = NULL;
222 } 224 }
223 block->is_used = 0; 225 block->is_used = 0;
224 nc_free_memory_block_mu(TLS_AND_TDB_MEMORY, block); 226 nc_free_memory_block_mu(TLS_AND_TDB_MEMORY, block);
225 } 227 }
226 } 228 }
227 229
228 /* Initialize a newly allocated TDB to some default values */ 230 /* Initialize a newly allocated TDB to some default values. */
229 static int nc_tdb_init(nc_thread_descriptor_t *tdb, 231 static int nc_tdb_init(nc_thread_descriptor_t *tdb,
230 nc_basic_thread_data_t * basic_data) { 232 nc_basic_thread_data_t *basic_data) {
231 tdb->tls_base = tdb; 233 tdb->tls_base = tdb;
232 tdb->basic_data = basic_data; 234 tdb->basic_data = basic_data;
233 basic_data->tdb = tdb; 235 basic_data->tdb = tdb;
234 tdb->basic_data->retval = 0; 236 tdb->basic_data->retval = 0;
235 tdb->basic_data->status = THREAD_RUNNING; 237 tdb->basic_data->status = THREAD_RUNNING;
236 238
237 tdb->joinable = PTHREAD_CREATE_JOINABLE; 239 tdb->joinable = PTHREAD_CREATE_JOINABLE;
238 tdb->join_waiting = 0; 240 tdb->join_waiting = 0;
239 241
240 tdb->tls_node = NULL; 242 tdb->tls_node = NULL;
241 tdb->stack_node = NULL; 243 tdb->stack_node = NULL;
242 244
243 tdb->start_func = NULL; 245 tdb->start_func = NULL;
244 tdb->state = NULL; 246 tdb->state = NULL;
245 247
246 tdb->irt_thread_data = NULL; 248 tdb->irt_thread_data = NULL;
247 249
248 /* Imitate PTHREAD_COND_INITIALIZER - we cannot use it directly here, 250 /*
251 * Imitate PTHREAD_COND_INITIALIZER - we cannot use it directly here,
249 * since this is not variable initialization. 252 * since this is not variable initialization.
250 */ 253 */
251 nc_pthread_condvar_ctor(&basic_data->join_condvar); 254 nc_pthread_condvar_ctor(&basic_data->join_condvar);
252 return 0; 255 return 0;
253 } 256 }
254 257
255 /* Initializes all globals except for the initial thread structure. */ 258 /* Initializes all globals except for the initial thread structure. */
256 void __nc_initialize_globals(void) { 259 void __nc_initialize_globals(void) {
257 /* 260 /*
258 * Fetch the ABI tables from the IRT. If we don't have these, all is lost. 261 * Fetch the ABI tables from the IRT. If we don't have these, all is lost.
259 */ 262 */
260 __nc_initialize_interfaces(&irt_thread); 263 __nc_initialize_interfaces(&irt_thread);
261 264
262 if (pthread_mutex_init(&__nc_thread_management_lock, NULL) != 0) 265 if (pthread_mutex_init(&__nc_thread_management_lock, NULL) != 0)
263 nc_abort(); 266 nc_abort();
264 267
265 /* Tell ThreadSanitizer to not generate happens-before arcs between uses of 268 /*
266 this mutex. Otherwise we miss to many real races. 269 * Tell ThreadSanitizer to not generate happens-before arcs between uses of
267 When not running under ThreadSanitizer, this is just a call to an empty 270 * this mutex. Otherwise we miss to many real races.
268 function. */ 271 * When not running under ThreadSanitizer, this is just a call to an empty
272 * function.
273 */
269 ANNOTATE_NOT_HAPPENS_BEFORE_MUTEX(&__nc_thread_management_lock); 274 ANNOTATE_NOT_HAPPENS_BEFORE_MUTEX(&__nc_thread_management_lock);
270 275
271 if (pthread_cond_init(&__nc_last_thread_cond, NULL) != 0) 276 if (pthread_cond_init(&__nc_last_thread_cond, NULL) != 0)
272 nc_abort(); 277 nc_abort();
273 STAILQ_INIT(&__nc_thread_memory_blocks[0]); 278 STAILQ_INIT(&__nc_thread_memory_blocks[0]);
274 STAILQ_INIT(&__nc_thread_memory_blocks[1]); 279 STAILQ_INIT(&__nc_thread_memory_blocks[1]);
275 280
276 __nc_thread_initialized = 1; 281 __nc_thread_initialized = 1;
277 } 282 }
278 283
(...skipping 20 matching lines...) Expand all
299 tdb->basic_data.status = THREAD_RUNNING; 304 tdb->basic_data.status = THREAD_RUNNING;
300 pthread_cond_t condvar_init = PTHREAD_COND_INITIALIZER; 305 pthread_cond_t condvar_init = PTHREAD_COND_INITIALIZER;
301 tdb->basic_data.join_condvar = condvar_init; 306 tdb->basic_data.join_condvar = condvar_init;
302 307
303 tdb->tdb.basic_data = &tdb->basic_data; 308 tdb->tdb.basic_data = &tdb->basic_data;
304 tdb->basic_data.tdb = &tdb->tdb; 309 tdb->basic_data.tdb = &tdb->tdb;
305 } 310 }
306 311
307 #else 312 #else
308 313
309 /* Will be called from the library startup code, 314 /*
310 * which always happens on the application's main thread 315 * Will be called from the library startup code,
316 * which always happens on the application's main thread.
311 */ 317 */
312 void __pthread_initialize(void) { 318 void __pthread_initialize(void) {
313 __pthread_initialize_minimal(TDB_SIZE); 319 __pthread_initialize_minimal(TDB_SIZE);
314 320
315 struct nc_combined_tdb *tdb = (struct nc_combined_tdb *) nc_get_tdb(); 321 struct nc_combined_tdb *tdb = (struct nc_combined_tdb *) nc_get_tdb();
316 nc_tdb_init(&tdb->tdb, &tdb->basic_data); 322 nc_tdb_init(&tdb->tdb, &tdb->basic_data);
317 __nc_initial_thread_id = &tdb->basic_data; 323 __nc_initial_thread_id = &tdb->basic_data;
318 324
319 __nc_initialize_globals(); 325 __nc_initialize_globals();
320 326
321 __nc_futex_init(); 327 __nc_futex_init();
322 } 328 }
323 329
324 #endif 330 #endif
325 331
326 332
327 /* pthread functions */ 333 /* pthread functions */
328 334
329 int pthread_create(pthread_t *thread_id, 335 int pthread_create(pthread_t *thread_id,
330 const pthread_attr_t *attr, 336 const pthread_attr_t *attr,
331 void *(*start_routine) (void *), 337 void *(*start_routine)(void *),
332 void *arg) { 338 void *arg) {
333 int retval = EAGAIN; 339 int retval = EAGAIN;
334 void *esp; 340 void *esp;
335 /* declare the variables outside of the while scope */ 341 /* Declare the variables outside of the while scope. */
336 nc_thread_memory_block_t *stack_node = NULL; 342 nc_thread_memory_block_t *stack_node = NULL;
337 char *thread_stack = NULL; 343 char *thread_stack = NULL;
338 nc_thread_descriptor_t *new_tdb = NULL; 344 nc_thread_descriptor_t *new_tdb = NULL;
339 nc_basic_thread_data_t *new_basic_data = NULL; 345 nc_basic_thread_data_t *new_basic_data = NULL;
340 nc_thread_memory_block_t *tls_node = NULL; 346 nc_thread_memory_block_t *tls_node = NULL;
341 size_t stacksize = PTHREAD_STACK_DEFAULT; 347 size_t stacksize = PTHREAD_STACK_DEFAULT;
342 void *new_tp; 348 void *new_tp;
343 349
344 /* TODO(gregoryd) - right now a single lock is used, try to optimize? */ 350 /* TODO(gregoryd) - right now a single lock is used, try to optimize? */
345 pthread_mutex_lock(&__nc_thread_management_lock); 351 pthread_mutex_lock(&__nc_thread_management_lock);
346 352
347 do { 353 do {
348 /* Allocate the combined TLS + TDB block---see tls.h for explanation. */ 354 /* Allocate the combined TLS + TDB block---see tls.h for explanation. */
349 355
350 tls_node = nc_allocate_memory_block_mu(TLS_AND_TDB_MEMORY, 356 tls_node = nc_allocate_memory_block_mu(TLS_AND_TDB_MEMORY,
351 __nacl_tls_combined_size(TDB_SIZE)); 357 __nacl_tls_combined_size(TDB_SIZE));
352 if (NULL == tls_node) 358 if (NULL == tls_node)
353 break; 359 break;
354 360
355 new_tp = __nacl_tls_initialize_memory(NODE_TO_PAYLOAD(tls_node), TDB_SIZE); 361 new_tp = __nacl_tls_initialize_memory(NODE_TO_PAYLOAD(tls_node), TDB_SIZE);
356 362
357 new_tdb = (nc_thread_descriptor_t *) 363 new_tdb = (nc_thread_descriptor_t *)
358 ((char *) new_tp + __nacl_tp_tdb_offset(TDB_SIZE)); 364 ((char *) new_tp + __nacl_tp_tdb_offset(TDB_SIZE));
359 365
360 /* TODO(gregoryd): consider creating a pool of basic_data structs, 366 /*
367 * TODO(gregoryd): consider creating a pool of basic_data structs,
361 * similar to stack and TLS+TDB (probably when adding the support for 368 * similar to stack and TLS+TDB (probably when adding the support for
362 * variable stack size). 369 * variable stack size).
363 */ 370 */
364 new_basic_data = malloc(sizeof(*new_basic_data)); 371 new_basic_data = malloc(sizeof(*new_basic_data));
365 if (NULL == new_basic_data) { 372 if (NULL == new_basic_data) {
366 /* 373 /*
367 * The tdb should be zero intialized. 374 * The tdb should be zero intialized.
368 * This just re-emphasizes this requirement. 375 * This just re-emphasizes this requirement.
369 */ 376 */
370 new_tdb->basic_data = NULL; 377 new_tdb->basic_data = NULL;
371 break; 378 break;
372 } 379 }
373 380
374 nc_tdb_init(new_tdb, new_basic_data); 381 nc_tdb_init(new_tdb, new_basic_data);
375 new_tdb->tls_node = tls_node; 382 new_tdb->tls_node = tls_node;
376 383
377 /* all the required members of the tdb must be initialized before 384 /*
385 * All the required members of the tdb must be initialized before
378 * the thread is started and actually before the global lock is released, 386 * the thread is started and actually before the global lock is released,
379 * since another thread can call pthread_join() or pthread_detach() 387 * since another thread can call pthread_join() or pthread_detach().
380 */ 388 */
381 new_tdb->start_func = start_routine; 389 new_tdb->start_func = start_routine;
382 new_tdb->state = arg; 390 new_tdb->state = arg;
383 if (attr != NULL) { 391 if (attr != NULL) {
384 new_tdb->joinable = attr->joinable; 392 new_tdb->joinable = attr->joinable;
385 stacksize = attr->stacksize; 393 stacksize = attr->stacksize;
386 } 394 }
387 395
388 /* Allocate the stack for the thread */ 396 /* Allocate the stack for the thread. */
389 stack_node = nc_allocate_memory_block_mu(THREAD_STACK_MEMORY, stacksize); 397 stack_node = nc_allocate_memory_block_mu(THREAD_STACK_MEMORY, stacksize);
390 if (NULL == stack_node) { 398 if (NULL == stack_node) {
391 retval = EAGAIN; 399 retval = EAGAIN;
392 break; 400 break;
393 } 401 }
394 thread_stack = align((uint32_t) NODE_TO_PAYLOAD(stack_node), 402 thread_stack = align((uint32_t) NODE_TO_PAYLOAD(stack_node),
395 kStackAlignment); 403 kStackAlignment);
396 new_tdb->stack_node = stack_node; 404 new_tdb->stack_node = stack_node;
397 405
398 retval = 0; 406 retval = 0;
399 } while (0); 407 } while (0);
400 408
401 if (0 != retval) { 409 if (0 != retval) {
402 pthread_mutex_unlock(&__nc_thread_management_lock); 410 pthread_mutex_unlock(&__nc_thread_management_lock);
403 goto ret; /* error */ 411 goto ret; /* error */
404 } 412 }
405 413
406 /* Speculatively increase the thread count. If thread creation 414 /*
407 fails, we will decrease it back. This way the thread count will 415 * Speculatively increase the thread count. If thread creation
408 never be lower than the actual number of threads, but can briefly be 416 * fails, we will decrease it back. This way the thread count will
409 higher than that. */ 417 * never be lower than the actual number of threads, but can briefly
418 * be higher than that.
419 */
410 ++__nc_running_threads_counter; 420 ++__nc_running_threads_counter;
411 421
412 /* Save the new thread id. This can not be done after the syscall, 422 /*
413 because the child thread could have already finished by that 423 * Save the new thread id. This can not be done after the syscall,
414 time. If thread creation fails, it will be overriden with -1 later.*/ 424 * because the child thread could have already finished by that
425 * time. If thread creation fails, it will be overriden with -1
426 * later.
427 */
415 *thread_id = new_basic_data; 428 *thread_id = new_basic_data;
416 429
417 pthread_mutex_unlock(&__nc_thread_management_lock); 430 pthread_mutex_unlock(&__nc_thread_management_lock);
418 431
419 /* 432 /*
420 * Calculate the top-of-stack location. The very first location is a 433 * Calculate the top-of-stack location. The very first location is a
421 * zero address of architecture-dependent width, needed to satisfy the 434 * zero address of architecture-dependent width, needed to satisfy the
422 * normal ABI alignment requirements for the stack. (On some machines 435 * normal ABI alignment requirements for the stack. (On some machines
423 * this is the dummy return address of the thread-start function.) 436 * this is the dummy return address of the thread-start function.)
424 * 437 *
425 * Both thread_stack and stacksize are multiples of 16. 438 * Both thread_stack and stacksize are multiples of 16.
426 */ 439 */
427 esp = (void *) (thread_stack + stacksize - kStackPadBelowAlign); 440 esp = (void *) (thread_stack + stacksize - kStackPadBelowAlign);
428 memset(esp, 0, kStackPadBelowAlign); 441 memset(esp, 0, kStackPadBelowAlign);
429 442
430 /* start the thread */ 443 /* Start the thread. */
431 retval = irt_thread.thread_create( 444 retval = irt_thread.thread_create(
432 FUN_TO_VOID_PTR(nc_thread_starter), esp, new_tp); 445 FUN_TO_VOID_PTR(nc_thread_starter), esp, new_tp);
433 if (0 != retval) { 446 if (0 != retval) {
434 pthread_mutex_lock(&__nc_thread_management_lock); 447 pthread_mutex_lock(&__nc_thread_management_lock);
435 /* TODO(gregoryd) : replace with atomic decrement? */ 448 /* TODO(gregoryd) : replace with atomic decrement? */
436 --__nc_running_threads_counter; 449 --__nc_running_threads_counter;
437 pthread_mutex_unlock(&__nc_thread_management_lock); 450 pthread_mutex_unlock(&__nc_thread_management_lock);
438 goto ret; 451 goto ret;
439 } 452 }
440 453
441 assert(0 == retval); 454 assert(0 == retval);
442 455
443 ret: 456 ret:
444 if (0 != retval) { 457 if (0 != retval) {
445 /* failed to create a thread */ 458 /* Failed to create a thread. */
446 pthread_mutex_lock(&__nc_thread_management_lock); 459 pthread_mutex_lock(&__nc_thread_management_lock);
447 460
448 nc_release_tls_node(tls_node, new_tdb); 461 nc_release_tls_node(tls_node, new_tdb);
449 if (new_basic_data) { 462 if (new_basic_data) {
450 nc_release_basic_data_mu(new_basic_data); 463 nc_release_basic_data_mu(new_basic_data);
451 } 464 }
452 if (stack_node) { 465 if (stack_node) {
453 stack_node->is_used = 0; 466 stack_node->is_used = 0;
454 nc_free_memory_block_mu(THREAD_STACK_MEMORY, stack_node); 467 nc_free_memory_block_mu(THREAD_STACK_MEMORY, stack_node);
455 } 468 }
(...skipping 11 matching lines...) Expand all
467 while (1 != __nc_running_threads_counter) { 480 while (1 != __nc_running_threads_counter) {
468 pthread_cond_wait(&__nc_last_thread_cond, &__nc_thread_management_lock); 481 pthread_cond_wait(&__nc_last_thread_cond, &__nc_thread_management_lock);
469 } 482 }
470 ANNOTATE_CONDVAR_LOCK_WAIT(&__nc_last_thread_cond, 483 ANNOTATE_CONDVAR_LOCK_WAIT(&__nc_last_thread_cond,
471 &__nc_thread_management_lock); 484 &__nc_thread_management_lock);
472 485
473 pthread_mutex_unlock(&__nc_thread_management_lock); 486 pthread_mutex_unlock(&__nc_thread_management_lock);
474 return 0; 487 return 0;
475 } 488 }
476 489
477 void pthread_exit (void* retval) { 490 void pthread_exit(void *retval) {
478 /* get all we need from the tdb before releasing it */ 491 /* Get all we need from the tdb before releasing it. */
479 nc_thread_descriptor_t *tdb = nc_get_tdb(); 492 nc_thread_descriptor_t *tdb = nc_get_tdb();
480 nc_thread_memory_block_t *stack_node = tdb->stack_node; 493 nc_thread_memory_block_t *stack_node = tdb->stack_node;
481 int32_t *is_used = &stack_node->is_used; 494 int32_t *is_used = &stack_node->is_used;
482 nc_basic_thread_data_t *basic_data = tdb->basic_data; 495 nc_basic_thread_data_t *basic_data = tdb->basic_data;
483 int joinable = tdb->joinable; 496 int joinable = tdb->joinable;
484 497
485 /* call the destruction functions for TSD */ 498 /* Call the destruction functions for TSD. */
486 __nc_tsd_exit(); 499 __nc_tsd_exit();
487 500
488 __newlib_thread_exit(); 501 __newlib_thread_exit();
489 502
490 __nc_futex_thread_exit(); 503 __nc_futex_thread_exit();
491 504
492 if (__nc_initial_thread_id != basic_data) { 505 if (__nc_initial_thread_id != basic_data) {
493 pthread_mutex_lock(&__nc_thread_management_lock); 506 pthread_mutex_lock(&__nc_thread_management_lock);
494 --__nc_running_threads_counter; 507 --__nc_running_threads_counter;
495 pthread_mutex_unlock(&__nc_thread_management_lock); 508 pthread_mutex_unlock(&__nc_thread_management_lock);
496 } else { 509 } else {
497 /* This is the main thread - wait for other threads to complete */ 510 /* This is the main thread - wait for other threads to complete. */
498 wait_for_threads(); 511 wait_for_threads();
499 exit(0); 512 exit(0);
500 } 513 }
501 514
502 pthread_mutex_lock(&__nc_thread_management_lock); 515 pthread_mutex_lock(&__nc_thread_management_lock);
503 516
504 basic_data->retval = retval; 517 basic_data->retval = retval;
505 518
506 if (joinable) { 519 if (joinable) {
507 /* If somebody is waiting for this thread, signal */ 520 /* If somebody is waiting for this thread, signal. */
508 basic_data->status = THREAD_TERMINATED; 521 basic_data->status = THREAD_TERMINATED;
509 pthread_cond_signal(&basic_data->join_condvar); 522 pthread_cond_signal(&basic_data->join_condvar);
510 } 523 }
511 /* 524 /*
512 * We can release TLS+TDB - thread id and its return value are still 525 * We can release TLS+TDB - thread id and its return value are still
513 * kept in basic_data 526 * kept in basic_data.
514 */ 527 */
515 nc_release_tls_node(tdb->tls_node, tdb); 528 nc_release_tls_node(tdb->tls_node, tdb);
516 529
517 if (!joinable) { 530 if (!joinable) {
518 nc_release_basic_data_mu(basic_data); 531 nc_release_basic_data_mu(basic_data);
519 } 532 }
520 533
521 /* now add the stack to the list but keep it marked as used */ 534 /* Now add the stack to the list but keep it marked as used. */
522 nc_free_memory_block_mu(THREAD_STACK_MEMORY, stack_node); 535 nc_free_memory_block_mu(THREAD_STACK_MEMORY, stack_node);
523 536
524 if (1 == __nc_running_threads_counter) { 537 if (1 == __nc_running_threads_counter) {
525 pthread_cond_signal(&__nc_last_thread_cond); 538 pthread_cond_signal(&__nc_last_thread_cond);
526 } 539 }
527 540
528 pthread_mutex_unlock(&__nc_thread_management_lock); 541 pthread_mutex_unlock(&__nc_thread_management_lock);
529 irt_thread.thread_exit(is_used); 542 irt_thread.thread_exit(is_used);
530 nc_abort(); 543 nc_abort();
531 } 544 }
532 545
533 int pthread_join(pthread_t thread_id, void **thread_return) { 546 int pthread_join(pthread_t thread_id, void **thread_return) {
534 int retval = 0; 547 int retval = 0;
535 nc_basic_thread_data_t *basic_data = thread_id; 548 nc_basic_thread_data_t *basic_data = thread_id;
536 if (pthread_self() == thread_id) { 549 if (pthread_self() == thread_id) {
537 return EDEADLK; 550 return EDEADLK;
538 } 551 }
539 552
540 pthread_mutex_lock(&__nc_thread_management_lock); 553 pthread_mutex_lock(&__nc_thread_management_lock);
541 554
542 if (basic_data->tdb != NULL) { 555 if (basic_data->tdb != NULL) {
543 /* The thread is still running */ 556 /* The thread is still running. */
544 nc_thread_descriptor_t *joined_tdb = basic_data->tdb; 557 nc_thread_descriptor_t *joined_tdb = basic_data->tdb;
545 if (!joined_tdb->joinable || joined_tdb->join_waiting) { 558 if (!joined_tdb->joinable || joined_tdb->join_waiting) {
546 /* the thread is detached or another thread is waiting to join */ 559 /* The thread is detached or another thread is waiting to join. */
547 retval = EINVAL; 560 retval = EINVAL;
548 goto ret; 561 goto ret;
549 } 562 }
550 joined_tdb->join_waiting = 1; 563 joined_tdb->join_waiting = 1;
551 /* wait till the thread terminates */ 564 /* Wait till the thread terminates. */
552 while (THREAD_TERMINATED != basic_data->status) { 565 while (THREAD_TERMINATED != basic_data->status) {
553 pthread_cond_wait(&basic_data->join_condvar, 566 pthread_cond_wait(&basic_data->join_condvar,
554 &__nc_thread_management_lock); 567 &__nc_thread_management_lock);
555 } 568 }
556 } 569 }
557 ANNOTATE_CONDVAR_LOCK_WAIT(&basic_data->join_condvar, 570 ANNOTATE_CONDVAR_LOCK_WAIT(&basic_data->join_condvar,
558 &__nc_thread_management_lock); 571 &__nc_thread_management_lock);
559 /* The thread has already terminated */ 572 /* The thread has already terminated. */
560 /* save the return value */ 573 /* Save the return value. */
561 if (thread_return != NULL) { 574 if (thread_return != NULL) {
562 *thread_return = basic_data->retval; 575 *thread_return = basic_data->retval;
563 } 576 }
564 577
565 /* release the resources */ 578 /* Release the resources. */
566 nc_release_basic_data_mu(basic_data); 579 nc_release_basic_data_mu(basic_data);
567 retval = 0; 580 retval = 0;
568 581
569 ret: 582 ret:
570 pthread_mutex_unlock(&__nc_thread_management_lock); 583 pthread_mutex_unlock(&__nc_thread_management_lock);
571 584
572 return retval; 585 return retval;
573 586
574 } 587 }
575 588
576 int pthread_detach(pthread_t thread_id) { 589 int pthread_detach(pthread_t thread_id) {
577 int retval = 0; 590 int retval = 0;
578 nc_basic_thread_data_t *basic_data = thread_id; 591 nc_basic_thread_data_t *basic_data = thread_id;
579 nc_thread_descriptor_t *detached_tdb; 592 nc_thread_descriptor_t *detached_tdb;
580 /* TODO(gregoryd) - can be optimized using InterlockedExchange 593 /*
581 * once it's available */ 594 * TODO(gregoryd) - can be optimized using InterlockedExchange
595 * once it's available.
596 */
582 pthread_mutex_lock(&__nc_thread_management_lock); 597 pthread_mutex_lock(&__nc_thread_management_lock);
583 detached_tdb = basic_data->tdb; 598 detached_tdb = basic_data->tdb;
584 599
585 if (NULL == detached_tdb) { 600 if (NULL == detached_tdb) {
586 /* The thread has already terminated */ 601 /* The thread has already terminated. */
587 nc_release_basic_data_mu(basic_data); 602 nc_release_basic_data_mu(basic_data);
588 } else { 603 } else {
589 if (!detached_tdb->join_waiting) { 604 if (!detached_tdb->join_waiting) {
590 if (detached_tdb->joinable) { 605 if (detached_tdb->joinable) {
591 detached_tdb->joinable = 0; 606 detached_tdb->joinable = 0;
592 } else { 607 } else {
593 /* already detached */ 608 /* Already detached. */
594 retval = EINVAL; 609 retval = EINVAL;
595 } 610 }
596 } else { 611 } else {
597 /* another thread is already waiting to join - do nothing */ 612 /* Another thread is already waiting to join - do nothing. */
598 } 613 }
599 } 614 }
600 pthread_mutex_unlock(&__nc_thread_management_lock); 615 pthread_mutex_unlock(&__nc_thread_management_lock);
601 return retval; 616 return retval;
602 } 617 }
603 618
int pthread_kill(pthread_t thread_id,
                 int sig) {
  /* Signal delivery to threads is unimplemented; always report ENOSYS. */
  return ENOSYS;
}
609 624
610 pthread_t pthread_self(void) { 625 pthread_t pthread_self(void) {
611 /* get the tdb pointer from gs and use it to return the thread handle*/ 626 /* Get the tdb pointer from gs and use it to return the thread handle. */
612 nc_thread_descriptor_t *tdb = nc_get_tdb(); 627 nc_thread_descriptor_t *tdb = nc_get_tdb();
613 return tdb->basic_data; 628 return tdb->basic_data;
614 } 629 }
615 630
int pthread_equal(pthread_t thread1, pthread_t thread2) {
  /* Handles are comparable directly, so identity comparison suffices. */
  return thread1 == thread2;
}
619 634
620 int pthread_setschedprio(pthread_t thread_id, int prio) { 635 int pthread_setschedprio(pthread_t thread_id, int prio) {
621 if (thread_id != pthread_self()) { 636 if (thread_id != pthread_self()) {
622 /* 637 /*
623 * We can only support changing our own priority. 638 * We can only support changing our own priority.
624 */ 639 */
625 return EPERM; 640 return EPERM;
626 } 641 }
627 return irt_thread.thread_nice(prio); 642 return irt_thread.thread_nice(prio);
628 } 643 }
629 644
630 int pthread_attr_init (pthread_attr_t *attr) { 645 int pthread_attr_init(pthread_attr_t *attr) {
631 if (NULL == attr) { 646 if (NULL == attr) {
632 return EINVAL; 647 return EINVAL;
633 } 648 }
634 attr->joinable = PTHREAD_CREATE_JOINABLE; 649 attr->joinable = PTHREAD_CREATE_JOINABLE;
635 attr->stacksize = PTHREAD_STACK_DEFAULT; 650 attr->stacksize = PTHREAD_STACK_DEFAULT;
636 return 0; 651 return 0;
637 } 652 }
638 653
639 int pthread_attr_destroy (pthread_attr_t *attr) { 654 int pthread_attr_destroy(pthread_attr_t *attr) {
640 if (NULL == attr) { 655 if (NULL == attr) {
641 return EINVAL; 656 return EINVAL;
642 } 657 }
643 /* nothing to destroy */ 658 /* Nothing to destroy. */
644 return 0; 659 return 0;
645 } 660 }
646 661
647 int pthread_attr_setdetachstate (pthread_attr_t *attr, 662 int pthread_attr_setdetachstate(pthread_attr_t *attr,
648 int detachstate) { 663 int detachstate) {
649 if (NULL == attr) { 664 if (NULL == attr) {
650 return EINVAL; 665 return EINVAL;
651 } 666 }
652 attr->joinable = detachstate; 667 attr->joinable = detachstate;
653 return 0; 668 return 0;
654 } 669 }
655 670
656 int pthread_attr_getdetachstate (pthread_attr_t *attr, 671 int pthread_attr_getdetachstate(pthread_attr_t *attr,
657 int *detachstate) { 672 int *detachstate) {
658 if (NULL == attr) { 673 if (NULL == attr) {
659 return EINVAL; 674 return EINVAL;
660 } 675 }
661 return attr->joinable; 676 return attr->joinable;
662 } 677 }
663 678
664 int pthread_attr_setstacksize(pthread_attr_t *attr, 679 int pthread_attr_setstacksize(pthread_attr_t *attr,
665 size_t stacksize) { 680 size_t stacksize) {
666 if (NULL == attr) { 681 if (NULL == attr) {
667 return EINVAL; 682 return EINVAL;
668 } 683 }
669 if (PTHREAD_STACK_MIN < stacksize) { 684 if (PTHREAD_STACK_MIN < stacksize) {
670 attr->stacksize = stacksize; 685 attr->stacksize = stacksize;
671 } else { 686 } else {
672 attr->stacksize = PTHREAD_STACK_MIN; 687 attr->stacksize = PTHREAD_STACK_MIN;
673 } 688 }
674 return 0; 689 return 0;
675 } 690 }
676 691
677 int pthread_attr_getstacksize(pthread_attr_t *attr, 692 int pthread_attr_getstacksize(pthread_attr_t *attr,
678 size_t *stacksize) { 693 size_t *stacksize) {
679 if (NULL == attr) { 694 if (NULL == attr) {
680 return EINVAL; 695 return EINVAL;
681 } 696 }
682 *stacksize = attr->stacksize; 697 *stacksize = attr->stacksize;
683 return 0; 698 return 0;
684 } 699 }
685 700
/*
 * Prototypes for the C library's internal locking hooks implemented
 * below (_LOCK_T presumably comes from the libc's <sys/lock.h> —
 * verify against the toolchain headers).  Each hook maps onto a
 * pthread mutex operation.
 */
void __local_lock_init(_LOCK_T *lock);
void __local_lock_init_recursive(_LOCK_T *lock);
void __local_lock_close(_LOCK_T *lock);
void __local_lock_close_recursive(_LOCK_T *lock);
void __local_lock_acquire(_LOCK_T *lock);
void __local_lock_acquire_recursive(_LOCK_T *lock);
int __local_lock_try_acquire(_LOCK_T *lock);
int __local_lock_try_acquire_recursive(_LOCK_T *lock);
void __local_lock_release(_LOCK_T *lock);
void __local_lock_release_recursive(_LOCK_T *lock);
696 711
697 void __local_lock_init(_LOCK_T* lock) { 712 void __local_lock_init(_LOCK_T *lock) {
698 if (lock != NULL) { 713 if (lock != NULL) {
699 pthread_mutexattr_t attr; 714 pthread_mutexattr_t attr;
700 pthread_mutexattr_init(&attr); 715 pthread_mutexattr_init(&attr);
701 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_FAST_NP); 716 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_FAST_NP);
702 pthread_mutex_init((pthread_mutex_t*)lock, &attr); 717 pthread_mutex_init((pthread_mutex_t*)lock, &attr);
703 } 718 }
704 } 719 }
705 720
706 void __local_lock_init_recursive(_LOCK_T* lock) { 721 void __local_lock_init_recursive(_LOCK_T *lock) {
707 if (lock != NULL) { 722 if (lock != NULL) {
708 pthread_mutexattr_t attr; 723 pthread_mutexattr_t attr;
709 pthread_mutexattr_init(&attr); 724 pthread_mutexattr_init(&attr);
710 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP); 725 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP);
711 pthread_mutex_init((pthread_mutex_t*)lock, &attr); 726 pthread_mutex_init((pthread_mutex_t*)lock, &attr);
712 } 727 }
713 } 728 }
714 729
715 void __local_lock_close(_LOCK_T* lock) { 730 void __local_lock_close(_LOCK_T *lock) {
716 if (lock != NULL) { 731 if (lock != NULL) {
717 pthread_mutex_destroy((pthread_mutex_t*)lock); 732 pthread_mutex_destroy((pthread_mutex_t*)lock);
718 } 733 }
719 } 734 }
720 735
721 void __local_lock_close_recursive(_LOCK_T* lock) { 736 void __local_lock_close_recursive(_LOCK_T *lock) {
722 __local_lock_close(lock); 737 __local_lock_close(lock);
723 } 738 }
724 739
725 void __local_lock_acquire(_LOCK_T* lock) { 740 void __local_lock_acquire(_LOCK_T *lock) {
726 if (!__nc_thread_initialized) { 741 if (!__nc_thread_initialized) {
727 /* 742 /*
728 * pthread library is not initialized yet - there is only one thread. 743 * pthread library is not initialized yet - there is only one thread.
729 * Calling pthread_mutex_lock will cause an access violation because it 744 * Calling pthread_mutex_lock will cause an access violation because it
730 * will attempt to access the TDB which is not initialized yet 745 * will attempt to access the TDB which is not initialized yet.
731 */ 746 */
732 return; 747 return;
733 } 748 }
734 if (lock != NULL) { 749 if (lock != NULL) {
735 pthread_mutex_lock((pthread_mutex_t*)lock); 750 pthread_mutex_lock((pthread_mutex_t*)lock);
736 } 751 }
737 } 752 }
738 753
739 void __local_lock_acquire_recursive(_LOCK_T* lock) { 754 void __local_lock_acquire_recursive(_LOCK_T *lock) {
740 __local_lock_acquire(lock); 755 __local_lock_acquire(lock);
741 } 756 }
742 757
743 int __local_lock_try_acquire(_LOCK_T* lock) { 758 int __local_lock_try_acquire(_LOCK_T *lock) {
744 if (!__nc_thread_initialized) { 759 if (!__nc_thread_initialized) {
745 /* 760 /*
746 * pthread library is not initialized yet - there is only one thread. 761 * pthread library is not initialized yet - there is only one thread.
747 * Calling pthread_mutex_lock will cause an access violation because it 762 * Calling pthread_mutex_lock will cause an access violation because it
748 * will attempt to access the TDB which is not initialized yet 763 * will attempt to access the TDB which is not initialized yet.
749 */ 764 */
750 return 0; 765 return 0;
751 } 766 }
752 767
753 if (lock != NULL) { 768 if (lock != NULL) {
754 return pthread_mutex_trylock((pthread_mutex_t*)lock); 769 return pthread_mutex_trylock((pthread_mutex_t*)lock);
755 } else { 770 } else {
756 return EINVAL; 771 return EINVAL;
757 } 772 }
758 } 773 }
759 774
760 int __local_lock_try_acquire_recursive(_LOCK_T* lock) { 775 int __local_lock_try_acquire_recursive(_LOCK_T *lock) {
761 return __local_lock_try_acquire(lock); 776 return __local_lock_try_acquire(lock);
762 } 777 }
763 778
764 void __local_lock_release(_LOCK_T* lock) { 779 void __local_lock_release(_LOCK_T *lock) {
765 if (!__nc_thread_initialized) { 780 if (!__nc_thread_initialized) {
766 /* 781 /*
767 * pthread library is not initialized yet - there is only one thread. 782 * pthread library is not initialized yet - there is only one thread.
768 * Calling pthread_mutex_lock will cause an access violation because it 783 * Calling pthread_mutex_lock will cause an access violation because it
769 * will attempt to access the TDB which is not initialized yet 784 * will attempt to access the TDB which is not initialized yet
770 * NOTE: there is no race condition here because the value of the counter 785 * NOTE: there is no race condition here because the value of the counter
771 * cannot change while the lock is held - the startup process is 786 * cannot change while the lock is held - the startup process is
772 * single-threaded. 787 * single-threaded.
773 */ 788 */
774 return; 789 return;
775 } 790 }
776 791
777 if (lock != NULL) { 792 if (lock != NULL) {
778 pthread_mutex_unlock((pthread_mutex_t*)lock); 793 pthread_mutex_unlock((pthread_mutex_t*)lock);
779 } 794 }
780 } 795 }
781 796
782 void __local_lock_release_recursive(_LOCK_T* lock) { 797 void __local_lock_release_recursive(_LOCK_T *lock) {
783 __local_lock_release(lock); 798 __local_lock_release(lock);
784 } 799 }
785 800
786 /* 801 /*
787 * We include this directly in this file rather than compiling it 802 * We include this directly in this file rather than compiling it
788 * separately because there is some code (e.g. libstdc++) that uses weak 803 * separately because there is some code (e.g. libstdc++) that uses weak
789 * references to all pthread functions, but conditionalizes its calls only 804 * references to all pthread functions, but conditionalizes its calls only
790 * on one symbol. So if these functions are in another file in a library 805 * on one symbol. So if these functions are in another file in a library
791 * archive, they might not be linked in by static linking. 806 * archive, they might not be linked in by static linking.
792 */ 807 */
793 #include "native_client/src/untrusted/pthread/nc_tsd.c" 808 #include "native_client/src/untrusted/pthread/nc_tsd.c"
OLDNEW
« no previous file with comments | « src/untrusted/pthread/nc_mutex.c ('k') | src/untrusted/pthread/pthread.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698