Chromium Code Reviews| Index: src/untrusted/pthread/nc_thread.c |
| diff --git a/src/untrusted/pthread/nc_thread.c b/src/untrusted/pthread/nc_thread.c |
| index fb2021a87d75ab2a50cb9bb49ecd833d84011624..b586eb94ca4c0f0aa02844b7599c3d7fd9bdc417 100644 |
| --- a/src/untrusted/pthread/nc_thread.c |
| +++ b/src/untrusted/pthread/nc_thread.c |
| @@ -33,7 +33,7 @@ |
| # include "native_client/src/untrusted/irt/irt_private.h" |
| #endif |
| -#define FUN_TO_VOID_PTR(a) ((void*)((uintptr_t) a)) |
| +#define FUN_TO_VOID_PTR(a) ((void *) ((uintptr_t) a)) |
|
Roland McGrath
2012/12/04 00:04:20
This also has superfluous parens around the second
Mark Seaborn
2012/12/05 05:10:57
Done.
|
| /* |
| * ABI tables for underlying NaCl thread interfaces. |
| @@ -59,8 +59,8 @@ static const uint32_t kStackPadBelowAlign = 0; |
| #define TDB_SIZE (sizeof(struct nc_combined_tdb)) |
| -static inline char* align(uint32_t offset, uint32_t alignment) { |
| - return (char*) ((offset + alignment - 1) & ~(alignment - 1)); |
| +static inline char *align(uint32_t offset, uint32_t alignment) { |
| + return (char *) ((offset + alignment - 1) & ~(alignment - 1)); |
| } |
| /* Thread management global variables */ |
| @@ -68,25 +68,26 @@ const int __nc_kMaxCachedMemoryBlocks = 50; |
| int __nc_thread_initialized; |
| -/* mutex used to synchronize thread management code */ |
| +/* Mutex used to synchronize thread management code */ |
| pthread_mutex_t __nc_thread_management_lock; |
| -/* condition variable that gets signaled when all the threads |
| +/* |
| + * Condition variable that gets signaled when all the threads |
| * except the main thread have terminated |
| */ |
| static pthread_cond_t __nc_last_thread_cond; |
| static pthread_t __nc_initial_thread_id; |
| -/* number of threads currently running in this NaCl module */ |
| +/* Number of threads currently running in this NaCl module */ |
| int __nc_running_threads_counter = 1; |
| -/* we have two queues of memory blocks - one for each type */ |
| +/* We have two queues of memory blocks - one for each type */ |
| STAILQ_HEAD(tailhead, entry) __nc_thread_memory_blocks[2]; |
| /* We need a counter for each queue to keep track of number of blocks */ |
| int __nc_memory_block_counter[2]; |
| #define NODE_TO_PAYLOAD(TlsNode) \ |
| - ((char*)(TlsNode) + sizeof(nc_thread_memory_block_t)) |
| + ((char *) (TlsNode) + sizeof(nc_thread_memory_block_t)) |
| /* Internal functions */ |
| @@ -110,13 +111,13 @@ static void nc_thread_starter(void) { |
| g_is_irt_internal_thread = 1; |
| #endif |
| void *retval = tdb->start_func(tdb->state); |
| - /* if the function returns, terminate the thread */ |
| + /* If the function returns, terminate the thread */ |
|
Roland McGrath
2012/12/04 00:04:20
If you're going to capitalize, you might as well p
Mark Seaborn
2012/12/05 05:10:57
Done.
|
| pthread_exit(retval); |
| /* NOTREACHED */ |
| /* TODO(gregoryd) - add assert */ |
| } |
| -static nc_thread_memory_block_t* nc_allocate_memory_block_mu( |
| +static nc_thread_memory_block_t *nc_allocate_memory_block_mu( |
| nc_thread_memory_block_type_t type, |
| int required_size) { |
| struct tailhead *head; |
| @@ -139,7 +140,7 @@ static nc_thread_memory_block_t* nc_allocate_memory_block_mu( |
| } |
| if (!STAILQ_EMPTY(head)) { |
| - /* try to get one from queue */ |
| + /* Try to get one from queue */ |
| nc_thread_memory_block_t *node = STAILQ_FIRST(head); |
| /* |
| @@ -148,7 +149,8 @@ static nc_thread_memory_block_t* nc_allocate_memory_block_mu( |
| * next queue entries if the first one is still in use. |
| */ |
| if (0 == node->is_used && node->size >= required_size) { |
| - /* This will only re-use the first node possibly, and could be |
| + /* |
| + * This will only re-use the first node possibly, and could be |
| * improved to provide the stack with a best-fit algorithm if needed. |
| * TODO: we should scan all nodes to see if there is one that fits |
| * before allocating another. |
| @@ -190,7 +192,7 @@ static nc_thread_memory_block_t* nc_allocate_memory_block_mu( |
| } |
| } |
| - /* no available blocks of the required type/size - allocate one */ |
| + /* No available blocks of the required type/size - allocate one */ |
| node = malloc(MEMORY_BLOCK_ALLOCATION_SIZE(required_size)); |
| if (NULL != node) { |
| memset(node, 0, sizeof(*node)); |
| @@ -201,7 +203,7 @@ static nc_thread_memory_block_t* nc_allocate_memory_block_mu( |
| } |
| static void nc_free_memory_block_mu(nc_thread_memory_block_type_t type, |
| - nc_thread_memory_block_t* node) { |
| + nc_thread_memory_block_t *node) { |
| /* assume the lock is held !!! */ |
| struct tailhead *head = &__nc_thread_memory_blocks[type]; |
| STAILQ_INSERT_TAIL(head, node, entries); |
| @@ -227,7 +229,7 @@ static void nc_release_tls_node(nc_thread_memory_block_t *block, |
| /* Initialize a newly allocated TDB to some default values */ |
| static int nc_tdb_init(nc_thread_descriptor_t *tdb, |
| - nc_basic_thread_data_t * basic_data) { |
| + nc_basic_thread_data_t *basic_data) { |
| tdb->tls_base = tdb; |
| tdb->basic_data = basic_data; |
| basic_data->tdb = tdb; |
| @@ -245,7 +247,8 @@ static int nc_tdb_init(nc_thread_descriptor_t *tdb, |
| tdb->irt_thread_data = NULL; |
| - /* Imitate PTHREAD_COND_INITIALIZER - we cannot use it directly here, |
| + /* |
| + * Imitate PTHREAD_COND_INITIALIZER - we cannot use it directly here, |
| * since this is not variable initialization. |
| */ |
| nc_pthread_condvar_ctor(&basic_data->join_condvar); |
| @@ -262,10 +265,12 @@ void __nc_initialize_globals(void) { |
| if (pthread_mutex_init(&__nc_thread_management_lock, NULL) != 0) |
| nc_abort(); |
| - /* Tell ThreadSanitizer to not generate happens-before arcs between uses of |
| - this mutex. Otherwise we miss to many real races. |
| - When not running under ThreadSanitizer, this is just a call to an empty |
| - function. */ |
| + /* |
| + * Tell ThreadSanitizer to not generate happens-before arcs between uses of |
| + * this mutex. Otherwise we miss too many real races. |
| + * When not running under ThreadSanitizer, this is just a call to an empty |
| + * function. |
| + */ |
| ANNOTATE_NOT_HAPPENS_BEFORE_MUTEX(&__nc_thread_management_lock); |
| if (pthread_cond_init(&__nc_last_thread_cond, NULL) != 0) |
| @@ -306,7 +311,8 @@ void __nc_initialize_unjoinable_thread(struct nc_combined_tdb *tdb) { |
| #else |
| -/* Will be called from the library startup code, |
| +/* |
| + * Will be called from the library startup code, |
| * which always happens on the application's main thread |
| */ |
| void __pthread_initialize(void) { |
| @@ -328,11 +334,11 @@ void __pthread_initialize(void) { |
| int pthread_create(pthread_t *thread_id, |
| const pthread_attr_t *attr, |
| - void *(*start_routine) (void *), |
| + void *(*start_routine)(void *), |
| void *arg) { |
| int retval = EAGAIN; |
| void *esp; |
| - /* declare the variables outside of the while scope */ |
| + /* Declare the variables outside of the while scope */ |
| nc_thread_memory_block_t *stack_node = NULL; |
| char *thread_stack = NULL; |
| nc_thread_descriptor_t *new_tdb = NULL; |
| @@ -358,7 +364,8 @@ int pthread_create(pthread_t *thread_id, |
| new_tdb = (nc_thread_descriptor_t *) |
| ((char *) new_tp + __nacl_tp_tdb_offset(TDB_SIZE)); |
| - /* TODO(gregoryd): consider creating a pool of basic_data structs, |
| + /* |
| + * TODO(gregoryd): consider creating a pool of basic_data structs, |
| * similar to stack and TLS+TDB (probably when adding the support for |
| * variable stack size). |
| */ |
| @@ -375,7 +382,8 @@ int pthread_create(pthread_t *thread_id, |
| nc_tdb_init(new_tdb, new_basic_data); |
| new_tdb->tls_node = tls_node; |
| - /* all the required members of the tdb must be initialized before |
| + /* |
| + * All the required members of the tdb must be initialized before |
| * the thread is started and actually before the global lock is released, |
| * since another thread can call pthread_join() or pthread_detach() |
| */ |
| @@ -404,15 +412,20 @@ int pthread_create(pthread_t *thread_id, |
| goto ret; /* error */ |
| } |
| - /* Speculatively increase the thread count. If thread creation |
| - fails, we will decrease it back. This way the thread count will |
| - never be lower than the actual number of threads, but can briefly be |
| - higher than that. */ |
| + /* |
| + * Speculatively increase the thread count. If thread creation |
| + * fails, we will decrease it back. This way the thread count will |
| + * never be lower than the actual number of threads, but can briefly |
| + * be higher than that. |
| + */ |
| ++__nc_running_threads_counter; |
| - /* Save the new thread id. This can not be done after the syscall, |
| - because the child thread could have already finished by that |
| - time. If thread creation fails, it will be overriden with -1 later.*/ |
| + /* |
| + * Save the new thread id. This can not be done after the syscall, |
| + * because the child thread could have already finished by that |
| + * time. If thread creation fails, it will be overridden with -1 |
| + * later. |
| + */ |
| *thread_id = new_basic_data; |
| pthread_mutex_unlock(&__nc_thread_management_lock); |
| @@ -428,7 +441,7 @@ int pthread_create(pthread_t *thread_id, |
| esp = (void *) (thread_stack + stacksize - kStackPadBelowAlign); |
| memset(esp, 0, kStackPadBelowAlign); |
| - /* start the thread */ |
| + /* Start the thread */ |
| retval = irt_thread.thread_create( |
| FUN_TO_VOID_PTR(nc_thread_starter), esp, new_tp); |
| if (0 != retval) { |
| @@ -443,7 +456,7 @@ int pthread_create(pthread_t *thread_id, |
| ret: |
| if (0 != retval) { |
| - /* failed to create a thread */ |
| + /* Failed to create a thread */ |
| pthread_mutex_lock(&__nc_thread_management_lock); |
| nc_release_tls_node(tls_node, new_tdb); |
| @@ -475,15 +488,15 @@ static int wait_for_threads(void) { |
| return 0; |
| } |
| -void pthread_exit (void* retval) { |
| - /* get all we need from the tdb before releasing it */ |
| +void pthread_exit(void *retval) { |
| + /* Get all we need from the tdb before releasing it */ |
| nc_thread_descriptor_t *tdb = nc_get_tdb(); |
| nc_thread_memory_block_t *stack_node = tdb->stack_node; |
| int32_t *is_used = &stack_node->is_used; |
| nc_basic_thread_data_t *basic_data = tdb->basic_data; |
| int joinable = tdb->joinable; |
| - /* call the destruction functions for TSD */ |
| + /* Call the destruction functions for TSD */ |
| __nc_tsd_exit(); |
| __newlib_thread_exit(); |
| @@ -519,7 +532,7 @@ void pthread_exit (void* retval) { |
| nc_release_basic_data_mu(basic_data); |
| } |
| - /* now add the stack to the list but keep it marked as used */ |
| + /* Now add the stack to the list but keep it marked as used */ |
| nc_free_memory_block_mu(THREAD_STACK_MEMORY, stack_node); |
| if (1 == __nc_running_threads_counter) { |
| @@ -544,12 +557,12 @@ int pthread_join(pthread_t thread_id, void **thread_return) { |
| /* The thread is still running */ |
| nc_thread_descriptor_t *joined_tdb = basic_data->tdb; |
| if (!joined_tdb->joinable || joined_tdb->join_waiting) { |
| - /* the thread is detached or another thread is waiting to join */ |
| + /* The thread is detached or another thread is waiting to join */ |
| retval = EINVAL; |
| goto ret; |
| } |
| joined_tdb->join_waiting = 1; |
| - /* wait till the thread terminates */ |
| + /* Wait till the thread terminates */ |
| while (THREAD_TERMINATED != basic_data->status) { |
| pthread_cond_wait(&basic_data->join_condvar, |
| &__nc_thread_management_lock); |
| @@ -558,12 +571,12 @@ int pthread_join(pthread_t thread_id, void **thread_return) { |
| ANNOTATE_CONDVAR_LOCK_WAIT(&basic_data->join_condvar, |
| &__nc_thread_management_lock); |
| /* The thread has already terminated */ |
| - /* save the return value */ |
| + /* Save the return value */ |
| if (thread_return != NULL) { |
| *thread_return = basic_data->retval; |
| } |
| - /* release the resources */ |
| + /* Release the resources */ |
| nc_release_basic_data_mu(basic_data); |
| retval = 0; |
| @@ -578,8 +591,10 @@ int pthread_detach(pthread_t thread_id) { |
| int retval = 0; |
| nc_basic_thread_data_t *basic_data = thread_id; |
| nc_thread_descriptor_t *detached_tdb; |
| - /* TODO(gregoryd) - can be optimized using InterlockedExchange |
| - * once it's available */ |
| + /* |
| + * TODO(gregoryd) - can be optimized using InterlockedExchange |
| + * once it's available |
| + */ |
| pthread_mutex_lock(&__nc_thread_management_lock); |
| detached_tdb = basic_data->tdb; |
| @@ -591,11 +606,11 @@ int pthread_detach(pthread_t thread_id) { |
| if (detached_tdb->joinable) { |
| detached_tdb->joinable = 0; |
| } else { |
| - /* already detached */ |
| + /* Already detached */ |
| retval = EINVAL; |
| } |
| } else { |
| - /* another thread is already waiting to join - do nothing */ |
| + /* Another thread is already waiting to join - do nothing */ |
| } |
| } |
| pthread_mutex_unlock(&__nc_thread_management_lock); |
| @@ -609,12 +624,12 @@ int pthread_kill(pthread_t thread_id, |
| } |
| pthread_t pthread_self(void) { |
| - /* get the tdb pointer from gs and use it to return the thread handle*/ |
| + /* Get the tdb pointer from gs and use it to return the thread handle */ |
| nc_thread_descriptor_t *tdb = nc_get_tdb(); |
| return tdb->basic_data; |
| } |
| -int pthread_equal (pthread_t thread1, pthread_t thread2) { |
| +int pthread_equal(pthread_t thread1, pthread_t thread2) { |
| return (thread1 == thread2); |
| } |
| @@ -628,7 +643,7 @@ int pthread_setschedprio(pthread_t thread_id, int prio) { |
| return irt_thread.thread_nice(prio); |
| } |
| -int pthread_attr_init (pthread_attr_t *attr) { |
| +int pthread_attr_init(pthread_attr_t *attr) { |
| if (NULL == attr) { |
| return EINVAL; |
| } |
| @@ -637,16 +652,16 @@ int pthread_attr_init (pthread_attr_t *attr) { |
| return 0; |
| } |
| -int pthread_attr_destroy (pthread_attr_t *attr) { |
| +int pthread_attr_destroy(pthread_attr_t *attr) { |
| if (NULL == attr) { |
| return EINVAL; |
| } |
| - /* nothing to destroy */ |
| + /* Nothing to destroy */ |
| return 0; |
| } |
| -int pthread_attr_setdetachstate (pthread_attr_t *attr, |
| - int detachstate) { |
| +int pthread_attr_setdetachstate(pthread_attr_t *attr, |
| + int detachstate) { |
| if (NULL == attr) { |
| return EINVAL; |
| } |
| @@ -654,8 +669,8 @@ int pthread_attr_setdetachstate (pthread_attr_t *attr, |
| return 0; |
| } |
| -int pthread_attr_getdetachstate (pthread_attr_t *attr, |
| - int *detachstate) { |
| +int pthread_attr_getdetachstate(pthread_attr_t *attr, |
| + int *detachstate) { |
| if (NULL == attr) { |
| return EINVAL; |
| } |
| @@ -684,18 +699,18 @@ int pthread_attr_getstacksize(pthread_attr_t *attr, |
| return 0; |
| } |
| -void __local_lock_init(_LOCK_T* lock); |
| -void __local_lock_init_recursive(_LOCK_T* lock); |
| -void __local_lock_close(_LOCK_T* lock); |
| -void __local_lock_close_recursive(_LOCK_T* lock); |
| -void __local_lock_acquire(_LOCK_T* lock); |
| -void __local_lock_acquire_recursive(_LOCK_T* lock); |
| -int __local_lock_try_acquire(_LOCK_T* lock); |
| -int __local_lock_try_acquire_recursive(_LOCK_T* lock); |
| -void __local_lock_release(_LOCK_T* lock); |
| -void __local_lock_release_recursive(_LOCK_T* lock); |
| +void __local_lock_init(_LOCK_T *lock); |
| +void __local_lock_init_recursive(_LOCK_T *lock); |
| +void __local_lock_close(_LOCK_T *lock); |
| +void __local_lock_close_recursive(_LOCK_T *lock); |
| +void __local_lock_acquire(_LOCK_T *lock); |
| +void __local_lock_acquire_recursive(_LOCK_T *lock); |
| +int __local_lock_try_acquire(_LOCK_T *lock); |
| +int __local_lock_try_acquire_recursive(_LOCK_T *lock); |
| +void __local_lock_release(_LOCK_T *lock); |
| +void __local_lock_release_recursive(_LOCK_T *lock); |
| -void __local_lock_init(_LOCK_T* lock) { |
| +void __local_lock_init(_LOCK_T *lock) { |
| if (lock != NULL) { |
| pthread_mutexattr_t attr; |
| pthread_mutexattr_init(&attr); |
| @@ -704,7 +719,7 @@ void __local_lock_init(_LOCK_T* lock) { |
| } |
| } |
| -void __local_lock_init_recursive(_LOCK_T* lock) { |
| +void __local_lock_init_recursive(_LOCK_T *lock) { |
| if (lock != NULL) { |
| pthread_mutexattr_t attr; |
| pthread_mutexattr_init(&attr); |
| @@ -713,17 +728,17 @@ void __local_lock_init_recursive(_LOCK_T* lock) { |
| } |
| } |
| -void __local_lock_close(_LOCK_T* lock) { |
| +void __local_lock_close(_LOCK_T *lock) { |
| if (lock != NULL) { |
| pthread_mutex_destroy((pthread_mutex_t*)lock); |
| } |
| } |
| -void __local_lock_close_recursive(_LOCK_T* lock) { |
| +void __local_lock_close_recursive(_LOCK_T *lock) { |
| __local_lock_close(lock); |
| } |
| -void __local_lock_acquire(_LOCK_T* lock) { |
| +void __local_lock_acquire(_LOCK_T *lock) { |
| if (!__nc_thread_initialized) { |
| /* |
| * pthread library is not initialized yet - there is only one thread. |
| @@ -737,17 +752,17 @@ void __local_lock_acquire(_LOCK_T* lock) { |
| } |
| } |
| -void __local_lock_acquire_recursive(_LOCK_T* lock) { |
| +void __local_lock_acquire_recursive(_LOCK_T *lock) { |
| __local_lock_acquire(lock); |
| } |
| -int __local_lock_try_acquire(_LOCK_T* lock) { |
| +int __local_lock_try_acquire(_LOCK_T *lock) { |
| if (!__nc_thread_initialized) { |
| /* |
| - * pthread library is not initialized yet - there is only one thread. |
| - * Calling pthread_mutex_lock will cause an access violation because it |
| - * will attempt to access the TDB which is not initialized yet |
| - */ |
| + * pthread library is not initialized yet - there is only one thread. |
| + * Calling pthread_mutex_lock will cause an access violation because it |
| + * will attempt to access the TDB which is not initialized yet |
| + */ |
| return 0; |
| } |
| @@ -758,20 +773,20 @@ int __local_lock_try_acquire(_LOCK_T* lock) { |
| } |
| } |
| -int __local_lock_try_acquire_recursive(_LOCK_T* lock) { |
| +int __local_lock_try_acquire_recursive(_LOCK_T *lock) { |
| return __local_lock_try_acquire(lock); |
| } |
| -void __local_lock_release(_LOCK_T* lock) { |
| +void __local_lock_release(_LOCK_T *lock) { |
| if (!__nc_thread_initialized) { |
| /* |
| - * pthread library is not initialized yet - there is only one thread. |
| - * Calling pthread_mutex_lock will cause an access violation because it |
| - * will attempt to access the TDB which is not initialized yet |
| - * NOTE: there is no race condition here because the value of the counter |
| - * cannot change while the lock is held - the startup process is |
| - * single-threaded. |
| - */ |
| + * pthread library is not initialized yet - there is only one thread. |
| + * Calling pthread_mutex_lock will cause an access violation because it |
| + * will attempt to access the TDB which is not initialized yet |
| + * NOTE: there is no race condition here because the value of the counter |
| + * cannot change while the lock is held - the startup process is |
| + * single-threaded. |
| + */ |
| return; |
| } |
| @@ -780,7 +795,7 @@ void __local_lock_release(_LOCK_T* lock) { |
| } |
| } |
| -void __local_lock_release_recursive(_LOCK_T* lock) { |
| +void __local_lock_release_recursive(_LOCK_T *lock) { |
| __local_lock_release(lock); |
| } |