| OLD | NEW |
| 1 #include "pthread_impl.h" | 1 #include "pthread_impl.h" |
| 2 | 2 |
/* Non-blocking acquisition for all mutex types that track an owner
 * (recursive, error-checking, robust) -- everything except plain
 * PTHREAD_MUTEX_NORMAL, which takes the fast path in
 * __pthread_mutex_trylock below.
 *
 * Returns 0 on success, EBUSY if the lock is held, EAGAIN if a
 * recursive lock would overflow its count, EOWNERDEAD if a robust
 * mutex was acquired after its previous owner died, and
 * ENOTRECOVERABLE if such a mutex was never made consistent again. */
int __pthread_mutex_trylock_owner(pthread_mutex_t* m) {
  int old, own;
  /* Low 4 bits of _m_type select the mutex kind. */
  int type = m->_m_type & 15;
  pthread_t self = __pthread_self();
  int tid = self->tid;

  old = m->_m_lock;
  /* Mask off the top bit of the lock word to get the owner id;
   * the top bit is used below as a waiters flag. */
  own = old & 0x7fffffff;
  if (own == tid && (type & 3) == PTHREAD_MUTEX_RECURSIVE) {
    /* Re-entry by the current owner: just bump the count, guarding
     * against overflow of the (signed) recursion counter. */
    if ((unsigned)m->_m_count >= INT_MAX)
      return EAGAIN;
    m->_m_count++;
    return 0;
  }
  /* 0x40000000 is the sentinel owner value for a robust mutex whose
   * dead-owner state was never cleared via consistent(); it can no
   * longer be acquired. */
  if (own == 0x40000000)
    return ENOTRECOVERABLE;

  /* Bit 128 of _m_type marks mutexes that must be registered on the
   * kernel robust list (so the kernel can hand them off if this
   * thread dies while holding them). */
  if (m->_m_type & 128) {
    if (!self->robust_list.off) {
      /* Lazily tell the kernel the offset from the list-node field
       * (_m_next) to the lock word, then register this thread's
       * robust list head. */
      self->robust_list.off = (char*)&m->_m_lock - (char*)&m->_m_next;
      __syscall(SYS_set_robust_list, &self->robust_list, 3 * sizeof(long));
    }
    if (m->_m_waiters)
      tid |= 0x80000000; /* preserve the waiters flag in the lock word */
    /* Mark this mutex as "acquisition in progress" so the kernel can
     * recover it if we die between the CAS and the list insertion. */
    self->robust_list.pending = &m->_m_next;
  }

  /* Fail with EBUSY unless the lock is free, or it is owner-dead
   * (0x40000000 bit set) on a robust-type mutex (type & 4), in which
   * case we may steal it.  Then attempt the atomic acquisition; a CAS
   * failure means we raced with another thread. */
  if ((own && (!(own & 0x40000000) || !(type & 4))) ||
      a_cas(&m->_m_lock, old, tid) != old) {
    self->robust_list.pending = 0;
    return EBUSY;
  }

  /* Lock acquired: splice this mutex onto the head of the thread's
   * doubly-linked robust list, then clear the pending marker. */
  volatile void* next = self->robust_list.head;
  m->_m_next = next;
  m->_m_prev = &self->robust_list.head;
  if (next != &self->robust_list.head)
    /* Patch the old head's prev pointer, which lives one pointer
     * before its _m_next field. */
    *(volatile void* volatile*)((char*)next - sizeof(void*)) = &m->_m_next;
  self->robust_list.head = &m->_m_next;
  self->robust_list.pending = 0;

  if (own) {
    /* We took over from a dead owner: reset the recursion count,
     * flag the mutex as owner-dead (bit 8), and report it. */
    m->_m_count = 0;
    m->_m_type |= 8;
    return EOWNERDEAD;
  }

  return 0;
}
| 50 | 52 |
| 51 int __pthread_mutex_trylock(pthread_mutex_t *m) | 53 int __pthread_mutex_trylock(pthread_mutex_t* m) { |
| 52 { | 54 if ((m->_m_type & 15) == PTHREAD_MUTEX_NORMAL) |
| 53 » if ((m->_m_type&15) == PTHREAD_MUTEX_NORMAL) | 55 return a_cas(&m->_m_lock, 0, EBUSY) & EBUSY; |
| 54 » » return a_cas(&m->_m_lock, 0, EBUSY) & EBUSY; | 56 return __pthread_mutex_trylock_owner(m); |
| 55 » return __pthread_mutex_trylock_owner(m); | |
| 56 } | 57 } |
| 57 | 58 |
| 58 weak_alias(__pthread_mutex_trylock, pthread_mutex_trylock); | 59 weak_alias(__pthread_mutex_trylock, pthread_mutex_trylock); |
| OLD | NEW |