/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
#include "SkThread.h"

#include <pthread.h>
#include <errno.h>

#ifndef SK_BUILD_FOR_ANDROID
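// On Android builds everything up to the matching #endif is skipped; the
// atomic operations and membar hooks are presumably supplied by a
// platform-specific implementation elsewhere in the build.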

/**
 We prefer the GCC intrinsic implementation of the atomic operations over the
 SkMutex-based implementation: the SkMutex version suffers from static
 destructor ordering problems.
 Note that clang also defines the GCC version macros and implements the
 intrinsics.
 TODO: Verify that the gcc-style __sync_* intrinsics work on ARM.
 According to the LLVM 2.7 release notes, the intrinsics are supported on ARM
 in LLVM 2.7 and later:
 http://llvm.org/releases/2.7/docs/ReleaseNotes.html
*/
#if (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) || __GNUC__ > 4
    #if (defined(__x86_64) || defined(__i386__))
        #define GCC_INTRINSIC
    #endif
#endif

#if defined(GCC_INTRINSIC)

int32_t sk_atomic_inc(int32_t* addr)
{
    return __sync_fetch_and_add(addr, 1);
}

int32_t sk_atomic_add(int32_t* addr, int32_t inc)
{
    return __sync_fetch_and_add(addr, inc);
}

int32_t sk_atomic_dec(int32_t* addr)
{
    return __sync_fetch_and_add(addr, -1);
}
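// The __sync_* builtins above already act as full memory barriers, so the
// sk_membar_aquire hooks in this branch can be no-ops.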
void sk_membar_aquire__after_atomic_dec() { }

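// Atomically increment *addr only if it is non-zero, returning the prior
// value. Implemented as a compare-and-swap retry loop: if another thread
// changes *addr between the load and the CAS, reload and try again.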
int32_t sk_atomic_conditional_inc(int32_t* addr)
{
    int32_t value = *addr;

    while (true) {
        if (value == 0) {
            return 0;
        }

        int32_t before = __sync_val_compare_and_swap(addr, value, value + 1);

        if (before == value) {
            return value;
        } else {
            value = before;
        }
    }
}
void sk_membar_aquire__after_atomic_conditional_inc() { }

#else

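// Fallback: serialize every atomic operation through one global mutex.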
SkMutex gAtomicMutex;

int32_t sk_atomic_inc(int32_t* addr)
{
    SkAutoMutexAcquire ac(gAtomicMutex);

    int32_t value = *addr;
    *addr = value + 1;
    return value;
}

int32_t sk_atomic_add(int32_t* addr, int32_t inc)
{
    SkAutoMutexAcquire ac(gAtomicMutex);

    int32_t value = *addr;
    *addr = value + inc;
    return value;
}

int32_t sk_atomic_dec(int32_t* addr)
{
    SkAutoMutexAcquire ac(gAtomicMutex);

    int32_t value = *addr;
    *addr = value - 1;
    return value;
}
void sk_membar_aquire__after_atomic_dec() { }

int32_t sk_atomic_conditional_inc(int32_t* addr)
{
    SkAutoMutexAcquire ac(gAtomicMutex);

    int32_t value = *addr;
    if (value != 0) ++*addr;
    return value;
}
void sk_membar_aquire__after_atomic_conditional_inc() { }

#endif

#endif // SK_BUILD_FOR_ANDROID

//////////////////////////////////////////////////////////////////////////////

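// Map the pthread error codes checked below (EINVAL, EBUSY) to readable
// debug output; silent on success.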
static void print_pthread_error(int status) {
    switch (status) {
        case 0: // success
            break;
        case EINVAL:
            SkDebugf("pthread error [%d] EINVAL\n", status);
            break;
        case EBUSY:
            SkDebugf("pthread error [%d] EBUSY\n", status);
            break;
        default:
            SkDebugf("pthread error [%d] unknown\n", status);
            break;
    }
}

#ifdef SK_USE_POSIX_THREADS

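// In this configuration SkMutex wraps a pthread_mutex_t member (fMutex)
// directly; acquire() and release() are presumably defined elsewhere (e.g.
// inline in the header), so only construction and destruction live here.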
SkMutex::SkMutex() {
    int status;

    status = pthread_mutex_init(&fMutex, NULL);
    if (status != 0) {
        print_pthread_error(status);
        SkASSERT(0 == status);
    }
}

SkMutex::~SkMutex() {
    int status = pthread_mutex_destroy(&fMutex);

    // Report any failure to destroy the mutex (e.g. EBUSY if it is still
    // locked or referenced by another thread).
    if (status != 0) {
        print_pthread_error(status);
        SkASSERT(0 == status);
    }
}

#else // !SK_USE_POSIX_THREADS

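// In this configuration the pthread_mutex_t is constructed in place inside
// fStorage, an opaque byte buffer reserved in the class declaration.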
SkMutex::SkMutex() {
    if (sizeof(pthread_mutex_t) > sizeof(fStorage)) {
        SkDEBUGF(("pthread mutex size = %d\n", (int)sizeof(pthread_mutex_t)));
        SkDEBUGFAIL("mutex storage is too small");
    }

    int status;
    pthread_mutexattr_t attr;

    status = pthread_mutexattr_init(&attr);
    print_pthread_error(status);
    SkASSERT(0 == status);

    status = pthread_mutex_init((pthread_mutex_t*)fStorage, &attr);
    print_pthread_error(status);
    SkASSERT(0 == status);

    // The attribute object is no longer needed once the mutex is initialized.
    (void)pthread_mutexattr_destroy(&attr);
}

SkMutex::~SkMutex() {
    int status = pthread_mutex_destroy((pthread_mutex_t*)fStorage);
#if 0
    // only report errors on non-global mutexes
    if (!fIsGlobal) {
        print_pthread_error(status);
        SkASSERT(0 == status);
    }
#endif
}

void SkMutex::acquire() {
    int status = pthread_mutex_lock((pthread_mutex_t*)fStorage);
    print_pthread_error(status);
    SkASSERT(0 == status);
}

void SkMutex::release() {
    int status = pthread_mutex_unlock((pthread_mutex_t*)fStorage);
    print_pthread_error(status);
    SkASSERT(0 == status);
}

#endif // !SK_USE_POSIX_THREADS