OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2014 Google Inc. | 2 * Copyright 2014 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkOnce.h" | 8 #include "SkOnce.h" |
9 #include "SkRunnable.h" | 9 #include "SkRunnable.h" |
10 #include "SkSemaphore.h" | 10 #include "SkSemaphore.h" |
(...skipping 178 matching lines...) |
189 // and that's the only way to decrement fWorkAvailable. | 189 // and that's the only way to decrement fWorkAvailable. |
190 // So fWorkAvailable may overcount the actual work available. | 190 // So fWorkAvailable may overcount the actual work available. |
191 // We make do, but this means some worker threads may wake spuriously. | 191 // We make do, but this means some worker threads may wake spuriously. |
192 SkSemaphore fWorkAvailable; | 192 SkSemaphore fWorkAvailable; |
193 | 193 |
194 // These are only changed in a single-threaded context. | 194 // These are only changed in a single-threaded context. |
195 SkTDArray<SkThread*> fThreads; | 195 SkTDArray<SkThread*> fThreads; |
196 static ThreadPool* gGlobal; | 196 static ThreadPool* gGlobal; |
197 | 197 |
198 friend struct SkTaskGroup::Enabler; | 198 friend struct SkTaskGroup::Enabler; |
| 199 friend int ::sk_parallel_for_thread_count(); |
199 }; | 200 }; |
200 ThreadPool* ThreadPool::gGlobal = nullptr; | 201 ThreadPool* ThreadPool::gGlobal = nullptr; |
201 | 202 |
202 } // namespace | 203 } // namespace |
203 | 204 |
204 SkTaskGroup::Enabler::Enabler(int threads) { | 205 SkTaskGroup::Enabler::Enabler(int threads) { |
205 SkASSERT(ThreadPool::gGlobal == nullptr); | 206 SkASSERT(ThreadPool::gGlobal == nullptr); |
206 if (threads != 0) { | 207 if (threads != 0) { |
207 ThreadPool::gGlobal = new ThreadPool(threads); | 208 ThreadPool::gGlobal = new ThreadPool(threads); |
208 } | 209 } |
209 } | 210 } |
210 | 211 |
211 SkTaskGroup::Enabler::~Enabler() { delete ThreadPool::gGlobal; } | 212 SkTaskGroup::Enabler::~Enabler() { delete ThreadPool::gGlobal; } |
212 | 213 |
213 SkTaskGroup::SkTaskGroup() : fPending(0) {} | 214 SkTaskGroup::SkTaskGroup() : fPending(0) {} |
214 | 215 |
215 void SkTaskGroup::wait() { ThreadPool::Wait(&fPending); } | 216 void SkTaskGroup::wait() { ThreadPool::Wait(&fPending); } |
216 void SkTaskGroup::add(SkRunnable* task) { ThreadPool::Add(task, &fPending); } | 217 void SkTaskGroup::add(SkRunnable* task) { ThreadPool::Add(task, &fPending); } |
217 void SkTaskGroup::add(void (*fn)(void*), void* arg) { ThreadPool::Add(fn, arg, &fPending); } | 218 void SkTaskGroup::add(void (*fn)(void*), void* arg) { ThreadPool::Add(fn, arg, &fPending); } |
218 void SkTaskGroup::batch (void (*fn)(void*), void* args, int N, size_t stride) { | 219 void SkTaskGroup::batch (void (*fn)(void*), void* args, int N, size_t stride) { |
219 ThreadPool::Batch(fn, args, N, stride, &fPending); | 220 ThreadPool::Batch(fn, args, N, stride, &fPending); |
220 } | 221 } |
221 | 222 |
| 223 int sk_parallel_for_thread_count() { |
| 224 if (ThreadPool::gGlobal != nullptr) { |
| 225 return ThreadPool::gGlobal->fThreads.count(); |
| 226 } |
| 227 return 0; |
| 228 } |
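
Not part of the patch above: a minimal caller-side sketch of how the new sk_parallel_for_thread_count() hook and the existing SkTaskGroup API might be used together. It assumes SkTaskGroup.h declares sk_parallel_for_thread_count() (as the friend declaration added above implies) and that a 4-thread pool is appropriate; double_chunk and run_example are illustrative names only, not anything in this CL.

#include "SkTaskGroup.h"
#include "SkTDArray.h"

// Stand-in for real per-chunk work: doubles one int.
static void double_chunk(void* arg) {
    int* chunk = static_cast<int*>(arg);
    *chunk *= 2;
}

void run_example() {
    // One Enabler per process: it builds the global 4-thread pool and
    // deletes it again when it falls out of scope.
    SkTaskGroup::Enabler enabler(4);

    // New in this patch: the pool's thread count, or 0 when no global
    // pool has been enabled.
    const int threads = sk_parallel_for_thread_count();
    const int chunks  = threads > 0 ? threads : 1;

    SkTDArray<int> data;
    data.setCount(chunks);
    for (int i = 0; i < chunks; i++) {
        data[i] = i;
    }

    SkTaskGroup tg;
    tg.batch(double_chunk, data.begin(), chunks, sizeof(int));  // one task per chunk
    tg.wait();  // block until every chunk has been doubled
}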