Chromium Code Reviews

Unified Diff: src/core/SkTaskGroup.cpp

Issue 1552093002: If we swap its arguments, SkTaskGroup::batch() _is_ sk_parallel_for. (Closed)
Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Created 4 years, 11 months ago
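
For context on the CL title, here is what the new argument order looks like at a call site. This is only a sketch: do_row(), kRowCount, and process_rows() are made-up names, and it assumes an SkTaskGroup::Enabler has already created the global thread pool (otherwise, as Batch() in the diff below shows, the N calls simply run inline).

    #include "SkTaskGroup.h"

    // Hypothetical work item and count, for illustration only.
    static void do_row(int i) { /* process row i */ }
    static const int kRowCount = 256;

    static void process_rows() {
        SkTaskGroup tg;
        // Before this CL the call read tg.batch([](int i) { do_row(i); }, kRowCount);
        // after it, batch() takes (N, fn), the same shape as sk_parallel_for(N, fn),
        // so a synchronous parallel-for is just "make a group, batch, wait".
        tg.batch(kRowCount, [](int i) { do_row(i); });
        tg.wait();
    }
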
 /*
  * Copyright 2014 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */

 #include "SkOnce.h"
 #include "SkRunnable.h"
 #include "SkSemaphore.h"
(...skipping 36 matching lines...)
         gGlobal->add([task]() { task->run(); }, pending);
     }

     static void Add(std::function<void(void)> fn, SkAtomic<int32_t>* pending) {
         if (!gGlobal) {
             return fn();
         }
         gGlobal->add(fn, pending);
     }

-    static void Batch(std::function<void(int)> fn, int N, SkAtomic<int32_t>* pending) {
+    static void Batch(int N, std::function<void(int)> fn, SkAtomic<int32_t>* pending) {
         if (!gGlobal) {
             for (int i = 0; i < N; i++) { fn(i); }
             return;
         }
-        gGlobal->batch(fn, N, pending);
+        gGlobal->batch(N, fn, pending);
     }

     static void Wait(SkAtomic<int32_t>* pending) {
         if (!gGlobal) {  // If we have no threads, the work must already be done.
             SkASSERT(pending->load(sk_memory_order_relaxed) == 0);
             return;
         }
         // Acquire pairs with decrement release here or in Loop.
         while (pending->load(sk_memory_order_acquire) > 0) {
             // Lend a hand until our SkTaskGroup of interest is done.
(...skipping 62 matching lines...)
     void add(std::function<void(void)> fn, SkAtomic<int32_t>* pending) {
         Work work = { fn, pending };
         pending->fetch_add(+1, sk_memory_order_relaxed);  // No barrier needed.
         {
             AutoLock lock(&fWorkLock);
             fWork.push_back(work);
         }
         fWorkAvailable.signal(1);
     }

-    void batch(std::function<void(int)> fn, int N, SkAtomic<int32_t>* pending) {
+    void batch(int N, std::function<void(int)> fn, SkAtomic<int32_t>* pending) {
         pending->fetch_add(+N, sk_memory_order_relaxed);  // No barrier needed.
         {
             AutoLock lock(&fWorkLock);
             for (int i = 0; i < N; i++) {
                 Work work = { [i, fn]() { fn(i); }, pending };
                 fWork.push_back(work);
             }
         }
         fWorkAvailable.signal(N);
     }
(...skipping 33 matching lines...)
     // and that's the only way to decrement fWorkAvailable.
     // So fWorkAvailable may overcount actual the work available.
     // We make do, but this means some worker threads may wake spuriously.
     SkSemaphore fWorkAvailable;

     // These are only changed in a single-threaded context.
     SkTDArray<SkThread*> fThreads;
     static ThreadPool* gGlobal;

     friend struct SkTaskGroup::Enabler;
-    friend int ::sk_parallel_for_thread_count();
 };
 ThreadPool* ThreadPool::gGlobal = nullptr;

 }  // namespace

 SkTaskGroup::Enabler::Enabler(int threads) {
     SkASSERT(ThreadPool::gGlobal == nullptr);
     if (threads != 0) {
         ThreadPool::gGlobal = new ThreadPool(threads);
     }
 }

 SkTaskGroup::Enabler::~Enabler() { delete ThreadPool::gGlobal; }

 SkTaskGroup::SkTaskGroup() : fPending(0) {}

 void SkTaskGroup::wait()                            { ThreadPool::Wait(&fPending); }
 void SkTaskGroup::add(SkRunnable* task)             { ThreadPool::Add(task, &fPending); }
 void SkTaskGroup::add(std::function<void(void)> fn) { ThreadPool::Add(fn, &fPending); }
-void SkTaskGroup::batch (std::function<void(int)> fn, int N) {
-    ThreadPool::Batch(fn, N, &fPending);
-}
+void SkTaskGroup::batch(int N, std::function<void(int)> fn) {
+    ThreadPool::Batch(N, fn, &fPending);
+}

-int sk_parallel_for_thread_count() {
-    if (ThreadPool::gGlobal != nullptr) {
-        return ThreadPool::gGlobal->fThreads.count();
-    }
-    return 0;
-}
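
The memory-ordering comments above ("No barrier needed", "Acquire pairs with decrement release here or in Loop") refer to a worker loop that falls inside the skipped lines. As a standalone sketch of that pending-counter protocol (not the Skia implementation; std::atomic, std::mutex/std::condition_variable, and std::thread stand in for the Sk equivalents):

    #include <atomic>
    #include <condition_variable>
    #include <deque>
    #include <functional>
    #include <mutex>
    #include <thread>

    struct Work {
        std::function<void(void)> fn;
        std::atomic<int32_t>*     pending;
    };

    static std::mutex              gWorkLock;
    static std::condition_variable gWorkAvailable;
    static std::deque<Work>        gWork;

    static void batch(int N, std::function<void(int)> fn, std::atomic<int32_t>* pending) {
        // Relaxed is enough here: the mutex publishes the work items, and wait()
        // only needs to synchronize with the release decrements below.
        pending->fetch_add(N, std::memory_order_relaxed);
        {
            std::lock_guard<std::mutex> lock(gWorkLock);
            for (int i = 0; i < N; i++) {
                gWork.push_back({ [i, fn]() { fn(i); }, pending });
            }
        }
        gWorkAvailable.notify_all();
    }

    static void worker_loop() {
        for (;;) {
            Work work;
            {
                std::unique_lock<std::mutex> lock(gWorkLock);
                gWorkAvailable.wait(lock, [] { return !gWork.empty(); });
                work = gWork.front();
                gWork.pop_front();
            }
            work.fn();
            // Release pairs with the acquire load in wait(): everything fn wrote
            // becomes visible to whoever sees the counter reach zero.
            work.pending->fetch_sub(1, std::memory_order_release);
        }
    }

    static void wait(std::atomic<int32_t>* pending) {
        while (pending->load(std::memory_order_acquire) > 0) {
            // The real Wait() lends a hand by running queued work; this sketch just yields.
            std::this_thread::yield();
        }
    }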