Chromium Code Reviews

Unified Diff: src/utils/SkTaskGroup.cpp

Issue 531653002: SkThreadPool ~~> SkTaskGroup (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Lazy and leaky -> proactive and clean. Created 6 years, 3 months ago
Index: src/utils/SkTaskGroup.cpp
diff --git a/src/utils/SkTaskGroup.cpp b/src/utils/SkTaskGroup.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e7de6becd81ca55ef5040822d44d476b19e75358
--- /dev/null
+++ b/src/utils/SkTaskGroup.cpp
@@ -0,0 +1,143 @@
+#include "SkTaskGroup.h"
+
+#include "SkCondVar.h"
+#include "SkTDArray.h"
+#include "SkThread.h"
+#include "SkThreadUtils.h"
+
+#if defined(SK_BUILD_FOR_WIN32)
+ static inline int num_cores() {
+ SYSTEM_INFO sysinfo;
+ GetSystemInfo(&sysinfo);
+ return sysinfo.dwNumberOfProcessors;
+ }
+#else
+ #include <unistd.h>
+ static inline int num_cores() {
+ return (int) sysconf(_SC_NPROCESSORS_ONLN);
+ }
+#endif
+
+namespace {
+
+class ThreadPool : SkNoncopyable {
+public:
+ static void Add(SkRunnable* task, int32_t* pending) {
+ SkASSERT(gGlobal); // If this fails, see SkTaskGroup::Enabler.
+ gGlobal->add(task, pending);
+ }
+
+ static void Wait(int32_t* pending) {
+ SkASSERT(gGlobal); // If this fails, see SkTaskGroup::Enabler.
+ while (sk_acquire_load(pending) > 0) { // Pairs with sk_atomic_dec here or in Loop.
+ // Lend a hand until our SkTaskGroup of interest is done.
+ Work work;
+ {
+ AutoLock lock(&gGlobal->fReady);
+ if (gGlobal->fWork.isEmpty()) {
+ // Someone has picked up all the work (including ours). How nice of them!
+ // (They may still be working on it, so we can't assert *pending == 0 here.)
+ continue;
+ }
+ gGlobal->fWork.pop(&work);
+ }
+ // This Work isn't necessarily part of our SkTaskGroup of interest, but that's fine.
+ // We threads gotta stick together. We're always making forward progress.
+ work.task->run();
+ sk_atomic_dec(work.pending); // Release pairs with the sk_acquire_load() just above.
+ }
+ }
+
+private:
+ struct AutoLock {
+ AutoLock(SkCondVar* c) : fC(c) { fC->lock(); }
+ ~AutoLock() { fC->unlock(); }
+ private:
+ SkCondVar* fC;
+ };
+
+ struct Work {
+ SkRunnable* task; // A task to ->run(),
+ int32_t* pending; // then sk_atomic_dec(pending) afterwards.
+ };
+
+ explicit ThreadPool(int threads) : fDraining(false) {
+ if (threads == 0) {
+ threads = num_cores();
+ }
+ for (int i = 0; i < threads; i++) {
+ fThreads.push(SkNEW_ARGS(SkThread, (&ThreadPool::Loop, this)));
+ fThreads.top()->start();
+ }
+ }
+
+ ~ThreadPool() {
+ SkASSERT(fWork.isEmpty()); // All SkTaskGroups should be destroyed by now.
+ {
+ AutoLock lock(&fReady);
+ fDraining = true;
+ fReady.broadcast();
+ }
+ for (int i = 0; i < fThreads.count(); i++) {
+ fThreads[i]->join();
+ }
+ SkASSERT(fWork.isEmpty()); // Can't hurt to double check.
+ fThreads.deleteAll();
+ }
+
+ void add(SkRunnable* task, int32_t* pending) {
+ Work work = { task, pending };
+ sk_atomic_inc(pending); // No barrier needed.
+ {
+ AutoLock lock(&fReady);
+ fWork.push(work);
+ fReady.signal();
+ }
+ }
+
+ static void Loop(void* arg) {
+ ThreadPool* pool = (ThreadPool*)arg;
+ Work work;
+ while (true) {
+ {
+ AutoLock lock(&pool->fReady);
+ while (pool->fWork.isEmpty()) {
+ if (pool->fDraining) {
+ return;
+ }
+ pool->fReady.wait();
+ }
+ pool->fWork.pop(&work);
+ }
+ work.task->run();
+ sk_atomic_dec(work.pending); // Release pairs with sk_acquire_load() in Wait().
+ }
+ }
+
+ SkTDArray<Work> fWork;
+ SkTDArray<SkThread*> fThreads;
+ SkCondVar fReady;
+ bool fDraining;
+
+ static ThreadPool* gGlobal;
+ friend struct SkTaskGroup::Enabler;
+};
+ThreadPool* ThreadPool::gGlobal = NULL;
+
+} // namespace
+
+SkTaskGroup::Enabler::Enabler(int threads) {
+ SkASSERT(ThreadPool::gGlobal == NULL);
+ ThreadPool::gGlobal = SkNEW_ARGS(ThreadPool, (threads));
+}
+
+SkTaskGroup::Enabler::~Enabler() {
+ SkASSERT(ThreadPool::gGlobal != NULL);
+ SkDELETE(ThreadPool::gGlobal);
+}
+
+SkTaskGroup::SkTaskGroup() : fPending(0) {}
+
+void SkTaskGroup::add(SkRunnable* task) { ThreadPool::Add(task, &fPending); }
+void SkTaskGroup::wait() { ThreadPool::Wait(&fPending); }
+
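For context, a minimal usage sketch of the API this file implements (not part of the patch): SkTaskGroup::Enabler proactively owns the shared ThreadPool for its lifetime, and each SkTaskGroup tracks only its own fPending count. The task type below is hypothetical; it only assumes SkRunnable's virtual run() entry point (the one invoked via work.task->run() above) and that SkRunnable is declared in SkRunnable.h as elsewhere in Skia.

    #include "SkRunnable.h"
    #include "SkTaskGroup.h"

    // Hypothetical task type for illustration; any SkRunnable subclass works.
    struct CountingTask : public SkRunnable {
        virtual void run() { /* one independent unit of work */ }
    };

    void example() {
        SkTaskGroup::Enabler enabled(0);  // 0 threads ~~> one per core, via num_cores().

        CountingTask tasks[4];            // Tasks must outlive wait(); the pool doesn't own them.
        SkTaskGroup group;                // fPending starts at 0.
        for (int i = 0; i < 4; i++) {
            group.add(&tasks[i]);         // Bumps fPending, then queues on the shared pool.
        }
        group.wait();                     // Blocks, helping run queued work, until fPending hits 0.
    }                                     // ~Enabler() asserts the queue is empty and joins the threads.

Compared with the old lazily initialized, leaked SkThreadPool singleton, the Enabler makes pool startup and teardown explicit at a scope of the caller's choosing.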
