OLD | NEW |
(Empty) | |
| 1 #include "SkTaskGroup.h" |
| 2 |
| 3 #include "SkCondVar.h" |
| 4 #include "SkLazyPtr.h" |
| 5 #include "SkTDArray.h" |
| 6 #include "SkThread.h" |
| 7 #include "SkThreadUtils.h" |
| 8 |
#if defined(SK_BUILD_FOR_WIN32)
// Returns the number of logical processors reported by the OS.
// NOTE(review): relies on <windows.h> being pulled in by one of the Sk*
// headers above for SYSTEM_INFO/GetSystemInfo — confirm.
static inline int num_cores() {
    SYSTEM_INFO sysinfo;
    GetSystemInfo(&sysinfo);
    // dwNumberOfProcessors is a DWORD; cast explicitly rather than narrowing
    // implicitly.  It is always >= 1 on Windows.
    return (int) sysinfo.dwNumberOfProcessors;
}
#else
#include <unistd.h>
// Returns the number of logical processors currently online.
static inline int num_cores() {
    // sysconf() returns -1 on error (or if the option is unsupported).
    // Never report fewer than one core, so the ThreadPool constructor
    // always spawns at least one worker thread.
    const long n = sysconf(_SC_NPROCESSORS_ONLN);
    return n < 1 ? 1 : (int) n;
}
#endif
| 21 |
namespace {

// Worker-thread count requested via SkTaskGroup::SetThreadCount().
// 0 (the default) means "use num_cores()".  Only consulted when the global
// ThreadPool is lazily constructed, so calls after that point have no effect.
static int gThreadCount = 0;

// A lazily-created, process-global pool of worker threads that run SkRunnable
// tasks.  Each queued task carries a pointer to its SkTaskGroup's fPending
// counter: the counter is incremented when the task is queued (add) and
// decremented after the task runs (Loop/Wait), so SkTaskGroup::wait() can
// poll it to detect completion.
class ThreadPool : SkNoncopyable {
public:
    // Queue `task` on the global pool, bumping *pending to track it.
    static void Add(SkRunnable* task, int32_t* pending) {
        Global()->add(task, pending);
    }

    // Block until *pending drops to zero.  Rather than sleeping, the calling
    // thread helps drain the pool's queue while it waits.  Note: when the
    // queue is empty but tasks are still in flight on worker threads, this
    // spins on sk_acquire_load(pending) until they finish.
    static void Wait(int32_t* pending) {
        while (sk_acquire_load(pending) > 0) {  // Pairs with sk_atomic_dec here or in Loop.
            // Lend a hand until our SkTaskGroup of interest is done.
            ThreadPool* pool = Global();
            Work work;
            {
                AutoLock lock(&pool->fReady);
                if (pool->fWork.isEmpty()) {
                    // Someone has picked up all the work (including ours).  How nice of them!
                    // (They may still be working on it, so we can't assert *pending == 0 here.)
                    continue;
                }
                pool->fWork.pop(&work);
            }
            // This Work isn't necessarily part of our SkTaskGroup of interest, but that's fine.
            // We threads gotta stick together.  We're always making forward progress.
            work.task->run();
            sk_atomic_dec(work.pending);  // Release pairs with the sk_acquire_load() just above.
        }
    }

private:
    // RAII guard: holds the SkCondVar's mutex locked for the guard's scope.
    struct AutoLock {
        AutoLock(SkCondVar* c) : fC(c) { fC->lock(); }
        ~AutoLock() { fC->unlock(); }
    private:
        SkCondVar* fC;
    };

    // One queued unit of work.
    struct Work {
        SkRunnable* task;  // A task to ->run(),
        int32_t* pending;  // then sk_atomic_dec(pending) afterwards.
    };

    static ThreadPool* Create() { return SkNEW(ThreadPool); }
    static void Destroy(ThreadPool* p) { SkDELETE(p); }

    // Returns the singleton pool, created on first use and destroyed at
    // process teardown by SK_DECLARE_STATIC_LAZY_PTR.
    static ThreadPool* Global() {
        SK_DECLARE_STATIC_LAZY_PTR(ThreadPool, global, Create, Destroy);
        return global.get();
    }

    // Spawns gThreadCount workers (or num_cores() if gThreadCount is 0).
    ThreadPool() : fDraining(false) {
        const int threads = gThreadCount ? gThreadCount : num_cores();
        for (int i = 0; i < threads; i++) {
            fThreads.push(SkNEW_ARGS(SkThread, (&ThreadPool::Loop, this)));
            fThreads.top()->start();
        }
    }

    // Flags the pool as draining, wakes every worker so it can notice, then
    // joins and deletes them all.  Expects the queue to already be empty.
    ~ThreadPool() {
        SkASSERT(fWork.isEmpty());  // All SkTaskGroups should be destroyed by now.
        {
            AutoLock lock(&fReady);
            fDraining = true;
            fReady.broadcast();
        }
        for (int i = 0; i < fThreads.count(); i++) {
            fThreads[i]->join();
        }
        SkASSERT(fWork.isEmpty());  // Can't hurt to double check.
        fThreads.deleteAll();
    }

    // Queue one task.  *pending is incremented BEFORE the task is visible to
    // workers, so a worker's decrement can never drive the count negative.
    void add(SkRunnable* task, int32_t* pending) {
        Work work = { task, pending };
        sk_atomic_inc(pending);  // No barrier needed.
        {
            AutoLock lock(&fReady);
            fWork.push(work);
            fReady.signal();
        }
    }

    // Worker-thread entry point: pop-and-run tasks until the pool is
    // draining AND the queue is empty.  Tasks run outside the lock.
    static void Loop(void* arg) {
        ThreadPool* pool = (ThreadPool*)arg;
        Work work;
        while (true) {
            {
                AutoLock lock(&pool->fReady);
                while (pool->fWork.isEmpty()) {
                    if (pool->fDraining) {
                        return;
                    }
                    pool->fReady.wait();
                }
                pool->fWork.pop(&work);
            }
            work.task->run();
            sk_atomic_dec(work.pending);  // Release pairs with sk_acquire_load() in Wait().
        }
    }

    SkTDArray<Work>      fWork;     // Pending tasks; guarded by fReady's lock.
    SkTDArray<SkThread*> fThreads;  // The worker threads.
    SkCondVar            fReady;    // Lock + condition for fWork/fDraining.
    bool                 fDraining; // Set once in ~ThreadPool; guarded by fReady's lock.
};
| 129 |
| 130 } // namespace |
| 131 |
| 132 void SkTaskGroup::SetThreadCount(int n) { gThreadCount = n; } |
| 133 |
// fPending counts tasks added to this group that have not yet finished.
SkTaskGroup::SkTaskGroup() : fPending(0) {}
| 135 |
| 136 void SkTaskGroup::add(SkRunnable* task) { ThreadPool::Add(task, &fPending); } |
| 137 void SkTaskGroup::wait() { ThreadPool::Wait(&fPending); } |
OLD | NEW |