Index: src/gpu/GrBatch.cpp |
diff --git a/src/gpu/GrBatch.cpp b/src/gpu/GrBatch.cpp |
index 4df765e7b2f9f8b8823f4716eeed2866ac9a1e08..43475373848daeb9c699d92f29ae97c91122047c 100644 |
--- a/src/gpu/GrBatch.cpp |
+++ b/src/gpu/GrBatch.cpp |
@@ -1,7 +1,14 @@ |
+/* |
+ * Copyright 2015 Google Inc. |
+ * |
+ * Use of this source code is governed by a BSD-style license that can be |
+ * found in the LICENSE file. |
+ */ |
+ |
#include "GrBatch.h" |
#include "GrMemoryPool.h" |
-#include "SkMutex.h" |
+#include "SkSpinlock.h" |
// TODO I noticed a small benefit to using a larger exclusive pool for batches. Its very small, |
// but seems to be mostly consistent. There is a lot in flux right now, but we should really |
@@ -13,12 +20,12 @@ |
// barrier between accesses of a context on different threads. Also, there may be multiple |
// GrContexts and those contexts may be in use concurrently on different threads. |
namespace { |
-SK_DECLARE_STATIC_MUTEX(gBatchPoolMutex); |
+static SkSpinlock gBatchSpinlock; |
mtklein
2015/03/30 15:24:10
Slightly nicer to use SK_DECLARE_STATIC_SPINLOCK(gBatchSpinlock) here, so the
spinlock is guaranteed to be statically (zero-)initialized rather than relying
on the default constructor of a function/file-scope static.
 |
class MemoryPoolAccessor { |
public: |
- MemoryPoolAccessor() { gBatchPoolMutex.acquire(); } |
+ MemoryPoolAccessor() { gBatchSpinlock.acquire(); } |
- ~MemoryPoolAccessor() { gBatchPoolMutex.release(); } |
+ ~MemoryPoolAccessor() { gBatchSpinlock.release(); } |
GrMemoryPool* pool() const { |
static GrMemoryPool gPool(16384, 16384); |