Index: src/gpu/GrBatch.cpp
diff --git a/src/gpu/GrBatch.cpp b/src/gpu/GrBatch.cpp
index 4df765e7b2f9f8b8823f4716eeed2866ac9a1e08..ce30499ff8d4b4202c92170f6b5844158591355a 100644
--- a/src/gpu/GrBatch.cpp
+++ b/src/gpu/GrBatch.cpp
@@ -1,24 +1,31 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
#include "GrBatch.h" |
#include "GrMemoryPool.h" |
-#include "SkMutex.h" |
+#include "SkSpinlock.h" |
// TODO I noticed a small benefit to using a larger exclusive pool for batches. Its very small, |
// but seems to be mostly consistent. There is a lot in flux right now, but we should really |
// revisit this when batch is everywhere |
-// We use a global pool protected by a mutex. Chrome may use the same GrContext on different |
-// threads. The GrContext is not used concurrently on different threads and there is a memory |
-// barrier between accesses of a context on different threads. Also, there may be multiple |
+// We use a global pool protected by a mutex (spinlock). Chrome may use the same GrContext on
+// different threads. The GrContext is not used concurrently on different threads and there is a
+// memory barrier between accesses of a context on different threads. Also, there may be multiple
+// GrContexts and those contexts may be in use concurrently on different threads.
 namespace {
-SK_DECLARE_STATIC_MUTEX(gBatchPoolMutex);
+SK_DECLARE_STATIC_SPINLOCK(gBatchSpinlock);
 class MemoryPoolAccessor {
 public:
-    MemoryPoolAccessor() { gBatchPoolMutex.acquire(); }
-    ~MemoryPoolAccessor() { gBatchPoolMutex.release(); }
+    MemoryPoolAccessor() { gBatchSpinlock.acquire(); }
+    ~MemoryPoolAccessor() { gBatchSpinlock.release(); }
     GrMemoryPool* pool() const {
         static GrMemoryPool gPool(16384, 16384);
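
The pattern this patch switches to is an RAII accessor that holds a global spinlock only for the duration of a single pool call, with the pool itself created lazily on first use. The standalone sketch below illustrates that shape; it is not part of the patch, std::atomic_flag stands in for SkSpinlock, and SpinLock, Pool, PoolAccessor, and Batch are placeholder names rather than Skia types.

    // Illustrative sketch of a spinlock-guarded global pool (not part of the patch).
    // std::atomic_flag approximates SkSpinlock; Pool approximates GrMemoryPool.
    #include <atomic>
    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    namespace {

    class SpinLock {
    public:
        void acquire() { while (fLocked.test_and_set(std::memory_order_acquire)) {} }
        void release() { fLocked.clear(std::memory_order_release); }
    private:
        std::atomic_flag fLocked = ATOMIC_FLAG_INIT;
    };

    // Stand-in for GrMemoryPool: simply forwards to malloc/free.
    class Pool {
    public:
        Pool(std::size_t /*preallocSize*/, std::size_t /*minAllocSize*/) {}
        void* allocate(std::size_t size) { return std::malloc(size); }
        void release(void* ptr) { std::free(ptr); }
    };

    SpinLock gPoolSpinlock;

    // RAII guard: the lock is held only while one pool operation runs,
    // mirroring MemoryPoolAccessor above.
    class PoolAccessor {
    public:
        PoolAccessor() { gPoolSpinlock.acquire(); }
        ~PoolAccessor() { gPoolSpinlock.release(); }

        Pool* pool() const {
            static Pool gPool(16384, 16384);  // created lazily, under the lock
            return &gPool;
        }
    };

    }  // namespace

    // A batch-like type whose allocations are routed through the shared pool.
    struct Batch {
        static void* operator new(std::size_t size) { return PoolAccessor().pool()->allocate(size); }
        static void operator delete(void* target) { PoolAccessor().pool()->release(target); }
        int fValue = 0;
    };

    int main() {
        Batch* b = new Batch;  // temporary PoolAccessor acquires and releases the lock
        b->fValue = 42;
        std::printf("%d\n", b->fValue);
        delete b;              // same guarded path on the way back to the pool
        return 0;
    }

A spinlock suits this use because each critical section is a single short pool call and contention across GrContexts on different threads is presumably rare, so spinning briefly is cheaper than parking a thread on a mutex.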