Index: src/core/SkRWBuffer.cpp
diff --git a/src/core/SkRWBuffer.cpp b/src/core/SkRWBuffer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..eee7d94ea4fd1f8849d12e3d8eb47ae55aab5ca4
--- /dev/null
+++ b/src/core/SkRWBuffer.cpp
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkData.h"
+#include "SkRWBuffer.h"
+#include "SkStream.h"
+
+struct SkBufferBlock {
+    SkBufferBlock* fNext;
+    size_t         fUsed;
+    size_t         fCapacity;
+
+    const void* startData() const { return this + 1; }  // payload bytes live just past the struct
+
+    static SkBufferBlock* Alloc(size_t length) {
+        size_t capacity = LengthToCapacity(length);
+        SkBufferBlock* block = (SkBufferBlock*)sk_malloc_throw(sizeof(SkBufferBlock) + capacity);
+        block->fNext = NULL;
+        block->fUsed = 0;
+        block->fCapacity = capacity;
+        return block;
+    }
+
+    // Return number of bytes actually appended
+    size_t append(const void* src, size_t length) {
+        this->validate();
+        size_t avail = SkTMin(fCapacity - fUsed, length);
+        memcpy((char*)(this + 1) + fUsed, src, avail);
+        fUsed += avail;
+        this->validate();
+        return avail;
+    }
+
+    void validate() const {
+#ifdef SK_DEBUG
+        SkASSERT(fCapacity > 0);
+        SkASSERT(fUsed <= fCapacity);
+#endif
+    }
+
+private:
+    static size_t LengthToCapacity(size_t length) {
+        const size_t minSize = 4096 - sizeof(SkBufferBlock);
+        return SkTMax(length, minSize);
+    }
+};
+
+struct SkBufferHead {
+    mutable int32_t fRefCnt;
+    SkBufferBlock   fBlock;  // must be last: the payload begins where the head ends
mtklein
2015/04/25 18:49:53
How come we're not using SkRefCnt / SkNVRefCnt?
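One plausible reason (my reading, not stated in the CL): the head is created with
sk_malloc_throw so the first block's payload can share its allocation, while SkNVRefCnt's
unref(), as I read SkRefCnt.h, ends in "delete this", which would not pair with that
malloc. Reusing SkNVRefCnt would need roughly the following; this is an untested,
hypothetical sketch, not code from this CL:

    #include <new>  // placement new

    struct SkBufferHead : public SkNVRefCnt<SkBufferHead> {
        SkBufferBlock fBlock;  // keep last, so the payload still starts where the head ends

        static size_t LengthToCapacity(size_t length) {
            const size_t minSize = 4096 - sizeof(SkBufferHead);
            return SkTMax(length, minSize);
        }

        static SkBufferHead* Alloc(size_t length) {
            size_t capacity = LengthToCapacity(length);
            void* mem = sk_malloc_throw(sizeof(SkBufferHead) + capacity);
            SkBufferHead* head = new (mem) SkBufferHead;  // SkNVRefCnt's ctor seeds fRefCnt to 1
            head->fBlock.fNext = NULL;
            head->fBlock.fUsed = 0;
            head->fBlock.fCapacity = capacity;
            return head;
        }

        ~SkBufferHead() {  // reached through SkNVRefCnt's "delete this"
            SkBufferBlock* block = fBlock.fNext;
            while (block) {
                SkBufferBlock* next = block->fNext;
                sk_free(block);
                block = next;
            }
        }

        void operator delete(void* p) { sk_free(p); }  // pair that delete with sk_malloc_throw
    };

The hand-rolled fRefCnt below avoids the placement-new and operator-delete machinery, at
the cost of writing ref()/unref() by hand.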
+
+    static size_t LengthToCapacity(size_t length) {
+        const size_t minSize = 4096 - sizeof(SkBufferHead);
+        return SkTMax(length, minSize);
+    }
+
+    static SkBufferHead* Alloc(size_t length) {
+        size_t capacity = LengthToCapacity(length);
+        size_t size = sizeof(SkBufferHead) + capacity;
+        SkBufferHead* head = (SkBufferHead*)sk_malloc_throw(size);
+        head->fRefCnt = 1;
+        // init the embedded block
+        head->fBlock.fNext = NULL;
+        head->fBlock.fUsed = 0;
+        head->fBlock.fCapacity = capacity;
+        return head;
+    }
+
+    void ref() const {
+        SkASSERT(fRefCnt > 0);
+        sk_atomic_inc(&fRefCnt);
+    }
+
+    void unref() const {
+        SkASSERT(fRefCnt > 0);
+        // A release here acts in place of all releases we "should" have been doing in ref().
+        if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
+            // Like unique(), the acquire is only needed on success.
+            SkBufferBlock* block = fBlock.fNext;
+            sk_free((void*)this);
+            while (block) {
+                SkBufferBlock* next = block->fNext;
+                sk_free(block);
+                block = next;
+            }
+        }
+    }
+
+    void validate(size_t minUsed, SkBufferBlock* tail = NULL) const {
+#ifdef SK_DEBUG
+        SkASSERT(fRefCnt > 0);
+        size_t totalUsed = 0;
+        const SkBufferBlock* block = &fBlock;
+        const SkBufferBlock* lastBlock = block;
+        while (block) {
+            block->validate();
+            totalUsed += block->fUsed;
+            lastBlock = block;
+            block = block->fNext;
+        }
+        SkASSERT(minUsed <= totalUsed);
+        if (tail) {
+            SkASSERT(tail == lastBlock);
+        }
+#endif
+    }
+};
+
+SkRBuffer::SkRBuffer(const SkBufferHead* head, size_t used) : fHead(head), fUsed(used) {
+    if (head) {
+        fHead->ref();  // balance the unref() in our destructor
+        SkASSERT(used > 0);
+        head->validate(used);
+    } else {
+        SkASSERT(0 == used);
+    }
+}
+
+SkRBuffer::~SkRBuffer() {
+    if (fHead) {
+        fHead->validate(fUsed);
+        fHead->unref();
+    }
+}
+
+SkRBuffer::Iter::Iter(const SkRBuffer* buffer) {
+    this->reset(buffer);
+}
+
+void SkRBuffer::Iter::reset(const SkRBuffer* buffer) {
+    if (buffer && buffer->fHead) {
+        fBlock = &buffer->fHead->fBlock;
+        fRemaining = buffer->fUsed;
+    } else {
+        fBlock = NULL;
+        fRemaining = 0;
+    }
+}
+
+const void* SkRBuffer::Iter::data() const {
+    return fRemaining ? fBlock->startData() : NULL;
+}
+
+size_t SkRBuffer::Iter::size() const {
+    return SkTMin(fBlock->fUsed, fRemaining);
+}
+
+bool SkRBuffer::Iter::next() {
+    if (fRemaining) {
+        fRemaining -= this->size();
+        fBlock = fBlock->fNext;
+    }
+    return fRemaining != 0;
+}
+
+SkRWBuffer::SkRWBuffer(size_t initialCapacity) : fHead(NULL), fTail(NULL), fTotalUsed(0) {}
+
+SkRWBuffer::~SkRWBuffer() {
+    if (fHead) {
+        fHead->unref();
+    }
+}
+
+void SkRWBuffer::append(const void* src, size_t length) {
+    this->validate();
+    if (0 == length) {
+        return;
+    }
+
+    fTotalUsed += length;
+
+    if (NULL == fHead) {
+        fHead = SkBufferHead::Alloc(length);
+        fTail = &fHead->fBlock;  // fTail points at the block embedded at the end of fHead
+    }
+
+    size_t written = fTail->append(src, length);
+    SkASSERT(written <= length);
+    src = (const char*)src + written;
+    length -= written;
+
+    if (length) {
+        SkBufferBlock* block = SkBufferBlock::Alloc(length);
+        fTail->fNext = block;  // does this need to be atomic?
mtklein
2015/04/25 18:49:53
Which parts of these APIs are meant to be thread safe?
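My reading of the intended contract (an inference, not stated anywhere in this CL): all
writes go through one externally synchronized writer, and only immutable snapshots cross
threads, since a snapshot ref()s the head, captures a fixed fUsed watermark, and never
walks past it. A sketch of that usage; producerThread, consumerThread, and decode are
hypothetical names:

    void decode(const void* data, size_t size);  // hypothetical consumer

    void producerThread(SkRWBuffer* rw, const void* chunk, size_t len) {
        rw->append(chunk, len);  // writer side: a single thread, or an external mutex
    }

    void consumerThread(const SkRBuffer* snapshot) {  // from newRBufferSnapshot()
        for (SkRBuffer::Iter iter(snapshot); iter.data(); iter.next()) {
            decode(iter.data(), iter.size());
        }
    }

Even then, fUsed and fNext on the shared tail block are written by the producer while a
reader may be looking, which is presumably what the atomicity question above is about.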
+        fTail = block;
+        written = fTail->append(src, length);
+        SkASSERT(written == length);
+    }
+    this->validate();
+}
+
+#ifdef SK_DEBUG
+void SkRWBuffer::validate() const {
+    if (fHead) {
+        fHead->validate(fTotalUsed, fTail);
+    } else {
+        SkASSERT(NULL == fTail);
+        SkASSERT(0 == fTotalUsed);
+    }
+}
+#endif
+
+SkRBuffer* SkRWBuffer::newRBufferSnapshot() const {
+    return SkNEW_ARGS(SkRBuffer, (fHead, fTotalUsed));
+}
+
+SkData* SkRWBuffer::newDataSnapshot() const {
+    SkData* data = SkData::NewUninitialized(fTotalUsed);
+
+    const SkBufferBlock* block = fHead ? &fHead->fBlock : NULL;
+    char* dst = (char*)data->writable_data();
+    while (block) {
+        memcpy(dst, block->startData(), block->fUsed);
+        dst += block->fUsed;
+        block = block->fNext;
+    }
+    return data;
+}
+
+#if 0
+class SkRBufferStreamAsset : public SkStreamAsset {
+public:
+    SkRBufferStreamAsset(const SkRBuffer* buffer) : fBuffer(SkRef(buffer)), fIter(buffer) {
+        fUsedInCurrIter = 0;
+    }
+
+    virtual ~SkRBufferStreamAsset() { fBuffer->unref(); }
+
+//  SkStreamAsset* duplicate() const override = 0;
+//  SkStreamAsset* fork() const override = 0;
+
+    size_t getLength() const override { return fBuffer->size(); }
+
+    bool rewind() override {
+        fIter.reset(fBuffer);
+        fUsedInCurrIter = 0;
+        return true;
+    }
+
+    size_t read(void* dst, size_t request) override {
+        size_t bytesRead = 0;
+        for (;;) {
+            size_t size = fIter.size();
+            SkASSERT(fUsedInCurrIter <= size);
+            size_t avail = SkTMin(size - fUsedInCurrIter, request - bytesRead);
+            if (dst) {
+                memcpy(dst, (const char*)fIter.data() + fUsedInCurrIter, avail);
+                dst = (char*)dst + avail;
+            }
+            bytesRead += avail;
+            fUsedInCurrIter += avail;
+            SkASSERT(bytesRead <= request);
+            if (bytesRead == request) {
+                return bytesRead;
+            }
+            // If we get here, we've exhausted the current iter
+            SkASSERT(fUsedInCurrIter == size);
+            fUsedInCurrIter = 0;
+            if (!fIter.next()) {
+                return bytesRead;   // ran out of data
+            }
+        }
+    }
+
+private:
+    const SkRBuffer* fBuffer;
+    SkRBuffer::Iter  fIter;
+    size_t           fUsedInCurrIter;
+};
+
+SkStreamAsset* SkRWBuffer::newStreamSnapshot() const {
+    SkAutoTUnref<SkRBuffer> buffer(this->newRBufferSnapshot());
+    return SkNEW_ARGS(SkRBufferStreamAsset, (buffer));
+}
+#endif
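For reference, a usage sketch (mine, not part of the CL) tying the public pieces together;
it assumes SkAutoTUnref's implicit conversion to T*, which newStreamSnapshot() above
already relies on:

    SkRWBuffer rw(0);
    rw.append("hello ", 6);
    rw.append("world", 5);

    // Zero-copy view: walk the block chain of an immutable snapshot.
    SkAutoTUnref<SkRBuffer> snap(rw.newRBufferSnapshot());
    for (SkRBuffer::Iter iter(snap); iter.data(); iter.next()) {
        fwrite(iter.data(), 1, iter.size(), stdout);  // prints "hello world"
    }

    // Flattened copy: one contiguous SkData holding all 11 bytes.
    SkAutoTUnref<SkData> data(rw.newDataSnapshot());
    SkASSERT(11 == data->size());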