Chromium Code Reviews| Index: src/core/SkRWBuffer.cpp |
| diff --git a/src/core/SkRWBuffer.cpp b/src/core/SkRWBuffer.cpp |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..ec60d5e364483f1ad8155064e7bcb8bb98736a48 |
| --- /dev/null |
| +++ b/src/core/SkRWBuffer.cpp |
| @@ -0,0 +1,340 @@ |
| +/* |
| + * Copyright 2015 Google Inc. |
| + * |
| + * Use of this source code is governed by a BSD-style license that can be |
| + * found in the LICENSE file. |
| + */ |
| + |
| +#include "SkData.h" |
| +#include "SkRWBuffer.h" |
| +#include "SkStream.h" |
| + |
| +struct SkBufferBlock { |
| + SkBufferBlock* fNext; |
| + size_t fUsed; |
| + size_t fCapacity; |
| + |
| + const void* startData() const { return this + 1; }; |
| + |
| + static SkBufferBlock* Alloc(size_t length) { |
| + size_t capacity = LengthToCapacity(length); |
| + SkBufferBlock* block = (SkBufferBlock*)sk_malloc_throw(sizeof(SkBufferBlock) + capacity); |
| + block->fNext = NULL; |
| + block->fUsed = 0; |
| + block->fCapacity = capacity; |
| + return block; |
| + } |
| + |
| + // Return number of bytes actually appended |
| + size_t append(const void* src, size_t length) { |
| + this->validate(); |
| + size_t avail = SkTMin(fCapacity - fUsed, length); |
| + memcpy((char*)(this + 1) + fUsed, src, avail); |
| + fUsed += avail; |
| + this->validate(); |
| + return avail; |
| + } |
| + |
| + void validate() const { |
| +#ifdef SK_DEBUG |
| + SkASSERT(fCapacity > 0); |
| + SkASSERT(fUsed <= fCapacity); |
| +#endif |
| + } |
| + |
| +private: |
| + static size_t LengthToCapacity(size_t length) { |
| + const size_t minSize = 4096 - sizeof(SkBufferBlock); |
| + return SkTMax(length, minSize); |
| + } |
| +}; |
| + |
| +struct SkBufferHead { |
| + mutable int32_t fRefCnt; |
| + SkBufferBlock fBlock; |
| + |
| + static size_t LengthToCapacity(size_t length) { |
| + const size_t minSize = 4096 - sizeof(SkBufferHead); |
| + return SkTMax(length, minSize); |
| + } |
| + |
| + static SkBufferHead* Alloc(size_t length) { |
| + size_t capacity = LengthToCapacity(length); |
| + size_t size = sizeof(SkBufferHead) + capacity; |
| + SkBufferHead* head = (SkBufferHead*)sk_malloc_throw(size); |
| + head->fRefCnt = 1; |
| + head->fBlock.fNext = NULL; |
| + head->fBlock.fUsed = 0; |
| + head->fBlock.fCapacity = capacity; |
| + return head; |
| + } |
| + |
| + void ref() const { |
| + SkASSERT(fRefCnt > 0); |
| + sk_atomic_inc(&fRefCnt); |
| + } |
| + |
| + void unref() const { |
| + SkASSERT(fRefCnt > 0); |
| + // A release here acts in place of all releases we "should" have been doing in ref(). |
| + if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) { |
| + // Like unique(), the acquire is only needed on success. |
| + SkBufferBlock* block = fBlock.fNext; |
| + sk_free((void*)this); |
| + while (block) { |
| + SkBufferBlock* next = block->fNext; |
| + sk_free(block); |
| + block = next; |
| + } |
| + SkDebugf("done\n"); |
| + } |
| + } |
| + |
| + void validate(size_t minUsed, SkBufferBlock* tail = NULL) const { |
| +#ifdef SK_DEBUG |
| + SkASSERT(fRefCnt > 0); |
| + size_t totalUsed = 0; |
| + const SkBufferBlock* block = &fBlock; |
| + const SkBufferBlock* lastBlock = block; |
| + while (block) { |
| + block->validate(); |
| + totalUsed += block->fUsed; |
| + lastBlock = block; |
| + block = block->fNext; |
| + } |
| + SkASSERT(minUsed <= totalUsed); |
| + if (tail) { |
| + SkASSERT(tail == lastBlock); |
| + } |
| +#endif |
| + } |
| +}; |
| + |
| +SkRBuffer::SkRBuffer(const SkBufferHead* head, size_t used) : fHead(SkRef(head)), fUsed(used) { |
| + if (head) { |
| + SkASSERT(used > 0); |
| + head->validate(used); |
| + } else { |
| + SkASSERT(0 == used); |
| + } |
| +} |
| + |
| +SkRBuffer::~SkRBuffer() { |
| + if (fHead) { |
| + fHead->validate(fUsed); |
| + fHead->unref(); |
| + } |
| +} |
| + |
// Positions the iterator at the start of 'buffer' (which may be NULL).
SkRBuffer::Iter::Iter(const SkRBuffer* buffer) {
    this->reset(buffer);
}
| + |
| +void SkRBuffer::Iter::reset(const SkRBuffer* buffer) { |
| + if (buffer) { |
| + fBlock = &buffer->fHead->fBlock; |
| + fRemaining = buffer->fUsed; |
| + } else { |
| + fBlock = NULL; |
| + fRemaining = 0; |
| + } |
| +} |
| + |
| +const void* SkRBuffer::Iter::data() const { |
| + return fRemaining ? fBlock->startData() : NULL; |
| +} |
| + |
| +size_t SkRBuffer::Iter::size() const { |
| + return SkTMin(fBlock->fUsed, fRemaining); |
| +} |
| + |
| +bool SkRBuffer::Iter::next() { |
| + if (fRemaining) { |
| + fRemaining -= this->size(); |
| + fBlock = fBlock->fNext; |
| + } |
| + return fRemaining != 0; |
| +} |
| + |
| +SkRWBuffer::SkRWBuffer(size_t initialCapacity) : fHead(NULL), fTail(NULL), fTotalUsed(0) {} |
| + |
| +SkRWBuffer::~SkRWBuffer() { |
| + this->validate(); |
| + fHead->unref(); |
| +} |
| + |
// Appends 'length' bytes from 'src', growing the block chain as needed.
// At most one new block is allocated per call: whatever does not fit in the
// current tail block goes into a single fresh block sized for the remainder.
void SkRWBuffer::append(const void* src, size_t length) {
    this->validate();
    if (0 == length) {
        return;
    }

    fTotalUsed += length;

    if (NULL == fHead) {
        // First append ever: the head embeds the first block.
        fHead = SkBufferHead::Alloc(length);
        fTail = &fHead->fBlock;
    }

    // Fill the tail block as far as its remaining capacity allows.
    size_t written = fTail->append(src, length);
    SkASSERT(written <= length);
    src = (const char*)src + written;
    length -= written;

    if (length) {
        // Overflow: one new block sized to hold everything left over.
        SkBufferBlock* block = SkBufferBlock::Alloc(length);
        fTail->fNext = block;
        fTail = block;
        written = fTail->append(src, length);
        SkASSERT(written == length);
    }
    this->validate();
}
| + |
#ifdef SK_DEBUG
// Debug-only consistency check of head/tail/total against the block chain.
void SkRWBuffer::validate() const {
    if (NULL == fHead) {
        SkASSERT(NULL == fTail);
        SkASSERT(0 == fTotalUsed);
        return;
    }
    fHead->validate(fTotalUsed, fTail);
}
#endif
| + |
// Returns a read-only snapshot that shares (refs) the current block chain
// but only exposes the fTotalUsed bytes written so far; later appends are
// not visible through it. Caller owns the returned object.
SkRBuffer* SkRWBuffer::newRBufferSnapshot() const {
    return SkNEW_ARGS(SkRBuffer, (fHead, fTotalUsed));
}
| + |
| +SkData* SkRWBuffer::newDataSnapshot() const { |
| + SkData* data = SkData::NewUninitialized(fTotalUsed); |
| + |
| + const SkBufferBlock* block = &fHead->fBlock; |
| + char* dst = (char*)data->writable_data(); |
| + while (block) { |
| + memcpy(dst, block->startData(), block->fUsed); |
| + dst += block->fUsed; |
| + block = block->fNext; |
| + } |
| + return data; |
| +} |
| + |
| +/////////////////////////////////////////////////////////////////////////////////////////////////// |
| + |
| +class SkRBufferStreamAsset : public SkStreamAsset { |
|
bungeman-skia
2015/04/27 14:52:42
You may want to take a look at SkBlockMemoryStream
reed2
2015/04/28 02:25:00
Acknowledged.
|
    // Debug-only invariant check: the global offset stays within the buffer,
    // the local offset stays within the current block, and the local offset
    // can never be ahead of the global one.
    void validate() const {
#ifdef SK_DEBUG
        SkASSERT(fGlobalOffset <= fBuffer->size());
        SkASSERT(fLocalOffset <= fIter.size());
        SkASSERT(fLocalOffset <= fGlobalOffset);
#endif
    }
| + |
    // RAII helper: runs validate() on scope entry and exit, so a method's
    // invariants are checked both before and after it mutates state.
    class AutoValidate {
        SkRBufferStreamAsset* fStream;
    public:
        AutoValidate(SkRBufferStreamAsset* stream) : fStream(stream) { stream->validate(); }
        ~AutoValidate() { fStream->validate(); }
    };

// Expands to nothing in release builds, so validation costs nothing there.
#ifdef SK_DEBUG
    #define AUTO_VALIDATE AutoValidate av(this);
#else
    #define AUTO_VALIDATE
#endif
| + |
| +public: |
| + SkRBufferStreamAsset(const SkRBuffer* buffer) : fBuffer(SkRef(buffer)), fIter(buffer) { |
| + fGlobalOffset = fLocalOffset = 0; |
| + } |
| + |
| + virtual ~SkRBufferStreamAsset() { fBuffer->unref(); } |
| + |
| + size_t getLength() const override { return fBuffer->size(); } |
| + |
| + bool rewind() override { |
| + AUTO_VALIDATE |
| + fIter.reset(fBuffer); |
| + fGlobalOffset = fLocalOffset = 0; |
| + return true; |
| + } |
| + |
    // Copies up to 'request' bytes into 'dst' (dst may be NULL, which skips
    // the bytes instead). Consumes the remainder of the current block first,
    // then advances block by block until the request is satisfied or the
    // snapshot is exhausted. Returns the number of bytes read/skipped.
    size_t read(void* dst, size_t request) override {
        AUTO_VALIDATE
        size_t bytesRead = 0;
        for (;;) {
            size_t size = fIter.size();
            SkASSERT(fLocalOffset <= size);
            // Take what's left of this block, capped by what's still requested.
            size_t avail = SkTMin(size - fLocalOffset, request - bytesRead);
            if (dst) {
                memcpy(dst, (const char*)fIter.data() + fLocalOffset, avail);
                dst = (char*)dst + avail;
            }
            bytesRead += avail;
            fLocalOffset += avail;
            SkASSERT(bytesRead <= request);
            if (bytesRead == request) {
                break;
            }
            // If we get here, we've exhausted the current iter
            SkASSERT(fLocalOffset == size);
            fLocalOffset = 0;
            if (!fIter.next()) {
                break; // ran out of data
            }
        }
        fGlobalOffset += bytesRead;
        SkASSERT(fGlobalOffset <= fBuffer->size());
        return bytesRead;
    }
| + |
| + bool isAtEnd() const override { |
| + return fBuffer->size() == fGlobalOffset; |
| + } |
| + |
| + SkStreamAsset* duplicate() const override { |
| + return SkNEW_ARGS(SkRBufferStreamAsset, (fBuffer)); |
| + } |
| + |
| + size_t getPosition() const { |
| + return fGlobalOffset; |
| + } |
| + |
| + bool seek(size_t position) { |
| + AUTO_VALIDATE |
| + if (position < fGlobalOffset) { |
| + this->rewind(); |
| + } |
| + (void)this->skip(position - fGlobalOffset); |
| + return true; |
| + } |
| + |
| + bool move(long offset) { |
| + AUTO_VALIDATE |
| + offset += fGlobalOffset; |
| + if (offset <= 0) { |
| + this->rewind(); |
| + } else { |
| + (void)this->seek(SkToSizeT(offset)); |
| + } |
| + return true; |
| + } |
| + |
| + SkStreamAsset* fork() const override { |
| + SkStreamAsset* clone = this->duplicate(); |
| + clone->seek(this->getPosition()); |
| + return clone; |
| + } |
| + |
| + |
| +private: |
| + const SkRBuffer* fBuffer; |
| + SkRBuffer::Iter fIter; |
| + size_t fLocalOffset; |
| + size_t fGlobalOffset; |
| +}; |
| + |
// Returns a stream over a snapshot of the current contents. The stream ctor
// takes its own ref on the snapshot, so the SkAutoTUnref safely drops the
// reference this function received from newRBufferSnapshot().
SkStreamAsset* SkRWBuffer::newStreamSnapshot() const {
    SkAutoTUnref<SkRBuffer> buffer(this->newRBufferSnapshot());
    return SkNEW_ARGS(SkRBufferStreamAsset, (buffer));
}