Chromium Code Reviews

The pre-change (old) side of this diff is empty; the file is new in this change.
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkData.h"
#include "SkRWBuffer.h"
#include "SkStream.h"

struct SkBufferBlock {
    SkBufferBlock*  fNext;
    size_t          fUsed;
    size_t          fCapacity;

    const void* startData() const { return this + 1; }

    static SkBufferBlock* Alloc(size_t length) {
        size_t capacity = LengthToCapacity(length);
        SkBufferBlock* block = (SkBufferBlock*)sk_malloc_throw(sizeof(SkBufferBlock) + capacity);
        block->fNext = NULL;
        block->fUsed = 0;
        block->fCapacity = capacity;
        return block;
    }

    // Return number of bytes actually appended
    size_t append(const void* src, size_t length) {
        this->validate();
        size_t avail = SkTMin(fCapacity - fUsed, length);
        memcpy((char*)(this + 1) + fUsed, src, avail);
        fUsed += avail;
        this->validate();
        return avail;
    }

    void validate() const {
#ifdef SK_DEBUG
        SkASSERT(fCapacity > 0);
        SkASSERT(fUsed <= fCapacity);
#endif
    }

private:
    static size_t LengthToCapacity(size_t length) {
        const size_t minSize = 4096 - sizeof(SkBufferBlock);
        return SkTMax(length, minSize);
    }
};
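A note for readers of the patch: startData() returning this + 1 relies on each block being a single allocation, with the SkBufferBlock header followed immediately by fCapacity bytes of payload. The standalone sketch below illustrates that header-plus-trailing-bytes idiom; it uses plain malloc/free in place of Skia's sk_malloc_throw/sk_free and is not part of the change.

```cpp
#include <cassert>
#include <cstdlib>

struct Header {
    size_t used;
    size_t capacity;

    // Payload begins immediately after the header fields, exactly as
    // SkBufferBlock::startData() assumes.
    char* payload() { return reinterpret_cast<char*>(this + 1); }
};

int main() {
    const size_t capacity = 64;
    // One allocation covers header + payload, so a single free() releases both.
    Header* h = static_cast<Header*>(std::malloc(sizeof(Header) + capacity));
    h->used = 0;
    h->capacity = capacity;
    assert(h->payload() == reinterpret_cast<char*>(h) + sizeof(Header));
    std::free(h);
    return 0;
}
```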

struct SkBufferHead : public SkBufferBlock {
    mutable int32_t fRefCnt;
mtklein
2015/04/25 18:49:53
How come we're not using SkRefCnt / SkNVRefCnt?
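For context on the question above: the usual alternative to a hand-rolled fRefCnt is an intrusive ref-count base class. The sketch below only illustrates that shape and is not Skia's actual SkRefCnt/SkNVRefCnt API; IntrusiveRefCnt and onZeroRefs() are hypothetical names. One wrinkle in this patch is that SkBufferHead is allocated with sk_malloc_throw and frees a whole chain of blocks, so a base class whose unref() simply deletes this would need a custom teardown hook along these lines.

```cpp
#include <atomic>
#include <cstdint>

// Hypothetical CRTP ref-count base, for illustration only.
template <typename Derived>
class IntrusiveRefCnt {
public:
    IntrusiveRefCnt() : fCnt(1) {}

    void ref() const {
        // Taking another reference needs no ordering with other memory operations.
        fCnt.fetch_add(1, std::memory_order_relaxed);
    }

    void unref() const {
        // Release on every decrement; the acquire half only matters for the
        // one thread that sees the count reach zero and runs the teardown.
        if (fCnt.fetch_add(-1, std::memory_order_acq_rel) == 1) {
            // The derived type decides how to free itself (here it would be
            // sk_free() plus walking the block chain, not a plain delete).
            static_cast<const Derived*>(this)->onZeroRefs();
        }
    }

private:
    mutable std::atomic<int32_t> fCnt;
};
```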

    static size_t LengthToCapacity(size_t length) {
        const size_t minSize = 4096 - sizeof(SkBufferHead);
        return SkTMax(length, minSize);
    }

    static SkBufferHead* Alloc(size_t length) {
        size_t capacity = LengthToCapacity(length);
        size_t size = sizeof(SkBufferHead) + capacity;
        SkBufferHead* head = (SkBufferHead*)sk_malloc_throw(size);
        head->fRefCnt = 1;
        // init Block
        head->fNext = NULL;
        head->fUsed = 0;
        head->fCapacity = capacity;
        return head;
    }

    void ref() const {
        SkASSERT(fRefCnt > 0);
        sk_atomic_inc(&fRefCnt);
    }

    void unref() const {
        SkASSERT(fRefCnt > 0);
        // A release here acts in place of all releases we "should" have been doing in ref().
        if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
            // Like unique(), the acquire is only needed on success.
            SkBufferBlock* block = fNext;
            sk_free((void*)this);
            while (block) {
                SkBufferBlock* next = block->fNext;
                sk_free(block);
                block = next;
            }
        }
    }

    void validate(size_t minUsed, SkBufferBlock* tail = NULL) const {
#ifdef SK_DEBUG
        SkASSERT(fRefCnt > 0);
        size_t totalUsed = 0;
        const SkBufferBlock* block = this;
        const SkBufferBlock* lastBlock = block;
        while (block) {
            block->validate();
            totalUsed += block->fUsed;
            lastBlock = block;
            block = block->fNext;
        }
        SkASSERT(minUsed <= totalUsed);
        if (tail) {
            SkASSERT(tail == lastBlock);
        }
#endif
    }
};

SkRBuffer::SkRBuffer(const SkBufferHead* head, size_t used) : fHead(head), fUsed(used) {
    if (head) {
        SkASSERT(used > 0);
        head->validate(used);
    } else {
        SkASSERT(0 == used);
    }
}

SkRBuffer::~SkRBuffer() {
    if (fHead) {
        fHead->validate(fUsed);
        fHead->unref();
    }
}

SkRBuffer::Iter::Iter(const SkRBuffer* buffer) {
    this->reset(buffer);
}

void SkRBuffer::Iter::reset(const SkRBuffer* buffer) {
    if (buffer) {
        fBlock = buffer->fHead;
        fRemaining = buffer->fUsed;
    } else {
        fBlock = NULL;
        fRemaining = 0;
    }
}

const void* SkRBuffer::Iter::data() const {
    return fRemaining ? fBlock->startData() : NULL;
}

size_t SkRBuffer::Iter::size() const {
    return SkTMin(fBlock->fUsed, fRemaining);
}

bool SkRBuffer::Iter::next() {
    if (fRemaining) {
        fRemaining -= this->size();
        fBlock = fBlock->fNext;
    }
    return fRemaining != 0;
}

SkRWBuffer::SkRWBuffer(size_t initialCapacity) : fHead(NULL), fTail(NULL), fTotalUsed(0) {}

SkRWBuffer::~SkRWBuffer() {
    fHead->unref();
}

void SkRWBuffer::append(const void* src, size_t length) {
    this->validate();
    if (0 == length) {
        return;
    }

    if (NULL == fHead) {
        fHead = SkBufferHead::Alloc(length);
        fTail = fHead;  // fTail will point into fHead, since fHead is a subclass of Block
    }

    size_t written = fTail->append(src, length);
    SkASSERT(written <= length);
    src = (const char*)src + written;
    length -= written;

    if (length) {
        SkBufferBlock* block = SkBufferBlock::Alloc(length);
        fTail->fNext = block;  // does this need to be atomic?
mtklein
2015/04/25 18:49:53
Which parts of these APIs are meant to be thread safe?
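A sketch of what the intended model appears to be, judging from the snapshot API and the reviewer's question: a single writer thread calls append(), and read-only snapshots are handed to other threads, which never observe later appends. This is an assumption about the design, not a statement of the final contract; producer_consumer_example and count_snapshot_bytes are hypothetical helpers, and SkRBuffer is treated as ref-counted because the stream code later in this file refs and unrefs it.

```cpp
#include <thread>

#include "SkRWBuffer.h"

static size_t count_snapshot_bytes(const SkRBuffer* snapshot) {
    size_t total = 0;
    // Iter walks only the blocks that existed when the snapshot was taken;
    // later appends by the writer are never visible here.
    for (SkRBuffer::Iter iter(snapshot); iter.data(); iter.next()) {
        total += iter.size();
    }
    return total;
}

static void producer_consumer_example(SkRWBuffer* rw, const void* chunk, size_t len) {
    rw->append(chunk, len);                          // writer thread only

    SkRBuffer* snapshot = rw->newRBufferSnapshot();  // taken on the writer thread
    std::thread reader([snapshot] {
        size_t seen = count_snapshot_bytes(snapshot);
        (void)seen;
        snapshot->unref();  // SkRBuffer is ref-counted, as the stream code below assumes
    });

    rw->append(chunk, len);  // the writer keeps appending meanwhile (intended to be safe)
    reader.join();
}
```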
        fTail = block;
        written = fTail->append(src, length);
        SkASSERT(written == length);
    }
    this->validate();
}

#ifdef SK_DEBUG
void SkRWBuffer::validate() const {
    if (fHead) {
        fHead->validate(fTotalUsed, fTail);
    } else {
        SkASSERT(NULL == fTail);
        SkASSERT(0 == fTotalUsed);
    }
}
#endif

SkRBuffer* SkRWBuffer::newRBufferSnapshot() const {
    return SkNEW_ARGS(SkRBuffer, (fHead, fTotalUsed));
}

SkData* SkRWBuffer::newDataSnapshot() const {
    SkData* data = SkData::NewUninitialized(fTotalUsed);

    const SkBufferBlock* block = fHead;
    char* dst = (char*)data->writable_data();
    while (block) {
        memcpy(dst, block->startData(), block->fUsed);
        dst += block->fUsed;
        block = block->fNext;
    }
    return data;
}

#if 0
class SkRBufferStreamAsset : public SkStreamAsset {
public:
    SkRBufferStreamAsset(const SkRBuffer* buffer) : fBuffer(SkRef(buffer)), fIter(buffer) {
        fUsedInCurrIter = 0;
    }

    virtual ~SkRBufferStreamAsset() { fBuffer->unref(); }

    // SkStreamAsset* duplicate() const override = 0;
    // SkStreamAsset* fork() const override = 0;

    size_t getLength() const override { return fBuffer->size(); }

    bool rewind() override {
        fIter.reset(fBuffer);
        fUsedInCurrIter = 0;
        return true;
    }

    size_t read(void* dst, size_t request) override {
        size_t bytesRead = 0;
        for (;;) {
            size_t size = fIter.size();
            SkASSERT(fUsedInCurrIter <= size);
            size_t avail = SkTMin(size - fUsedInCurrIter, request);
            if (dst) {
                memcpy(dst, (const char*)fIter.data() + fUsedInCurrIter, avail);
                dst = (char*)dst + avail;
            }
            bytesRead += avail;
            fUsedInCurrIter += avail;
            SkASSERT(bytesRead <= request);
            if (bytesRead == request) {
                return bytesRead;
            }
            // If we get here, we've exhausted the current iter
            SkASSERT(fUsedInCurrIter == size);
            fUsedInCurrIter = 0;
            if (!fIter.next()) {
                return bytesRead;  // ran out of data
            }
        }
    }

private:
    const SkRBuffer*    fBuffer;
    SkRBuffer::Iter     fIter;
    size_t              fUsedInCurrIter;
};

SkStreamAsset* SkRWBuffer::newStreamSnapshot() const {
    SkAutoTUnref<SkRBuffer> buffer(this->newRBufferSnapshot());
    return SkNEW_ARGS(SkRBufferStreamAsset, (buffer));
}
#endif
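Finally, a hedged single-threaded usage sketch of the enabled API in this patch. The calls and the roughly 4 KB block sizing come from the code above; the scenario itself is illustrative, len is assumed to be non-zero, and the comments describe the intended behaviour rather than guarantee it for this patchset.

```cpp
#include "SkData.h"
#include "SkRWBuffer.h"

static void snapshot_example(const void* bytes, size_t len) {
    SkRWBuffer buffer(0);

    // First append allocates the SkBufferHead; LengthToCapacity() rounds the
    // capacity up to at least 4096 - sizeof(SkBufferHead), so a small write
    // lands entirely in the head block.
    buffer.append(bytes, len);

    // Later appends fill the remaining capacity of the tail block; once that
    // overflows, the remainder goes into a freshly allocated SkBufferBlock
    // chained off fTail.
    buffer.append(bytes, len);

    // Intended result: one contiguous, immutable copy (sized by fTotalUsed)
    // of everything appended so far.
    SkData* data = buffer.newDataSnapshot();
    // ... use data->data() / data->size() ...
    data->unref();
}
```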