Chromium Code Reviews
| Index: src/core/SkMallocPixelRef.cpp |
| diff --git a/src/core/SkMallocPixelRef.cpp b/src/core/SkMallocPixelRef.cpp |
| index f229e9de34d0fa8dbdd29304dd0ac0f7a78e5611..69fde2c17e60f8196c145f33e9754fefb347147f 100644 |
| --- a/src/core/SkMallocPixelRef.cpp |
| +++ b/src/core/SkMallocPixelRef.cpp |
| @@ -1,27 +1,105 @@ |
| - |
| /* |
| * Copyright 2011 Google Inc. |
| * |
| * Use of this source code is governed by a BSD-style license that can be |
| * found in the LICENSE file. |
| */ |
| + |
| #include "SkMallocPixelRef.h" |
| #include "SkBitmap.h" |
| #include "SkFlattenableBuffers.h" |
| -SkMallocPixelRef::SkMallocPixelRef(void* storage, size_t size, |
| - SkColorTable* ctable, bool ownPixels) { |
| - if (NULL == storage) { |
| - SkASSERT(ownPixels); |
| - storage = sk_malloc_throw(size); |
| +static bool check_info(const SkImageInfo& info, SkColorTable* ctable) { |
| + if (info.fWidth < 0 || |
| + info.fHeight < 0 || |
| + (unsigned)info.fColorType > (unsigned)kLastEnum_SkColorType || |
| + (unsigned)info.fAlphaType > (unsigned)kLastEnum_SkAlphaType) |
| + { |
| + return false; |
| + } |
| + |
| + // these seem like good checks, but currently we have (at least) tests |
| + // that expect the pixelref to succeed even when there is a mismatch |
| + // with colortables. fix? |
| +#if 0 |
| + if (kIndex8_SkColorType == info.fColorType && NULL == ctable) { |
| + return false; |
| + } |
| + if (kIndex8_SkColorType != info.fColorType && NULL != ctable) { |
| + return false; |
| + } |
| +#endif |
| + return true; |
| +} |
| + |
| +SkMallocPixelRef* SkMallocPixelRef::NewDirect(const SkImageInfo& info, |
mtklein
2013/12/04 22:46:00
I would assert that this may be better thought of
reed1
2013/12/05 13:32:39
Agreed. May move this kind of factory out to SkPix
| + void* addr, |
| + size_t rowBytes, |
| + SkColorTable* ctable) { |
| + if (!check_info(info, ctable)) { |
| + return NULL; |
| + } |
| + return SkNEW_ARGS(SkMallocPixelRef, (info, addr, rowBytes, ctable, false)); |
| +} |
| + |
| +SkMallocPixelRef* SkMallocPixelRef::NewAllocate(const SkImageInfo& info, |
| + size_t requestedRowBytes, |
| + SkColorTable* ctable) { |
| + if (!check_info(info, ctable)) { |
| + return NULL; |
| + } |
| + |
| + int32_t minRB = info.minRowBytes(); |
| + if (minRB < 0) { |
| + return NULL; // allocation will be too large |
| + } |
| + if (requestedRowBytes > 0 && (int32_t)requestedRowBytes < minRB) { |
| + return NULL; // cannot meet requested rowbytes |
| + } |
| + |
| + int32_t rowBytes; |
| + if (requestedRowBytes) { |
| + rowBytes = requestedRowBytes; |
| + } else { |
| + rowBytes = minRB; |
| } |
| + |
| + Sk64 bigSize; |
| + bigSize.setMul(info.fHeight, rowBytes); |
| + if (!bigSize.is32()) { |
| + return NULL; |
| + } |
| + |
| + size_t size = bigSize.get32(); |
| + void* addr = sk_malloc_flags(size, 0); |
| + if (NULL == addr) { |
| + return NULL; |
| + } |
| + |
| + return SkNEW_ARGS(SkMallocPixelRef, (info, addr, rowBytes, ctable, true)); |
| +} |
| + |
| +/////////////////////////////////////////////////////////////////////////////// |
| + |
| +SkMallocPixelRef::SkMallocPixelRef(const SkImageInfo& info, void* storage, |
| + size_t rowBytes, SkColorTable* ctable, |
| + bool ownsPixels) |
| + : fOwnPixels(ownsPixels) |
| +{ |
| + SkASSERT(check_info(info, ctable)); |
| + SkASSERT(rowBytes >= info.minRowBytes()); |
| + |
| + if (kIndex8_SkColorType != info.fColorType) { |
| + ctable = NULL; |
| + } |
| + |
| fStorage = storage; |
| - fSize = size; |
| fCTable = ctable; |
| + fRB = rowBytes; |
| + fInfo = info; |
| SkSafeRef(ctable); |
| - fOwnPixels = ownPixels; |
| - |
| - this->setPreLocked(fStorage, fCTable); |
| + |
| + this->setPreLocked(fInfo, fStorage, fRB, fCTable); |
| } |
| SkMallocPixelRef::~SkMallocPixelRef() { |
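
For context, a sketch of how a caller might use the two new factories (not part of this CL; the SkImageInfo setup and the kPMColor_SkColorType / kPremul_SkAlphaType enumerator names are assumptions about the API of this era):

    SkImageInfo info;
    info.fWidth = 100;
    info.fHeight = 50;
    info.fColorType = kPMColor_SkColorType;   // assumed 32-bit premultiplied color type name
    info.fAlphaType = kPremul_SkAlphaType;    // assumed

    // A rowBytes of 0 asks NewAllocate to fall back to info.minRowBytes().
    SkMallocPixelRef* allocated = SkMallocPixelRef::NewAllocate(info, 0, NULL);

    // NewDirect wraps caller-owned memory and does not take ownership,
    // so the storage must outlive the pixelref and be freed by the caller.
    void* storage = sk_malloc_throw(info.fHeight * info.minRowBytes());
    SkMallocPixelRef* direct = SkMallocPixelRef::NewDirect(info, storage,
                                                           info.minRowBytes(), NULL);

    SkSafeUnref(allocated);
    SkSafeUnref(direct);
    sk_free(storage);
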
| @@ -31,9 +109,16 @@ SkMallocPixelRef::~SkMallocPixelRef() { |
| } |
| } |
| -void* SkMallocPixelRef::onLockPixels(SkColorTable** ct) { |
| - *ct = fCTable; |
| - return fStorage; |
| +bool SkMallocPixelRef::onGetInfo(SkImageInfo* info) { |
| + *info = fInfo; |
| + return true; |
| +} |
| + |
| +bool SkMallocPixelRef::onNewLockPixels(LockRec* rec) { |
| + rec->fPixels = fStorage; |
| + rec->fRowBytes = fRB; |
| + rec->fColorTable = fCTable; |
| + return true; |
| } |
| void SkMallocPixelRef::onUnlockPixels() { |
| @@ -43,7 +128,13 @@ void SkMallocPixelRef::onUnlockPixels() { |
| void SkMallocPixelRef::flatten(SkFlattenableWriteBuffer& buffer) const { |
| this->INHERITED::flatten(buffer); |
| - buffer.writeByteArray(fStorage, fSize); |
| + fInfo.flatten(buffer); |
| + buffer.write32(fRB); |
| + |
| + // TODO: replace this bulk write with a chunky one that can trim off any |
| + // trailing bytes on each scanline (in case rowbytes > width*size) |
| + size_t size = this->computeMinSize(); |
| + buffer.writeByteArray(fStorage, size); |
| buffer.writeBool(fCTable != NULL); |
| if (fCTable) { |
| fCTable->writeToBuffer(buffer); |
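
A sketch of the row-by-row write the TODO above suggests (illustrative only, and it would require the unflattening side to be updated to match); only the meaningful bytes of each scanline are serialized when fRB is larger than fInfo.minRowBytes():

    const size_t snugRB = fInfo.minRowBytes();
    const char* src = reinterpret_cast<const char*>(fStorage);
    for (int y = 0; y < fInfo.fHeight; ++y) {
        buffer.writeByteArray(src, snugRB);   // trailing padding bytes are skipped
        src += fRB;
    }
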
| @@ -51,16 +142,19 @@ void SkMallocPixelRef::flatten(SkFlattenableWriteBuffer& buffer) const { |
| } |
| SkMallocPixelRef::SkMallocPixelRef(SkFlattenableReadBuffer& buffer) |
| - : INHERITED(buffer, NULL) { |
| - fSize = buffer.getArrayCount(); |
| - fStorage = sk_malloc_throw(fSize); |
| - buffer.readByteArray(fStorage, fSize); |
| + : INHERITED(buffer, NULL) |
| + , fOwnPixels(true) |
| +{ |
| + fInfo.unflatten(buffer); |
| + fRB = buffer.read32(); |
| + size_t size = this->computeMinSize(); |
| + fStorage = sk_malloc_throw(size); |
| + buffer.readByteArray(fStorage, size); |
| if (buffer.readBool()) { |
| fCTable = SkNEW_ARGS(SkColorTable, (buffer)); |
| } else { |
| fCTable = NULL; |
| } |
| - fOwnPixels = true; |
| - this->setPreLocked(fStorage, fCTable); |
| + this->setPreLocked(fInfo, fStorage, fRB, fCTable); |
| } |