| Index: src/gpu/gl/GrGLGpu.cpp
|
| diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
|
| index 96b1625dd21f297fb5a8f1a22266d67eaa6a17fc..3d843f6624c4401d95e8c9c4332a270165b40802 100644
|
| --- a/src/gpu/gl/GrGLGpu.cpp
|
| +++ b/src/gpu/gl/GrGLGpu.cpp
|
| @@ -20,8 +20,10 @@
|
| #include "builders/GrGLShaderStringBuilder.h"
|
| #include "glsl/GrGLSL.h"
|
| #include "glsl/GrGLSLCaps.h"
|
| +#include "SkMipMapLevel.h"
|
| #include "SkStrokeRec.h"
|
| #include "SkTemplates.h"
|
| +#include "SkTypes.h"
|
|
|
| #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
|
| #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
|
| @@ -38,6 +40,12 @@
|
| #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR
|
| #endif
|
|
|
| +#if defined(GOOGLE3)
|
| + // Stack frame size is limited in GOOGLE3.
|
| + typedef SkAutoSMalloc<64 * 128> SkAutoSMallocTexels;
|
| +#else
|
| + typedef SkAutoSMalloc<128 * 128> SkAutoSMallocTexels;
|
| +#endif
|
|
|
| ///////////////////////////////////////////////////////////////////////////////
|
|
|
| @@ -525,7 +533,7 @@ GrRenderTarget* GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDe
|
| case kBorrow_GrWrapOwnership:
|
| idDesc.fLifeCycle = GrGpuResource::kBorrowed_LifeCycle;
|
| break;
|
| - }
|
| + }
|
| idDesc.fSampleConfig = GrRenderTarget::kUnified_SampleConfig;
|
|
|
| GrSurfaceDesc desc;
|
| @@ -541,7 +549,7 @@ GrRenderTarget* GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDe
|
|
|
| ////////////////////////////////////////////////////////////////////////////////
|
| bool GrGLGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
|
| - size_t rowBytes, GrPixelConfig srcConfig,
|
| + GrPixelConfig srcConfig,
|
| DrawPreference* drawPreference,
|
| WritePixelTempDrawInfo* tempDrawInfo) {
|
| if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
|
| @@ -626,8 +634,8 @@ static bool check_write_and_transfer_input(GrGLTexture* glTex, GrSurface* surfac
|
|
|
| bool GrGLGpu::onWritePixels(GrSurface* surface,
|
| int left, int top, int width, int height,
|
| - GrPixelConfig config, const void* buffer,
|
| - size_t rowBytes) {
|
| + GrPixelConfig config,
|
| + const SkTArray<SkMipMapLevel>& texels) {
|
| GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
|
|
|
| if (!check_write_and_transfer_input(glTex, surface, config)) {
|
| @@ -641,19 +649,14 @@ bool GrGLGpu::onWritePixels(GrSurface* surface,
|
| if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
|
| // We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels()
|
| SkASSERT(config == glTex->desc().fConfig);
|
| - success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), buffer,
|
| + success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), texels,
|
| kWrite_UploadType, left, top, width, height);
|
| } else {
|
| success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType,
|
| - left, top, width, height, config, buffer, rowBytes);
|
| + left, top, width, height, config, texels);
|
| }
|
|
|
| - if (success) {
|
| - glTex->texturePriv().dirtyMipMaps(true);
|
| - return true;
|
| - }
|
| -
|
| - return false;
|
| + return success;
|
| }
|
|
|
| bool GrGLGpu::onTransferPixels(GrSurface* surface,
|
| @@ -682,8 +685,11 @@ bool GrGLGpu::onTransferPixels(GrSurface* surface,
|
| GL_CALL(BindBuffer(glBuffer->bufferType(), glBuffer->bufferID()));
|
|
|
| bool success = false;
|
| + SkMipMapLevel mipLevel(buffer, rowBytes, width, height);
|
| + SkSTArray<1, SkMipMapLevel> texels;
|
| + texels.push_back(mipLevel);
|
| success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType,
|
| - left, top, width, height, config, buffer, rowBytes);
|
| + left, top, width, height, config, texels);
|
|
|
| if (success) {
|
| glTex->texturePriv().dirtyMipMaps(true);
|
| @@ -723,35 +729,164 @@ static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc,
|
| }
|
| }
|
|
|
| +/**
|
| + * Creates storage space for the texture and fills it with texels.
|
| + *
|
| + * @param desc The surface descriptor for the texture being created.
|
| + * @param interface The GL interface in use.
|
| + * @param target The GL target to which the texture will be uploaded.
|
| + * @param internalFormat The data format used for the internal storage of the texture.
|
| + * @param externalFormat The data format used for the external storage of the texture.
|
| + * @param externalType The type of the data used for the external storage of the texture.
|
| + * @param texels The texel data of the texture being created.
|
| + * @param succeeded Set to true if allocating and populating the texture completed
|
| + * without error.
|
| + */
|
| +static void allocate_and_populate_uncompressed_texture(const GrSurfaceDesc& desc,
|
| + const GrGLInterface& interface,
|
| + GrGLenum target,
|
| + GrGLenum internalFormat,
|
| + GrGLenum externalFormat,
|
| + GrGLenum externalType,
|
| + const SkTArray<SkMipMapLevel>& texels,
|
| + bool* succeeded) {
|
| + CLEAR_ERROR_BEFORE_ALLOC(&interface);
|
| + *succeeded = true;
|
| + for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
|
| + const void* currentMipData = texels[currentMipLevel].fTexelsOrOffset;
|
| + // Even if currentMipData is nullptr, continue to call TexImage2D.
|
| + // This will allocate texture memory which we can later populate.
|
| + GL_ALLOC_CALL(&interface,
|
| + TexImage2D(target,
|
| + currentMipLevel,
|
| + internalFormat,
|
| + texels[currentMipLevel].fWidth,
|
| + texels[currentMipLevel].fHeight,
|
| + 0, // border
|
| + externalFormat, externalType,
|
| + currentMipData));
|
| + GrGLenum error = check_alloc_error(desc, &interface);
|
| + if (error != GR_GL_NO_ERROR) {
|
| + *succeeded = false;
|
| + break;
|
| + }
|
| + }
|
| +}
|
| +
|
| +/**
|
| + * Creates storage space for the texture and fills it with texels.
|
| + *
|
| + * @param desc The surface descriptor for the texture being created.
|
| + * @param interface The GL interface in use.
|
| + * @param target The GL target to which the texture will be uploaded.
|
| + * @param internalFormat The data format used for the internal storage of the texture.
|
| + * @param texels The texel data of the texture being created.
|
| + */
|
| +static bool allocate_and_populate_compressed_texture(const GrSurfaceDesc& desc,
|
| + const GrGLInterface& interface,
|
| + GrGLenum target, GrGLenum internalFormat,
|
| + const SkTArray<SkMipMapLevel>& texels) {
|
| + CLEAR_ERROR_BEFORE_ALLOC(&interface);
|
| + for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
|
| + int width = texels[currentMipLevel].fWidth;
|
| + int height = texels[currentMipLevel].fHeight;
|
| +
|
| + // Make sure that the width and height that we pass to OpenGL
|
| + // is a multiple of the block size.
|
| + size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height);
|
| +
|
| + GL_ALLOC_CALL(&interface,
|
| + CompressedTexImage2D(target,
|
| + currentMipLevel,
|
| + internalFormat,
|
| + width,
|
| + height,
|
| + 0, // border
|
| + SkToInt(dataSize),
|
| + texels[currentMipLevel].fTexelsOrOffset));
|
| +
|
| + GrGLenum error = check_alloc_error(desc, &interface);
|
| + if (error != GR_GL_NO_ERROR) {
|
| + return false;
|
| + }
|
| + }
|
| +
|
| + return true;
|
| +}
|
| +
|
| +/**
|
| + * After a texture is created, any state which was altered during its creation
|
| + * needs to be restored.
|
| + *
|
| + * @param interface The GL interface to use.
|
| + * @param caps The capabilities of the GL device.
|
| + * @param restoreGLRowLength Should the row length unpacking be restored?
|
| + * @param glFlipY Did GL flip the texture vertically?
|
| + */
|
| +static void restore_pixelstore_state(const GrGLInterface& interface, const GrGLCaps& caps,
|
| + bool restoreGLRowLength, bool glFlipY) {
|
| + if (restoreGLRowLength) {
|
| + SkASSERT(caps.unpackRowLengthSupport());
|
| + GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
|
| + }
|
| + if (glFlipY) {
|
| + GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
|
| + }
|
| +}
|
| +
|
| bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
|
| GrGLenum target,
|
| UploadType uploadType,
|
| int left, int top, int width, int height,
|
| GrPixelConfig dataConfig,
|
| - const void* dataOrOffset,
|
| - size_t rowBytes) {
|
| - SkASSERT(dataOrOffset || kNewTexture_UploadType == uploadType ||
|
| - kTransfer_UploadType == uploadType);
|
| -
|
| + const SkTArray<SkMipMapLevel>& texels) {
|
| // If we're uploading compressed data then we should be using uploadCompressedTexData
|
| SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
|
|
|
| SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
|
|
|
| - size_t bpp = GrBytesPerPixel(dataConfig);
|
| - if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
|
| - &width, &height, &dataOrOffset, &rowBytes)) {
|
| - return false;
|
| + // texels is const.
|
| + // But we may need to flip the texture vertically to prepare it.
|
| + // Rather than flip in place and alter the incoming data,
|
| + // we allocate a new buffer to flip into.
|
| + // This means we need to make a non-const shallow copy of texels.
|
| + SkTArray<SkMipMapLevel> texelsShallowCopy(texels);
|
| +
|
| + for (int currentMipLevel = texelsShallowCopy.count() - 1; currentMipLevel >= 0;
|
| + currentMipLevel--) {
|
| + SkASSERT(texelsShallowCopy[currentMipLevel].fTexelsOrOffset ||
|
| + kNewTexture_UploadType == uploadType || kTransfer_UploadType == uploadType);
|
| }
|
| - size_t trimRowBytes = width * bpp;
|
|
|
| - // in case we need a temporary, trimmed copy of the src pixels
|
| -#if defined(GOOGLE3)
|
| - // Stack frame size is limited in GOOGLE3.
|
| - SkAutoSMalloc<64 * 128> tempStorage;
|
| -#else
|
| - SkAutoSMalloc<128 * 128> tempStorage;
|
| -#endif
|
| +
|
| + const GrGLInterface* interface = this->glInterface();
|
| + const GrGLCaps& caps = this->glCaps();
|
| +
|
| + size_t bpp = GrBytesPerPixel(dataConfig);
|
| + for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
|
| + if (texelsShallowCopy[currentMipLevel].fTexelsOrOffset == nullptr) {
|
| + continue;
|
| + }
|
| +
|
| + if (texelsShallowCopy[currentMipLevel].fHeight > SK_MaxS32 ||
|
| + texelsShallowCopy[currentMipLevel].fWidth > SK_MaxS32) {
|
| + return false;
|
| + }
|
| + int currentMipHeight = texelsShallowCopy[currentMipLevel].fHeight;
|
| + int currentMipWidth = texelsShallowCopy[currentMipLevel].fWidth;
|
| + if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
|
| + ¤tMipWidth,
|
| + ¤tMipHeight,
|
| + &texelsShallowCopy[currentMipLevel].fTexelsOrOffset,
|
| + &texelsShallowCopy[currentMipLevel].fRowBytes)) {
|
| + return false;
|
| + }
|
| + if (currentMipWidth < 0 || currentMipHeight < 0) {
|
| + return false;
|
| + }
|
| + texelsShallowCopy[currentMipLevel].fWidth = currentMipWidth;
|
| + texelsShallowCopy[currentMipLevel].fHeight = currentMipHeight;
|
| + }
|
|
|
| // Internal format comes from the texture desc.
|
| GrGLenum internalFormat;
|
| @@ -771,30 +906,62 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
|
| bool restoreGLRowLength = false;
|
| bool swFlipY = false;
|
| bool glFlipY = false;
|
| - if (dataOrOffset) {
|
| - if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
|
| - if (this->glCaps().unpackFlipYSupport()) {
|
| - glFlipY = true;
|
| - } else {
|
| - swFlipY = true;
|
| - }
|
| +
|
| + if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
|
| + if (caps.unpackFlipYSupport()) {
|
| + glFlipY = true;
|
| + } else {
|
| + swFlipY = true;
|
| }
|
| - if (this->glCaps().unpackRowLengthSupport() && !swFlipY) {
|
| + }
|
| +
|
| + // in case we need a temporary, trimmed copy of the src pixels
|
| + SkAutoSMallocTexels tempStorage;
|
| +
|
| + // find the combined size of all the mip levels and the relative offset of
|
| + // each into the collective buffer
|
| + size_t combined_buffer_size = 0;
|
| + SkTArray<size_t> individual_mip_offsets(texelsShallowCopy.count());
|
| + for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
|
| + const size_t trimmedSize = texels[currentMipLevel].fWidth * bpp *
|
| + texelsShallowCopy[currentMipLevel].fHeight;
|
| + individual_mip_offsets.push_back(combined_buffer_size);
|
| + combined_buffer_size += trimmedSize;
|
| + }
|
| + char* buffer = (char*)tempStorage.reset(combined_buffer_size);
|
| +
|
| + for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
|
| + if (texelsShallowCopy[currentMipLevel].fTexelsOrOffset == nullptr) {
|
| + continue;
|
| + }
|
| +
|
| + const size_t trimRowBytes = texelsShallowCopy[currentMipLevel].fWidth * bpp;
|
| +
|
| + /*
|
| + * check whether to allocate a temporary buffer for flipping y or
|
| + * because our srcData has extra bytes past each row. If so, we need
|
| + * to trim those off here, since GL ES may not let us specify
|
| + * GL_UNPACK_ROW_LENGTH.
|
| + */
|
| + restoreGLRowLength = false;
|
| +
|
| + const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
|
| + if (caps.unpackRowLengthSupport() && !swFlipY) {
|
| // can't use this for flipping, only non-neg values allowed. :(
|
| if (rowBytes != trimRowBytes) {
|
| GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
|
| - GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
|
| + GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
|
| restoreGLRowLength = true;
|
| }
|
| } else if (kTransfer_UploadType != uploadType) {
|
| if (trimRowBytes != rowBytes || swFlipY) {
|
| + const int height = texelsShallowCopy[currentMipLevel].fHeight;
|
| // copy data into our new storage, skipping the trailing bytes
|
| - size_t trimSize = height * trimRowBytes;
|
| - const char* src = (const char*)dataOrOffset;
|
| - if (swFlipY) {
|
| + const char* src = (const char*)texelsShallowCopy[currentMipLevel].fTexelsOrOffset;
|
| + if (swFlipY && height >= 1) {
|
| src += (height - 1) * rowBytes;
|
| }
|
| - char* dst = (char*)tempStorage.reset(trimSize);
|
| + char* dst = buffer + individual_mip_offsets[currentMipLevel];
|
| for (int y = 0; y < height; y++) {
|
| memcpy(dst, src, trimRowBytes);
|
| if (swFlipY) {
|
| @@ -805,49 +972,51 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
|
| dst += trimRowBytes;
|
| }
|
| // now point data to our copied version
|
| - dataOrOffset = tempStorage.get();
|
| + texelsShallowCopy[currentMipLevel] =
|
| + SkMipMapLevel(buffer + individual_mip_offsets[currentMipLevel],
|
| + trimRowBytes,
|
| + texelsShallowCopy[currentMipLevel].fWidth,
|
| + texelsShallowCopy[currentMipLevel].fHeight);
|
| }
|
| } else {
|
| return false;
|
| }
|
| if (glFlipY) {
|
| - GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
|
| + GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
|
| }
|
| - GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(dataConfig)));
|
| + GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ALIGNMENT,
|
| + config_alignment(desc.fConfig)));
|
| }
|
| +
|
| bool succeeded = true;
|
| - if (kNewTexture_UploadType == uploadType) {
|
| - if (dataOrOffset &&
|
| - !(0 == left && 0 == top && desc.fWidth == width && desc.fHeight == height)) {
|
| - succeeded = false;
|
| - } else {
|
| - CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
|
| - GL_ALLOC_CALL(this->glInterface(), TexImage2D(target, 0, internalFormat, desc.fWidth,
|
| - desc.fHeight, 0, externalFormat,
|
| - externalType, dataOrOffset));
|
| - GrGLenum error = check_alloc_error(desc, this->glInterface());
|
| - if (error != GR_GL_NO_ERROR) {
|
| - succeeded = false;
|
| - }
|
| - }
|
| + if (kNewTexture_UploadType == uploadType &&
|
| + 0 == left && 0 == top &&
|
| + desc.fWidth == width && desc.fHeight == height) {
|
| + allocate_and_populate_uncompressed_texture(desc, *interface, target, internalFormat,
|
| + externalFormat, externalType, texelsShallowCopy,
|
| + &succeeded);
|
| } else {
|
| if (swFlipY || glFlipY) {
|
| top = desc.fHeight - (top + height);
|
| }
|
| - GL_CALL(TexSubImage2D(target,
|
| - 0, // level
|
| - left, top,
|
| - width, height,
|
| - externalFormat, externalType, dataOrOffset));
|
| - }
|
| + for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count();
|
| + currentMipLevel++) {
|
| + if (texelsShallowCopy[currentMipLevel].fTexelsOrOffset == nullptr) {
|
| + continue;
|
| + }
|
|
|
| - if (restoreGLRowLength) {
|
| - SkASSERT(this->glCaps().unpackRowLengthSupport());
|
| - GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
|
| - }
|
| - if (glFlipY) {
|
| - GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
|
| + GL_CALL(TexSubImage2D(target,
|
| + currentMipLevel,
|
| + left, top,
|
| + texelsShallowCopy[currentMipLevel].fWidth,
|
| + texelsShallowCopy[currentMipLevel].fHeight,
|
| + externalFormat, externalType,
|
| + texelsShallowCopy[currentMipLevel].fTexelsOrOffset));
|
| + }
|
| }
|
| +
|
| + restore_pixelstore_state(*interface, caps, restoreGLRowLength, glFlipY);
|
| +
|
| return succeeded;
|
| }
|
|
|
| @@ -858,16 +1027,19 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
|
| // see fit if they want to go against the "standard" way to do it.
|
| bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
|
| GrGLenum target,
|
| - const void* data,
|
| + const SkTArray<SkMipMapLevel>& texels,
|
| UploadType uploadType,
|
| int left, int top, int width, int height) {
|
| SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
|
| - SkASSERT(kTransfer_UploadType != uploadType &&
|
| - (data || kNewTexture_UploadType != uploadType));
|
| + SkASSERT(kTransfer_UploadType != uploadType &&
|
| + (texels[0].fTexelsOrOffset || kNewTexture_UploadType != uploadType));
|
|
|
| // No support for software flip y, yet...
|
| SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin);
|
|
|
| + const GrGLInterface* interface = this->glInterface();
|
| + const GrGLCaps& caps = this->glCaps();
|
| +
|
| if (-1 == width) {
|
| width = desc.fWidth;
|
| }
|
| @@ -886,42 +1058,39 @@ bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
|
| }
|
| #endif
|
|
|
| - // Make sure that the width and height that we pass to OpenGL
|
| - // is a multiple of the block size.
|
| - size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height);
|
| -
|
| // We only need the internal format for compressed 2D textures.
|
| GrGLenum internalFormat;
|
| - if (!this->glCaps().getCompressedTexImageFormats(desc.fConfig, &internalFormat)) {
|
| + if (!caps.getCompressedTexImageFormats(desc.fConfig, &internalFormat)) {
|
| return false;
|
| }
|
|
|
| if (kNewTexture_UploadType == uploadType) {
|
| - CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
|
| - GL_ALLOC_CALL(this->glInterface(),
|
| - CompressedTexImage2D(target,
|
| - 0, // level
|
| - internalFormat,
|
| - width, height,
|
| - 0, // border
|
| - SkToInt(dataSize),
|
| - data));
|
| - GrGLenum error = check_alloc_error(desc, this->glInterface());
|
| - if (error != GR_GL_NO_ERROR) {
|
| - return false;
|
| - }
|
| + return allocate_and_populate_compressed_texture(desc, *interface, target, internalFormat,
|
| + texels);
|
| } else {
|
| // Paletted textures can't be updated.
|
| if (GR_GL_PALETTE8_RGBA8 == internalFormat) {
|
| return false;
|
| }
|
| - GL_CALL(CompressedTexSubImage2D(target,
|
| - 0, // level
|
| - left, top,
|
| - width, height,
|
| - internalFormat,
|
| - SkToInt(dataSize),
|
| - data));
|
| + for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
|
| + if (texels[currentMipLevel].fTexelsOrOffset == nullptr) {
|
| + continue;
|
| + }
|
| +
|
| + // Make sure that the width and height that we pass to OpenGL
|
| + // is a multiple of the block size.
|
| + size_t dataSize = GrCompressedFormatDataSize(desc.fConfig,
|
| + texels[currentMipLevel].fWidth,
|
| + texels[currentMipLevel].fHeight);
|
| + GL_CALL(CompressedTexSubImage2D(target,
|
| + currentMipLevel,
|
| + left, top,
|
| + texels[currentMipLevel].fWidth,
|
| + texels[currentMipLevel].fHeight,
|
| + internalFormat,
|
| + dataSize,
|
| + texels[currentMipLevel].fTexelsOrOffset));
|
| + }
|
| }
|
|
|
| return true;
|
| @@ -1085,9 +1254,47 @@ static size_t as_size_t(int x) {
|
| }
|
| #endif
|
|
|
| +static GrGLTexture::IDDesc generate_gl_texture(const GrGLInterface* interface,
|
| + GrGpuResource::LifeCycle lifeCycle) {
|
| + GrGLTexture::IDDesc idDesc;
|
| + idDesc.fInfo.fID = 0;
|
| + GR_GL_CALL(interface, GenTextures(1, &idDesc.fInfo.fID));
|
| + idDesc.fLifeCycle = lifeCycle;
|
| + // We only support GL_TEXTURE_2D at the moment.
|
| + idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
|
| + return idDesc;
|
| +}
|
| +
|
| +static GrGLTexture::TexParams set_initial_texture_params(const GrGLInterface* interface,
|
| + GrGLTexture::IDDesc idDesc) {
|
| + // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
|
| + // drivers have a bug where an FBO won't be complete if it includes a
|
| + // texture that is not mipmap complete (considering the filter in use).
|
| + GrGLTexture::TexParams initialTexParams;
|
| + // we only set a subset here so invalidate first
|
| + initialTexParams.invalidate();
|
| + initialTexParams.fMinFilter = GR_GL_NEAREST;
|
| + initialTexParams.fMagFilter = GR_GL_NEAREST;
|
| + initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
|
| + initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
|
| + GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget,
|
| + GR_GL_TEXTURE_MAG_FILTER,
|
| + initialTexParams.fMagFilter));
|
| + GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget,
|
| + GR_GL_TEXTURE_MIN_FILTER,
|
| + initialTexParams.fMinFilter));
|
| + GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget,
|
| + GR_GL_TEXTURE_WRAP_S,
|
| + initialTexParams.fWrapS));
|
| + GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget,
|
| + GR_GL_TEXTURE_WRAP_T,
|
| + initialTexParams.fWrapT));
|
| + return initialTexParams;
|
| +}
|
| +
|
| GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
|
| GrGpuResource::LifeCycle lifeCycle,
|
| - const void* srcData, size_t rowBytes) {
|
| + const SkTArray<SkMipMapLevel>& texels) {
|
| // We fail if the MSAA was requested and is not available.
|
| if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
|
| //SkDebugf("MSAA RT requested but not supported on this platform.");
|
| @@ -1096,13 +1303,7 @@ GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
|
|
|
| bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
|
|
|
| - GrGLTexture::IDDesc idDesc;
|
| - idDesc.fInfo.fID = 0;
|
| - GL_CALL(GenTextures(1, &idDesc.fInfo.fID));
|
| - idDesc.fLifeCycle = lifeCycle;
|
| - // We only support GL_TEXTURE_2D at the moment.
|
| - idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
|
| -
|
| + GrGLTexture::IDDesc idDesc = generate_gl_texture(this->glInterface(), lifeCycle);
|
| if (!idDesc.fInfo.fID) {
|
| return return_null_texture();
|
| }
|
| @@ -1117,31 +1318,12 @@ GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
|
| GR_GL_FRAMEBUFFER_ATTACHMENT));
|
| }
|
|
|
| - // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
|
| - // drivers have a bug where an FBO won't be complete if it includes a
|
| - // texture that is not mipmap complete (considering the filter in use).
|
| - GrGLTexture::TexParams initialTexParams;
|
| - // we only set a subset here so invalidate first
|
| - initialTexParams.invalidate();
|
| - initialTexParams.fMinFilter = GR_GL_NEAREST;
|
| - initialTexParams.fMagFilter = GR_GL_NEAREST;
|
| - initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
|
| - initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_MAG_FILTER,
|
| - initialTexParams.fMagFilter));
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_MIN_FILTER,
|
| - initialTexParams.fMinFilter));
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_WRAP_S,
|
| - initialTexParams.fWrapS));
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_WRAP_T,
|
| - initialTexParams.fWrapT));
|
| + GrGLTexture::TexParams initialTexParams = set_initial_texture_params(this->glInterface(),
|
| + idDesc);
|
| +
|
| if (!this->uploadTexData(desc, idDesc.fInfo.fTarget, kNewTexture_UploadType, 0, 0,
|
| desc.fWidth, desc.fHeight,
|
| - desc.fConfig, srcData, rowBytes)) {
|
| + desc.fConfig, texels)) {
|
| GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
|
| return return_null_texture();
|
| }
|
| @@ -1158,31 +1340,29 @@ GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
|
| }
|
| tex = new GrGLTextureRenderTarget(this, desc, idDesc, rtIDDesc);
|
| } else {
|
| - tex = new GrGLTexture(this, desc, idDesc);
|
| + bool wasMipMapDataProvided = false;
|
| + if (texels.count() > 1) {
|
| + wasMipMapDataProvided = true;
|
| + }
|
| + tex = new GrGLTexture(this, desc, idDesc, wasMipMapDataProvided);
|
| }
|
| tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
|
| #ifdef TRACE_TEXTURE_CREATION
|
| SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n",
|
| - glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
|
| + glTexDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
|
| #endif
|
| return tex;
|
| }
|
|
|
| GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc,
|
| GrGpuResource::LifeCycle lifeCycle,
|
| - const void* srcData) {
|
| + const SkTArray<SkMipMapLevel>& texels) {
|
| // Make sure that we're not flipping Y.
|
| if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
|
| return return_null_texture();
|
| }
|
|
|
| - GrGLTexture::IDDesc idDesc;
|
| - idDesc.fInfo.fID = 0;
|
| - GL_CALL(GenTextures(1, &idDesc.fInfo.fID));
|
| - idDesc.fLifeCycle = lifeCycle;
|
| - // We only support GL_TEXTURE_2D at the moment.
|
| - idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
|
| -
|
| + GrGLTexture::IDDesc idDesc = generate_gl_texture(this->glInterface(), lifeCycle);
|
| if (!idDesc.fInfo.fID) {
|
| return return_null_texture();
|
| }
|
| @@ -1190,30 +1370,10 @@ GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc,
|
| this->setScratchTextureUnit();
|
| GL_CALL(BindTexture(idDesc.fInfo.fTarget, idDesc.fInfo.fID));
|
|
|
| - // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
|
| - // drivers have a bug where an FBO won't be complete if it includes a
|
| - // texture that is not mipmap complete (considering the filter in use).
|
| - GrGLTexture::TexParams initialTexParams;
|
| - // we only set a subset here so invalidate first
|
| - initialTexParams.invalidate();
|
| - initialTexParams.fMinFilter = GR_GL_NEAREST;
|
| - initialTexParams.fMagFilter = GR_GL_NEAREST;
|
| - initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
|
| - initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_MAG_FILTER,
|
| - initialTexParams.fMagFilter));
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_MIN_FILTER,
|
| - initialTexParams.fMinFilter));
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_WRAP_S,
|
| - initialTexParams.fWrapS));
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_WRAP_T,
|
| - initialTexParams.fWrapT));
|
| -
|
| - if (!this->uploadCompressedTexData(desc, idDesc.fInfo.fTarget, srcData)) {
|
| + GrGLTexture::TexParams initialTexParams = set_initial_texture_params(this->glInterface(),
|
| + idDesc);
|
| +
|
| + if (!this->uploadCompressedTexData(desc, idDesc.fInfo.fTarget, texels)) {
|
| GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
|
| return return_null_texture();
|
| }
|
| @@ -1223,7 +1383,7 @@ GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc,
|
| tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
|
| #ifdef TRACE_TEXTURE_CREATION
|
| SkDebugf("--- new compressed texture [%d] size=(%d %d) config=%d\n",
|
| - glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
|
| + glTexDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
|
| #endif
|
| return tex;
|
| }
|
| @@ -2573,12 +2733,17 @@ void GrGLGpu::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTextur
|
| newTexParams.fMinFilter = glMinFilterModes[filterMode];
|
| newTexParams.fMagFilter = glMagFilterModes[filterMode];
|
|
|
| - if (GrTextureParams::kMipMap_FilterMode == filterMode &&
|
| - texture->texturePriv().mipMapsAreDirty()) {
|
| - GL_CALL(GenerateMipmap(target));
|
| - texture->texturePriv().dirtyMipMaps(false);
|
| + if (GrTextureParams::kMipMap_FilterMode == filterMode) {
|
| + if (texture->texturePriv().mipMapsAreDirty()) {
|
| + GL_CALL(GenerateMipmap(target));
|
| + texture->texturePriv().dirtyMipMaps(false);
|
| + texture->texturePriv().setMaxMipMapLevel(SkMipMapLevelCount(texture->width(),
|
| + texture->height()));
|
| + }
|
| }
|
|
|
| + newTexParams.fMaxMipMapLevel = texture->texturePriv().maxMipMapLevel();
|
| +
|
| newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX());
|
| newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY());
|
| get_tex_param_swizzle(texture->config(), this->glCaps(), newTexParams.fSwizzleRGBA);
|
| @@ -2590,6 +2755,17 @@ void GrGLGpu::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTextur
|
| this->setTextureUnit(unitIdx);
|
| GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, newTexParams.fMinFilter));
|
| }
|
| + if (setAll || newTexParams.fMaxMipMapLevel != oldTexParams.fMaxMipMapLevel) {
|
| + if (newTexParams.fMaxMipMapLevel != 0) {
|
| + this->setTextureUnit(unitIdx);
|
| + GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_LOD, 0));
|
| + GL_CALL(TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL, 0));
|
| + GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LOD,
|
| + static_cast<float>(newTexParams.fMaxMipMapLevel)));
|
| + GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL,
|
| + newTexParams.fMaxMipMapLevel));
|
| + }
|
| + }
|
| if (setAll || newTexParams.fWrapS != oldTexParams.fWrapS) {
|
| this->setTextureUnit(unitIdx);
|
| GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_S, newTexParams.fWrapS));
|
| @@ -2782,7 +2958,7 @@ void GrGLGpu::bindSurfaceFBOForCopy(GrSurface* surface, GrGLenum fboTarget, GrGL
|
| }
|
|
|
| void GrGLGpu::unbindTextureFBOForCopy(GrGLenum fboTarget, GrSurface* surface) {
|
| - // bindSurfaceFBOForCopy temporarily binds textures that are not render targets to
|
| + // bindSurfaceFBOForCopy temporarily binds textures that are not render targets to
|
| if (!surface->asRenderTarget()) {
|
| SkASSERT(surface->asTexture());
|
| GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target();
|
| @@ -2866,7 +3042,7 @@ bool GrGLGpu::onCopySurface(GrSurface* dst,
|
| this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
|
| return true;
|
| }
|
| -
|
| +
|
| if (can_copy_texsubimage(dst, src, this)) {
|
| this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstPoint);
|
| return true;
|
| @@ -2917,7 +3093,7 @@ void GrGLGpu::createCopyPrograms() {
|
| vshaderTxt.append(";");
|
| vTexCoord.appendDecl(this->glCaps().glslCaps(), &vshaderTxt);
|
| vshaderTxt.append(";");
|
| -
|
| +
|
| vshaderTxt.append(
|
| "// Copy Program VS\n"
|
| "void main() {"
|
| @@ -2956,7 +3132,7 @@ void GrGLGpu::createCopyPrograms() {
|
| fsOutName,
|
| GrGLSLTexture2DFunctionName(kVec2f_GrSLType, kSamplerTypes[i], this->glslGeneration())
|
| );
|
| -
|
| +
|
| GL_CALL_RET(fCopyPrograms[i].fProgram, CreateProgram());
|
| const char* str;
|
| GrGLint length;
|
|
|