| Index: src/gpu/gl/GrGLGpu.cpp
|
| diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp
|
| index 49b8f3acb806855b78da7351589433198900f6fe..ffbacce16f7b876bb9d3bcdbc3d673b472434d3f 100644
|
| --- a/src/gpu/gl/GrGLGpu.cpp
|
| +++ b/src/gpu/gl/GrGLGpu.cpp
|
| @@ -22,6 +22,7 @@
|
| #include "glsl/GrGLSLCaps.h"
|
| #include "SkStrokeRec.h"
|
| #include "SkTemplates.h"
|
| +#include "SkTypes.h"
|
|
|
| #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
|
| #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
|
| @@ -38,6 +39,12 @@
|
| #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR
|
| #endif
|
|
|
| +#if defined(GOOGLE3)
|
| + // Stack frame size is limited in GOOGLE3.
|
| + typedef SkAutoSMalloc<64 * 128> SkAutoSMallocTexels;
|
| +#else
|
| + typedef SkAutoSMalloc<128 * 128> SkAutoSMallocTexels;
|
| +#endif
|
|
|
| ///////////////////////////////////////////////////////////////////////////////
|
|
|
| @@ -465,7 +472,7 @@ GrTexture* GrGLGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
|
| case kBorrow_GrWrapOwnership:
|
| idDesc.fLifeCycle = GrGpuResource::kBorrowed_LifeCycle;
|
| break;
|
| - }
|
| + }
|
|
|
| surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags;
|
| surfDesc.fWidth = desc.fWidth;
|
| @@ -513,7 +520,7 @@ GrRenderTarget* GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDe
|
| case kBorrow_GrWrapOwnership:
|
| idDesc.fLifeCycle = GrGpuResource::kBorrowed_LifeCycle;
|
| break;
|
| - }
|
| + }
|
| idDesc.fSampleConfig = GrRenderTarget::kUnified_SampleConfig;
|
|
|
| GrSurfaceDesc desc;
|
| @@ -529,7 +536,7 @@ GrRenderTarget* GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDe
|
|
|
| ////////////////////////////////////////////////////////////////////////////////
|
| bool GrGLGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
|
| - size_t rowBytes, GrPixelConfig srcConfig,
|
| + GrPixelConfig srcConfig,
|
| DrawPreference* drawPreference,
|
| WritePixelTempDrawInfo* tempDrawInfo) {
|
| if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
|
| @@ -614,8 +621,8 @@ static bool check_write_and_transfer_input(GrGLTexture* glTex, GrSurface* surfac
|
|
|
| bool GrGLGpu::onWritePixels(GrSurface* surface,
|
| int left, int top, int width, int height,
|
| - GrPixelConfig config, const void* buffer,
|
| - size_t rowBytes) {
|
| + GrPixelConfig config,
|
| + const SkTArray<SkMipMapLevel>& texels) {
|
| GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
|
|
|
| if (!check_write_and_transfer_input(glTex, surface, config)) {
|
| @@ -629,19 +636,14 @@ bool GrGLGpu::onWritePixels(GrSurface* surface,
|
| if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
|
| // We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels()
|
| SkASSERT(config == glTex->desc().fConfig);
|
| - success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), buffer,
|
| + success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), texels, false,
|
| kWrite_UploadType, left, top, width, height);
|
| } else {
|
| success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType,
|
| - left, top, width, height, config, buffer, rowBytes);
|
| + left, top, width, height, config, texels);
|
| }
|
|
|
| - if (success) {
|
| - glTex->texturePriv().dirtyMipMaps(true);
|
| - return true;
|
| - }
|
| -
|
| - return false;
|
| + return success;
|
| }
|
|
|
| bool GrGLGpu::onTransferPixels(GrSurface* surface,
|
| @@ -670,8 +672,12 @@ bool GrGLGpu::onTransferPixels(GrSurface* surface,
|
| GL_CALL(BindBuffer(glBuffer->bufferType(), glBuffer->bufferID()));
|
|
|
| bool success = false;
|
| + SkMipMapLevel mipLevel(buffer, rowBytes, width, height);
|
| + const int mipLevelCount = 1;
|
| + SkTArray<SkMipMapLevel> texels(mipLevelCount);
|
| + texels.push_back(mipLevel);
|
| success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType,
|
| - left, top, width, height, config, buffer, rowBytes);
|
| + left, top, width, height, config, texels);
|
|
|
| if (success) {
|
| glTex->texturePriv().dirtyMipMaps(true);
|
| @@ -711,78 +717,362 @@ static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc,
|
| }
|
| }
|
|
|
| +/**
|
| + * Determines if TexStorage can be used when creating a texture.
|
| + *
|
| + * @param caps The capabilities of the GL device.
|
| + * @param standard The GL standard in use.
|
| + * @param desc The surface descriptor for the texture being created.
|
| + */
|
| +static bool can_use_tex_storage(const GrGLCaps& caps, const GrGLStandard& standard,
|
| + const GrSurfaceDesc& desc) {
|
| + bool useTexStorage = caps.texStorageSupport();
|
| + if (useTexStorage && kGL_GrGLStandard == standard) {
|
| + // 565 is not a sized internal format on desktop GL. So on desktop with
|
| + // 565 we always use an unsized internal format to let the system pick
|
| + // the best sized format to convert the 565 data to. Since TexStorage
|
| + // only allows sized internal formats we will instead use TexImage2D.
|
| + useTexStorage = desc.fConfig != kRGB_565_GrPixelConfig;
|
| + }
|
| +
|
| + return useTexStorage;
|
| +}
|
| +
|
| +/**
|
| + * Creates storage space for the texture and fills it with texels.
|
| + *
|
| + * @param desc The surface descriptor for the texture being created.
|
| + * @param interface The GL interface in use.
|
| + * @param useTexStorage The result of a call to can_use_tex_storage().
|
| + * @param internalFormat The data format used for the internal storage of the texture.
|
| + * @param externalFormat The data format used for the external storage of the texture.
|
| + * @param externalType The type of the data used for the external storage of the texture.
|
| + * @param texels The texel data of the texture being created.
|
| + * @param succeeded Set to true if allocating and populating the texture completed
|
| + * without error.
|
| + */
|
| +static void allocate_and_populate_uncompressed_texture(const GrSurfaceDesc& desc,
|
| + const GrGLInterface& interface,
|
| + GrGLenum target,
|
| + bool useTexStorage,
|
| + GrGLenum internalFormat,
|
| + GrGLenum externalFormat,
|
| + GrGLenum externalType,
|
| + const SkTArray<SkMipMapLevel>& texels,
|
| + bool* succeeded) {
|
| + CLEAR_ERROR_BEFORE_ALLOC(&interface);
|
| + if (useTexStorage) {
|
| + // We never resize or change formats of textures.
|
| + GL_ALLOC_CALL(&interface,
|
| + TexStorage2D(target,
|
| + texels.count(),
|
| + internalFormat,
|
| + desc.fWidth, desc.fHeight));
|
| +
|
| + GrGLenum error = check_alloc_error(desc, &interface);
|
| + if (error != GR_GL_NO_ERROR) {
|
| + *succeeded = false;
|
| + } else {
|
| + for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
|
| + const void* currentMipData = texels[currentMipLevel].fTexelsOrOffset;
|
| + if (currentMipData == nullptr) {
|
| + continue;
|
| + }
|
| +
|
| + GR_GL_CALL(&interface,
|
| + TexSubImage2D(target,
|
| + currentMipLevel,
|
| + 0, // left
|
| + 0, // top
|
| + texels[currentMipLevel].fWidth,
|
| + texels[currentMipLevel].fHeight,
|
| + externalFormat, externalType,
|
| + currentMipData));
|
| + }
|
| + *succeeded = true;
|
| + }
|
| + } else {
|
| + *succeeded = true;
|
| + for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
|
| + const void* currentMipData = texels[currentMipLevel].fTexelsOrOffset;
|
| +            // Even if currentMipData is nullptr, continue to call TexImage2D.
|
| + // This will allocate texture memory which we can later populate.
|
| + GL_ALLOC_CALL(&interface,
|
| + TexImage2D(target,
|
| + currentMipLevel,
|
| + internalFormat,
|
| + texels[currentMipLevel].fWidth,
|
| + texels[currentMipLevel].fHeight,
|
| + 0, // border
|
| + externalFormat, externalType,
|
| + currentMipData));
|
| + GrGLenum error = check_alloc_error(desc, &interface);
|
| + if (error != GR_GL_NO_ERROR) {
|
| + *succeeded = false;
|
| + break;
|
| + }
|
| + }
|
| +
|
| + if (*succeeded == true) {
|
| + GR_GL_CALL(&interface,
|
| + TexParameteri(target, GR_GL_TEXTURE_MIN_LOD, 0));
|
| + GR_GL_CALL(&interface,
|
| + TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL, 0));
|
| + if (texels.count() > 1) {
|
| + // If we have mipmaps, we can go ahead and limit the max mipmap level.
|
| + // Otherwise, mipmaps may be generated later and we do not want to
|
| + // limit.
|
| + GR_GL_CALL(&interface,
|
| + TexParameteri(target, GR_GL_TEXTURE_MAX_LOD, static_cast<float>(texels.count() - 1)));
|
| + GR_GL_CALL(&interface,
|
| + TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL, texels.count() - 1));
|
| + }
|
| +
|
| + }
|
| + }
|
| +}
|
| +
|
| +/**
|
| + * Creates storage space for the texture and fills it with texels.
|
| + *
|
| + * @param desc The surface descriptor for the texture being created.
|
| + * @param interface The GL interface in use.
|
| + * @param useTexStorage The result of a call to can_use_tex_storage().
|
| + * @param internalFormat The data format used for the internal storage of the texture.
|
| + * @param texels The texel data of the texture being created.
|
| + */
|
| +static bool allocate_and_populate_compressed_texture(const GrSurfaceDesc& desc,
|
| + const GrGLInterface& interface,
|
| + GrGLenum target,
|
| + bool useTexStorage, GrGLenum internalFormat,
|
| + const SkTArray<SkMipMapLevel>& texels) {
|
| + CLEAR_ERROR_BEFORE_ALLOC(&interface);
|
| + if (useTexStorage) {
|
| + // We never resize or change formats of textures.
|
| + GL_ALLOC_CALL(&interface,
|
| + TexStorage2D(target,
|
| + texels.count(),
|
| + internalFormat,
|
| + desc.fWidth, desc.fHeight));
|
| + GrGLenum error = check_alloc_error(desc, &interface);
|
| + if (error != GR_GL_NO_ERROR) {
|
| + return false;
|
| + } else {
|
| + for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
|
| + const void* currentMipData = texels[currentMipLevel].fTexelsOrOffset;
|
| + if (currentMipData == nullptr) {
|
| + continue;
|
| + }
|
| +
|
| + int width = texels[currentMipLevel].fWidth;
|
| + int height = texels[currentMipLevel].fHeight;
|
| +
|
| + // Make sure that the width and height that we pass to OpenGL
|
| + // is a multiple of the block size.
|
| + size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height);
|
| + GR_GL_CALL(&interface, CompressedTexSubImage2D(target,
|
| + currentMipLevel,
|
| + 0, // left
|
| + 0, // top
|
| + width,
|
| + height,
|
| + internalFormat, SkToInt(dataSize),
|
| + currentMipData));
|
| + }
|
| + }
|
| + } else {
|
| + for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
|
| +
|
| + int width = texels[currentMipLevel].fWidth;
|
| + int height = texels[currentMipLevel].fHeight;
|
| +
|
| + // Make sure that the width and height that we pass to OpenGL
|
| + // is a multiple of the block size.
|
| + size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height);
|
| +
|
| + GL_ALLOC_CALL(&interface,
|
| + CompressedTexImage2D(target,
|
| + currentMipLevel,
|
| + internalFormat,
|
| + width,
|
| + height,
|
| + 0, // border
|
| + SkToInt(dataSize),
|
| + texels[currentMipLevel].fTexelsOrOffset));
|
| +
|
| + GrGLenum error = check_alloc_error(desc, &interface);
|
| + if (error != GR_GL_NO_ERROR) {
|
| + return false;
|
| + }
|
| + }
|
| +
|
| + GR_GL_CALL(&interface,
|
| + TexParameteri(target, GR_GL_TEXTURE_MIN_LOD, 0));
|
| + GR_GL_CALL(&interface,
|
| + TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL, 0));
|
| + if (texels.count() > 1) {
|
| + // If we have mipmaps, we can go ahead and limit the max mipmap level.
|
| + // Otherwise, mipmaps may be generated later and we do not want to
|
| + // limit.
|
| + GR_GL_CALL(&interface,
|
| + TexParameteri(target, GR_GL_TEXTURE_MAX_LOD, static_cast<float>(texels.count() - 1)));
|
| +
|
| + GR_GL_CALL(&interface,
|
| + TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL, texels.count() - 1));
|
| + }
|
| + }
|
| + return true;
|
| +}
|
| +
|
| +/**
|
| + * After a texture is created, any state which was altered during its creation
|
| + * needs to be restored.
|
| + *
|
| + * @param interface The GL interface to use.
|
| + * @param caps The capabilities of the GL device.
|
| + * @param restoreGLRowLength Should the row length unpacking be restored?
|
| + * @param glFlipY Did GL flip the texture vertically?
|
| + */
|
| +static void restore_pixelstore_state(const GrGLInterface& interface, const GrGLCaps& caps,
|
| + bool restoreGLRowLength, bool glFlipY) {
|
| + if (restoreGLRowLength) {
|
| + SkASSERT(caps.unpackRowLengthSupport());
|
| + GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
|
| + }
|
| + if (glFlipY) {
|
| + GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
|
| + }
|
| +}
|
| +
|
| bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
|
| GrGLenum target,
|
| UploadType uploadType,
|
| int left, int top, int width, int height,
|
| GrPixelConfig dataConfig,
|
| - const void* dataOrOffset,
|
| - size_t rowBytes) {
|
| - SkASSERT(dataOrOffset || kNewTexture_UploadType == uploadType ||
|
| - kTransfer_UploadType == uploadType);
|
| -
|
| + const SkTArray<SkMipMapLevel>& texels) {
|
| // If we're uploading compressed data then we should be using uploadCompressedTexData
|
| SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
|
|
|
| SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
|
|
|
| + // texels is const.
|
| + // But we may need to flip the texture vertically to prepare it.
|
| + // Rather than flip in place and alter the incoming data,
|
| + // we allocate a new buffer to flip into.
|
| + // This means we need to make a non-const shallow copy of texels.
|
| + SkTArray<SkMipMapLevel> texelsShallowCopy(texels);
|
| +
|
| + for (int currentMipLevel = texelsShallowCopy.count() - 1; currentMipLevel >= 0;
|
| + currentMipLevel--) {
|
| + SkASSERT(texelsShallowCopy[currentMipLevel].fTexelsOrOffset
|
| + || kNewTexture_UploadType == uploadType || kTransfer_UploadType == uploadType);
|
| + }
|
| +
|
| +
|
| + const GrGLInterface* interface = this->glInterface();
|
| + const GrGLCaps& caps = this->glCaps();
|
| + GrGLStandard standard = this->glStandard();
|
| +
|
| size_t bpp = GrBytesPerPixel(dataConfig);
|
| - if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
|
| - &width, &height, &dataOrOffset, &rowBytes)) {
|
| - return false;
|
| + for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
|
| + if (texelsShallowCopy[currentMipLevel].fTexelsOrOffset == nullptr) {
|
| + continue;
|
| + }
|
| +
|
| + if (texelsShallowCopy[currentMipLevel].fHeight > SK_MaxS32
|
| + || texelsShallowCopy[currentMipLevel].fWidth > SK_MaxS32) {
|
| + return false;
|
| + }
|
| + int currentMipHeight = texelsShallowCopy[currentMipLevel].fHeight;
|
| + int currentMipWidth = texelsShallowCopy[currentMipLevel].fWidth;
|
| + if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
|
| + ¤tMipWidth,
|
| + ¤tMipHeight,
|
| + &texelsShallowCopy[currentMipLevel].fTexelsOrOffset,
|
| + &texelsShallowCopy[currentMipLevel].fRowBytes)) {
|
| + return false;
|
| + }
|
| + if (currentMipWidth < 0 || currentMipHeight < 0) {
|
| + return false;
|
| + }
|
| + texelsShallowCopy[currentMipLevel].fWidth = currentMipWidth;
|
| + texelsShallowCopy[currentMipLevel].fHeight = currentMipHeight;
|
| }
|
| - size_t trimRowBytes = width * bpp;
|
|
|
| - // in case we need a temporary, trimmed copy of the src pixels
|
| -#if defined(GOOGLE3)
|
| - // Stack frame size is limited in GOOGLE3.
|
| - SkAutoSMalloc<64 * 128> tempStorage;
|
| -#else
|
| - SkAutoSMalloc<128 * 128> tempStorage;
|
| -#endif
|
| + bool useTexStorage = can_use_tex_storage(caps, standard, desc);
|
| + // We can only use TexStorage if we know we will not later change the storage requirements.
|
| + // This means if we may later want to generate mipmaps, we cannot use TexStorage.
|
| + // Right now, we cannot know if we will later generate mipmaps or not.
|
| + // The only time we can use TexStorage is when we already have the mipmaps.
|
| + useTexStorage &= texelsShallowCopy.count() > 1;
|
|
|
| // Internal format comes from the texture desc.
|
| - GrGLenum internalFormat =
|
| - this->glCaps().configGLFormats(desc.fConfig).fInternalFormatTexImage;
|
| + GrGLenum internalFormat = caps.configGLFormats(desc.fConfig).fInternalFormatTexImage;
|
|
|
| // External format and type come from the upload data.
|
| - GrGLenum externalFormat =
|
| - this->glCaps().configGLFormats(dataConfig).fExternalFormatForTexImage;
|
| - GrGLenum externalType = this->glCaps().configGLFormats(dataConfig).fExternalType;
|
| -
|
| - /*
|
| - * Check whether to allocate a temporary buffer for flipping y or
|
| - * because our srcData has extra bytes past each row. If so, we need
|
| - * to trim those off here, since GL ES may not let us specify
|
| - * GL_UNPACK_ROW_LENGTH.
|
| - */
|
| - bool restoreGLRowLength = false;
|
| + GrGLenum externalFormat = caps.configGLFormats(dataConfig).fExternalFormatForTexImage;
|
| + GrGLenum externalType = caps.configGLFormats(dataConfig).fExternalType;
|
| +
|
| bool swFlipY = false;
|
| bool glFlipY = false;
|
| - if (dataOrOffset) {
|
| - if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
|
| - if (this->glCaps().unpackFlipYSupport()) {
|
| - glFlipY = true;
|
| - } else {
|
| - swFlipY = true;
|
| - }
|
| +
|
| + if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
|
| + if (caps.unpackFlipYSupport()) {
|
| + glFlipY = true;
|
| + } else {
|
| + swFlipY = true;
|
| }
|
| - if (this->glCaps().unpackRowLengthSupport() && !swFlipY) {
|
| + }
|
| +
|
| + bool restoreGLRowLength = false;
|
| +
|
| + // in case we need a temporary, trimmed copy of the src pixels
|
| + SkAutoSMallocTexels tempStorage;
|
| +
|
| + // find the combined size of all the mip levels and the relative offset of
|
| + // each into the collective buffer
|
| + size_t combined_buffer_size = 0;
|
| + SkTArray<size_t> individual_mip_offsets(texelsShallowCopy.count());
|
| + for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
|
| + const size_t trimmedSize = texels[currentMipLevel].fWidth * bpp *
|
| + texelsShallowCopy[currentMipLevel].fHeight;
|
| + individual_mip_offsets.push_back(combined_buffer_size);
|
| + combined_buffer_size += trimmedSize;
|
| + }
|
| + char* buffer = (char*)tempStorage.reset(combined_buffer_size);
|
| +
|
| + for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
|
| + if (texelsShallowCopy[currentMipLevel].fTexelsOrOffset == nullptr) {
|
| + continue;
|
| + }
|
| +
|
| + const size_t trimRowBytes = texelsShallowCopy[currentMipLevel].fWidth * bpp;
|
| +
|
| + /*
|
| +          * Check whether to allocate a temporary buffer for flipping y or
|
| + * because our srcData has extra bytes past each row. If so, we need
|
| + * to trim those off here, since GL ES may not let us specify
|
| + * GL_UNPACK_ROW_LENGTH.
|
| + */
|
| + restoreGLRowLength = false;
|
| +
|
| + const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
|
| + if (caps.unpackRowLengthSupport() && !swFlipY) {
|
| // can't use this for flipping, only non-neg values allowed. :(
|
| if (rowBytes != trimRowBytes) {
|
| GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
|
| - GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
|
| + GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
|
| restoreGLRowLength = true;
|
| }
|
| } else if (kTransfer_UploadType != uploadType) {
|
| if (trimRowBytes != rowBytes || swFlipY) {
|
| + const int height = texelsShallowCopy[currentMipLevel].fHeight;
|
| // copy data into our new storage, skipping the trailing bytes
|
| - size_t trimSize = height * trimRowBytes;
|
| - const char* src = (const char*)dataOrOffset;
|
| - if (swFlipY) {
|
| + const char* src = (const char*)texelsShallowCopy[currentMipLevel].fTexelsOrOffset;
|
| + if (swFlipY && height >= 1) {
|
| src += (height - 1) * rowBytes;
|
| }
|
| - char* dst = (char*)tempStorage.reset(trimSize);
|
| + char* dst = buffer + individual_mip_offsets[currentMipLevel];
|
| for (int y = 0; y < height; y++) {
|
| memcpy(dst, src, trimRowBytes);
|
| if (swFlipY) {
|
| @@ -793,49 +1083,51 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
|
| dst += trimRowBytes;
|
| }
|
| // now point data to our copied version
|
| - dataOrOffset = tempStorage.get();
|
| + texelsShallowCopy[currentMipLevel] =
|
| + SkMipMapLevel(buffer + individual_mip_offsets[currentMipLevel],
|
| + trimRowBytes,
|
| + texelsShallowCopy[currentMipLevel].fWidth,
|
| + texelsShallowCopy[currentMipLevel].fHeight);
|
| }
|
| } else {
|
| return false;
|
| }
|
| if (glFlipY) {
|
| - GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
|
| + GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
|
| }
|
| - GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(dataConfig)));
|
| + GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ALIGNMENT,
|
| + config_alignment(desc.fConfig)));
|
| }
|
| +
|
| bool succeeded = true;
|
| - if (kNewTexture_UploadType == uploadType) {
|
| - if (dataOrOffset &&
|
| - !(0 == left && 0 == top && desc.fWidth == width && desc.fHeight == height)) {
|
| - succeeded = false;
|
| - } else {
|
| - CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
|
| - GL_ALLOC_CALL(this->glInterface(), TexImage2D(target, 0, internalFormat, desc.fWidth,
|
| - desc.fHeight, 0, externalFormat,
|
| - externalType, dataOrOffset));
|
| - GrGLenum error = check_alloc_error(desc, this->glInterface());
|
| - if (error != GR_GL_NO_ERROR) {
|
| - succeeded = false;
|
| - }
|
| - }
|
| + if (kNewTexture_UploadType == uploadType &&
|
| + 0 == left && 0 == top &&
|
| + desc.fWidth == width && desc.fHeight == height) {
|
| + allocate_and_populate_uncompressed_texture(desc, *interface, target, useTexStorage,
|
| + internalFormat, externalFormat, externalType,
|
| + texelsShallowCopy, &succeeded);
|
| } else {
|
| if (swFlipY || glFlipY) {
|
| top = desc.fHeight - (top + height);
|
| }
|
| - GL_CALL(TexSubImage2D(target,
|
| - 0, // level
|
| - left, top,
|
| - width, height,
|
| - externalFormat, externalType, dataOrOffset));
|
| - }
|
| + for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count();
|
| + currentMipLevel++) {
|
| + if (texelsShallowCopy[currentMipLevel].fTexelsOrOffset == nullptr) {
|
| + continue;
|
| + }
|
|
|
| - if (restoreGLRowLength) {
|
| - SkASSERT(this->glCaps().unpackRowLengthSupport());
|
| - GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
|
| - }
|
| - if (glFlipY) {
|
| - GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
|
| + GL_CALL(TexSubImage2D(target,
|
| + currentMipLevel,
|
| + left, top,
|
| + texelsShallowCopy[currentMipLevel].fWidth,
|
| + texelsShallowCopy[currentMipLevel].fHeight,
|
| + externalFormat, externalType,
|
| + texelsShallowCopy[currentMipLevel].fTexelsOrOffset));
|
| + }
|
| }
|
| +
|
| + restore_pixelstore_state(*interface, caps, restoreGLRowLength, glFlipY);
|
| +
|
| return succeeded;
|
| }
|
|
|
| @@ -846,16 +1138,21 @@ bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
|
| // see fit if they want to go against the "standard" way to do it.
|
| bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
|
| GrGLenum target,
|
| - const void* data,
|
| + const SkTArray<SkMipMapLevel>& texels,
|
| + bool isNewTexture,
|
| UploadType uploadType,
|
| int left, int top, int width, int height) {
|
| SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
|
| - SkASSERT(kTransfer_UploadType != uploadType &&
|
| - (data || kNewTexture_UploadType != uploadType));
|
| + SkASSERT(kTransfer_UploadType != uploadType &&
|
| + (texels[0].fTexelsOrOffset || kNewTexture_UploadType != uploadType));
|
|
|
| // No support for software flip y, yet...
|
| SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin);
|
|
|
| + const GrGLInterface* interface = this->glInterface();
|
| + const GrGLCaps& caps = this->glCaps();
|
| + GrGLStandard standard = this->glStandard();
|
| +
|
| if (-1 == width) {
|
| width = desc.fWidth;
|
| }
|
| @@ -874,40 +1171,44 @@ bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
|
| }
|
| #endif
|
|
|
| - // Make sure that the width and height that we pass to OpenGL
|
| - // is a multiple of the block size.
|
| - size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height);
|
| + bool useTexStorage = can_use_tex_storage(caps, standard, desc);
|
| + // We can only use TexStorage if we know we will not later change the storage requirements.
|
| + // This means if we may later want to generate mipmaps, we cannot use TexStorage.
|
| + // Right now, we cannot know if we will later generate mipmaps or not.
|
| + // The only time we can use TexStorage is when we already have the mipmaps.
|
| + useTexStorage &= texels.count() > 1;
|
|
|
| // We only need the internal format for compressed 2D textures. There is on
|
| // sized vs base internal format distinction for compressed textures.
|
| - GrGLenum internalFormat =this->glCaps().configGLFormats(desc.fConfig).fSizedInternalFormat;
|
| + GrGLenum internalFormat = caps.configGLFormats(desc.fConfig).fInternalFormatTexImage;
|
|
|
| if (kNewTexture_UploadType == uploadType) {
|
| - CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
|
| - GL_ALLOC_CALL(this->glInterface(),
|
| - CompressedTexImage2D(target,
|
| - 0, // level
|
| - internalFormat,
|
| - width, height,
|
| - 0, // border
|
| - SkToInt(dataSize),
|
| - data));
|
| - GrGLenum error = check_alloc_error(desc, this->glInterface());
|
| - if (error != GR_GL_NO_ERROR) {
|
| - return false;
|
| - }
|
| + return allocate_and_populate_compressed_texture(desc, *interface, target, useTexStorage,
|
| + internalFormat, texels);
|
| } else {
|
| // Paletted textures can't be updated.
|
| if (GR_GL_PALETTE8_RGBA8 == internalFormat) {
|
| return false;
|
| }
|
| - GL_CALL(CompressedTexSubImage2D(target,
|
| - 0, // level
|
| - left, top,
|
| - width, height,
|
| - internalFormat,
|
| - SkToInt(dataSize),
|
| - data));
|
| + for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
|
| + if (texels[currentMipLevel].fTexelsOrOffset == nullptr) {
|
| + continue;
|
| + }
|
| +
|
| + // Make sure that the width and height that we pass to OpenGL
|
| + // is a multiple of the block size.
|
| + size_t dataSize = GrCompressedFormatDataSize(desc.fConfig,
|
| + texels[currentMipLevel].fWidth,
|
| + texels[currentMipLevel].fHeight);
|
| + GL_CALL(CompressedTexSubImage2D(target,
|
| + currentMipLevel,
|
| + left, top,
|
| + texels[currentMipLevel].fWidth,
|
| + texels[currentMipLevel].fHeight,
|
| + internalFormat,
|
| + dataSize,
|
| + texels[currentMipLevel].fTexelsOrOffset));
|
| + }
|
| }
|
|
|
| return true;
|
| @@ -1074,9 +1375,47 @@ static size_t as_size_t(int x) {
|
| }
|
| #endif
|
|
|
| +static GrGLTexture::IDDesc generate_gl_texture(const GrGLInterface* interface,
|
| + GrGpuResource::LifeCycle lifeCycle) {
|
| + GrGLTexture::IDDesc idDesc;
|
| + idDesc.fInfo.fID = 0;
|
| + GR_GL_CALL(interface, GenTextures(1, &idDesc.fInfo.fID));
|
| + idDesc.fLifeCycle = lifeCycle;
|
| + // We only support GL_TEXTURE_2D at the moment.
|
| + idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
|
| + return idDesc;
|
| +}
|
| +
|
| +static GrGLTexture::TexParams set_initial_texture_params(const GrGLInterface* interface,
|
| + GrGLTexture::IDDesc idDesc) {
|
| + // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
|
| + // drivers have a bug where an FBO won't be complete if it includes a
|
| + // texture that is not mipmap complete (considering the filter in use).
|
| + GrGLTexture::TexParams initialTexParams;
|
| + // we only set a subset here so invalidate first
|
| + initialTexParams.invalidate();
|
| + initialTexParams.fMinFilter = GR_GL_NEAREST;
|
| + initialTexParams.fMagFilter = GR_GL_NEAREST;
|
| + initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
|
| + initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
|
| + GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget,
|
| + GR_GL_TEXTURE_MAG_FILTER,
|
| + initialTexParams.fMagFilter));
|
| + GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget,
|
| + GR_GL_TEXTURE_MIN_FILTER,
|
| + initialTexParams.fMinFilter));
|
| + GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget,
|
| + GR_GL_TEXTURE_WRAP_S,
|
| + initialTexParams.fWrapS));
|
| + GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget,
|
| + GR_GL_TEXTURE_WRAP_T,
|
| + initialTexParams.fWrapT));
|
| + return initialTexParams;
|
| +}
|
| +
|
| GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
|
| GrGpuResource::LifeCycle lifeCycle,
|
| - const void* srcData, size_t rowBytes) {
|
| + const SkTArray<SkMipMapLevel>& texels) {
|
| // We fail if the MSAA was requested and is not available.
|
| if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
|
| //SkDebugf("MSAA RT requested but not supported on this platform.");
|
| @@ -1085,13 +1424,7 @@ GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
|
|
|
| bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
|
|
|
| - GrGLTexture::IDDesc idDesc;
|
| - idDesc.fInfo.fID = 0;
|
| - GL_CALL(GenTextures(1, &idDesc.fInfo.fID));
|
| - idDesc.fLifeCycle = lifeCycle;
|
| - // We only support GL_TEXTURE_2D at the moment.
|
| - idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
|
| -
|
| + GrGLTexture::IDDesc idDesc = generate_gl_texture(this->glInterface(), lifeCycle);
|
| if (!idDesc.fInfo.fID) {
|
| return return_null_texture();
|
| }
|
| @@ -1106,31 +1439,12 @@ GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
|
| GR_GL_FRAMEBUFFER_ATTACHMENT));
|
| }
|
|
|
| - // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
|
| - // drivers have a bug where an FBO won't be complete if it includes a
|
| - // texture that is not mipmap complete (considering the filter in use).
|
| - GrGLTexture::TexParams initialTexParams;
|
| - // we only set a subset here so invalidate first
|
| - initialTexParams.invalidate();
|
| - initialTexParams.fMinFilter = GR_GL_NEAREST;
|
| - initialTexParams.fMagFilter = GR_GL_NEAREST;
|
| - initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
|
| - initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_MAG_FILTER,
|
| - initialTexParams.fMagFilter));
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_MIN_FILTER,
|
| - initialTexParams.fMinFilter));
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_WRAP_S,
|
| - initialTexParams.fWrapS));
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_WRAP_T,
|
| - initialTexParams.fWrapT));
|
| + GrGLTexture::TexParams initialTexParams = set_initial_texture_params(this->glInterface(),
|
| + idDesc);
|
| +
|
| if (!this->uploadTexData(desc, idDesc.fInfo.fTarget, kNewTexture_UploadType, 0, 0,
|
| desc.fWidth, desc.fHeight,
|
| - desc.fConfig, srcData, rowBytes)) {
|
| + desc.fConfig, texels)) {
|
| GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
|
| return return_null_texture();
|
| }
|
| @@ -1147,31 +1461,29 @@ GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
|
| }
|
| tex = new GrGLTextureRenderTarget(this, desc, idDesc, rtIDDesc);
|
| } else {
|
| - tex = new GrGLTexture(this, desc, idDesc);
|
| + bool wasMipMapDataProvided = false;
|
| + if (texels.count() > 1) {
|
| + wasMipMapDataProvided = true;
|
| + }
|
| + tex = new GrGLTexture(this, desc, idDesc, wasMipMapDataProvided);
|
| }
|
| tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
|
| #ifdef TRACE_TEXTURE_CREATION
|
| SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n",
|
| - glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
|
| + glTexDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
|
| #endif
|
| return tex;
|
| }
|
|
|
| GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc,
|
| GrGpuResource::LifeCycle lifeCycle,
|
| - const void* srcData) {
|
| + const SkTArray<SkMipMapLevel>& texels) {
|
| // Make sure that we're not flipping Y.
|
| if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
|
| return return_null_texture();
|
| }
|
|
|
| - GrGLTexture::IDDesc idDesc;
|
| - idDesc.fInfo.fID = 0;
|
| - GL_CALL(GenTextures(1, &idDesc.fInfo.fID));
|
| - idDesc.fLifeCycle = lifeCycle;
|
| - // We only support GL_TEXTURE_2D at the moment.
|
| - idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
|
| -
|
| + GrGLTexture::IDDesc idDesc = generate_gl_texture(this->glInterface(), lifeCycle);
|
| if (!idDesc.fInfo.fID) {
|
| return return_null_texture();
|
| }
|
| @@ -1179,30 +1491,10 @@ GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc,
|
| this->setScratchTextureUnit();
|
| GL_CALL(BindTexture(idDesc.fInfo.fTarget, idDesc.fInfo.fID));
|
|
|
| - // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
|
| - // drivers have a bug where an FBO won't be complete if it includes a
|
| - // texture that is not mipmap complete (considering the filter in use).
|
| - GrGLTexture::TexParams initialTexParams;
|
| - // we only set a subset here so invalidate first
|
| - initialTexParams.invalidate();
|
| - initialTexParams.fMinFilter = GR_GL_NEAREST;
|
| - initialTexParams.fMagFilter = GR_GL_NEAREST;
|
| - initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
|
| - initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_MAG_FILTER,
|
| - initialTexParams.fMagFilter));
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_MIN_FILTER,
|
| - initialTexParams.fMinFilter));
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_WRAP_S,
|
| - initialTexParams.fWrapS));
|
| - GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
|
| - GR_GL_TEXTURE_WRAP_T,
|
| - initialTexParams.fWrapT));
|
| -
|
| - if (!this->uploadCompressedTexData(desc, idDesc.fInfo.fTarget, srcData)) {
|
| + GrGLTexture::TexParams initialTexParams = set_initial_texture_params(this->glInterface(),
|
| + idDesc);
|
| +
|
| + if (!this->uploadCompressedTexData(desc, idDesc.fInfo.fTarget, texels)) {
|
| GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
|
| return return_null_texture();
|
| }
|
| @@ -1212,7 +1504,7 @@ GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc,
|
| tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
|
| #ifdef TRACE_TEXTURE_CREATION
|
| SkDebugf("--- new compressed texture [%d] size=(%d %d) config=%d\n",
|
| - glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
|
| + glTexDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
|
| #endif
|
| return tex;
|
| }
|
| @@ -2759,7 +3051,7 @@ void GrGLGpu::bindSurfaceFBOForCopy(GrSurface* surface, GrGLenum fboTarget, GrGL
|
| }
|
|
|
| void GrGLGpu::unbindTextureFBOForCopy(GrGLenum fboTarget, GrSurface* surface) {
|
| - // bindSurfaceFBOForCopy temporarily binds textures that are not render targets to
|
| + // bindSurfaceFBOForCopy temporarily binds textures that are not render targets to
|
| if (!surface->asRenderTarget()) {
|
| SkASSERT(surface->asTexture());
|
| GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target();
|
| @@ -2837,7 +3129,7 @@ bool GrGLGpu::onCopySurface(GrSurface* dst,
|
| this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
|
| return true;
|
| }
|
| -
|
| +
|
| if (can_copy_texsubimage(dst, src, this)) {
|
| this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstPoint);
|
| return true;
|
| @@ -2880,7 +3172,7 @@ void GrGLGpu::createCopyPrograms() {
|
| vshaderTxt.append(";");
|
| vTexCoord.appendDecl(this->glCaps().glslCaps(), &vshaderTxt);
|
| vshaderTxt.append(";");
|
| -
|
| +
|
| vshaderTxt.append(
|
| "// Copy Program VS\n"
|
| "void main() {"
|
| @@ -2919,7 +3211,7 @@ void GrGLGpu::createCopyPrograms() {
|
| fsOutName,
|
| GrGLSLTexture2DFunctionName(kVec2f_GrSLType, this->glslGeneration())
|
| );
|
| -
|
| +
|
| GL_CALL_RET(fCopyPrograms[i].fProgram, CreateProgram());
|
| const char* str;
|
| GrGLint length;
|
|
|