Index: src/gpu/gl/GrGLGpu.cpp |
diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp |
index fe803de0a903bfb367116fc5fce42d6ef243a70e..dc50c1429ce44ea940c65a31b85693b1b45d6010 100644 |
--- a/src/gpu/gl/GrGLGpu.cpp |
+++ b/src/gpu/gl/GrGLGpu.cpp |
@@ -926,7 +926,7 @@ |
* @param succeeded Set to true if allocating and populating the texture completed |
* without error. |
*/ |
-static bool allocate_and_populate_uncompressed_texture(const GrSurfaceDesc& desc, |
+static void allocate_and_populate_uncompressed_texture(const GrSurfaceDesc& desc, |
const GrGLInterface& interface, |
const GrGLCaps& caps, |
GrGLenum target, |
@@ -934,7 +934,8 @@ |
GrGLenum externalFormat, |
GrGLenum externalType, |
const SkTArray<GrMipLevel>& texels, |
- int baseWidth, int baseHeight) { |
+ int baseWidth, int baseHeight, |
+ bool* succeeded) { |
CLEAR_ERROR_BEFORE_ALLOC(&interface); |
bool useTexStorage = caps.isConfigTexSupportEnabled(desc.fConfig); |
@@ -954,7 +955,7 @@ |
desc.fWidth, desc.fHeight)); |
GrGLenum error = check_alloc_error(desc, &interface); |
if (error != GR_GL_NO_ERROR) { |
- return false; |
+ *succeeded = false; |
} else { |
for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) { |
const void* currentMipData = texels[currentMipLevel].fPixels; |
@@ -975,48 +976,33 @@ |
externalFormat, externalType, |
currentMipData)); |
} |
- return true; |
+ *succeeded = true; |
} |
} else { |
- if (texels.empty()) { |
+ *succeeded = true; |
+ for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) { |
+ int twoToTheMipLevel = 1 << currentMipLevel; |
+ int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel); |
+ int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel); |
+ const void* currentMipData = texels[currentMipLevel].fPixels; |
+ // Even if currentMipData is nullptr, continue to call TexImage2D. |
+ // This will allocate texture memory which we can later populate. |
GL_ALLOC_CALL(&interface, |
TexImage2D(target, |
- 0, |
+ currentMipLevel, |
internalFormat, |
- baseWidth, |
- baseHeight, |
+ currentWidth, |
+ currentHeight, |
0, // border |
externalFormat, externalType, |
- nullptr)); |
+ currentMipData)); |
GrGLenum error = check_alloc_error(desc, &interface); |
if (error != GR_GL_NO_ERROR) { |
- return false; |
- } |
- } else { |
- for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) { |
- int twoToTheMipLevel = 1 << currentMipLevel; |
- int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel); |
- int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel); |
- const void* currentMipData = texels[currentMipLevel].fPixels; |
- // Even if curremtMipData is nullptr, continue to call TexImage2D. |
- // This will allocate texture memory which we can later populate. |
- GL_ALLOC_CALL(&interface, |
- TexImage2D(target, |
- currentMipLevel, |
- internalFormat, |
- currentWidth, |
- currentHeight, |
- 0, // border |
- externalFormat, externalType, |
- currentMipData)); |
- GrGLenum error = check_alloc_error(desc, &interface); |
- if (error != GR_GL_NO_ERROR) { |
- return false; |
- } |
- } |
- } |
- } |
- return true; |
+ *succeeded = false; |
+ break; |
+ } |
+ } |
+ } |
} |
/** |
@@ -1150,7 +1136,8 @@ |
for (int currentMipLevel = texelsShallowCopy.count() - 1; currentMipLevel >= 0; |
currentMipLevel--) { |
- SkASSERT(texelsShallowCopy[currentMipLevel].fPixels || kTransfer_UploadType == uploadType); |
+ SkASSERT(texelsShallowCopy[currentMipLevel].fPixels || |
+ kNewTexture_UploadType == uploadType || kTransfer_UploadType == uploadType); |
} |
const GrGLInterface* interface = this->glInterface(); |
@@ -1166,6 +1153,10 @@ |
int twoToTheMipLevel = 1 << currentMipLevel; |
int currentWidth = SkTMax(1, width / twoToTheMipLevel); |
int currentHeight = SkTMax(1, height / twoToTheMipLevel); |
+ |
+ if (texelsShallowCopy[currentMipLevel].fPixels == nullptr) { |
+ continue; |
+ } |
if (currentHeight > SK_MaxS32 || |
currentWidth > SK_MaxS32) { |
@@ -1202,7 +1193,7 @@ |
bool swFlipY = false; |
bool glFlipY = false; |
- if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin && !texelsShallowCopy.empty()) { |
+ if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { |
if (caps.unpackFlipYSupport()) { |
glFlipY = true; |
} else { |
@@ -1228,6 +1219,10 @@ |
char* buffer = (char*)tempStorage.reset(combined_buffer_size); |
for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) { |
+ if (texelsShallowCopy[currentMipLevel].fPixels == nullptr) { |
+ continue; |
+ } |
+ |
int twoToTheMipLevel = 1 << currentMipLevel; |
int currentWidth = SkTMax(1, width / twoToTheMipLevel); |
int currentHeight = SkTMax(1, height / twoToTheMipLevel); |
@@ -1274,9 +1269,6 @@ |
} else { |
return false; |
} |
- } |
- |
- if (!texelsShallowCopy.empty()) { |
if (glFlipY) { |
GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE)); |
} |
@@ -1289,10 +1281,10 @@ |
0 == left && 0 == top && |
desc.fWidth == width && desc.fHeight == height && |
!desc.fTextureStorageAllocator.fAllocateTextureStorage) { |
- succeeded = allocate_and_populate_uncompressed_texture(desc, *interface, caps, target, |
- internalFormat, externalFormat, |
- externalType, texelsShallowCopy, |
- width, height); |
+ allocate_and_populate_uncompressed_texture(desc, *interface, caps, target, |
+ internalFormat, externalFormat, |
+ externalType, texelsShallowCopy, |
+ width, height, &succeeded); |
} else { |
if (swFlipY || glFlipY) { |
top = desc.fHeight - (top + height); |
@@ -1302,6 +1294,9 @@ |
int twoToTheMipLevel = 1 << currentMipLevel; |
int currentWidth = SkTMax(1, width / twoToTheMipLevel); |
int currentHeight = SkTMax(1, height / twoToTheMipLevel); |
+ if (texelsShallowCopy[currentMipLevel].fPixels == nullptr) { |
+ continue; |
+ } |
GL_CALL(TexSubImage2D(target, |
currentMipLevel, |
@@ -1329,6 +1324,8 @@ |
UploadType uploadType, |
int left, int top, int width, int height) { |
SkASSERT(this->caps()->isConfigTexturable(desc.fConfig)); |
+ SkASSERT(kTransfer_UploadType != uploadType && |
+ (texels[0].fPixels || kNewTexture_UploadType != uploadType)); |
// No support for software flip y, yet... |
SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin); |
@@ -1369,7 +1366,9 @@ |
return false; |
} |
for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) { |
- SkASSERT(texels[currentMipLevel].fPixels || kTransfer_UploadType == uploadType); |
+ if (texels[currentMipLevel].fPixels == nullptr) { |
+ continue; |
+ } |
int twoToTheMipLevel = 1 << currentMipLevel; |
int currentWidth = SkTMax(1, width / twoToTheMipLevel); |
@@ -1848,15 +1847,14 @@ |
// We do not make SkTArray available outside of Skia, |
// and so we do not want to allow mipmaps to external |
// allocators just yet. |
- SkASSERT(texels.count() < 2); |
- |
- const void* pixels = nullptr; |
- if (!texels.empty()) { |
- pixels = texels.begin()->fPixels; |
- } |
+ SkASSERT(texels.count() == 1); |
+ SkSTArray<1, GrMipLevel> texelsShallowCopy(1); |
+ texelsShallowCopy.push_back(texels[0]); |
+ |
switch (desc.fTextureStorageAllocator.fAllocateTextureStorage( |
desc.fTextureStorageAllocator.fCtx, reinterpret_cast<GrBackendObject>(info), |
- desc.fWidth, desc.fHeight, desc.fConfig, pixels, desc.fOrigin)) { |
+ desc.fWidth, desc.fHeight, desc.fConfig, texelsShallowCopy[0].fPixels, |
+ desc.fOrigin)) { |
case GrTextureStorageAllocator::Result::kSucceededAndUploaded: |
return true; |
case GrTextureStorageAllocator::Result::kFailed: |
@@ -1867,7 +1865,7 @@ |
if (!this->uploadTexData(desc, info->fTarget, kNewTexture_UploadType, 0, 0, |
desc.fWidth, desc.fHeight, |
- desc.fConfig, texels)) { |
+ desc.fConfig, texelsShallowCopy)) { |
desc.fTextureStorageAllocator.fDeallocateTextureStorage( |
desc.fTextureStorageAllocator.fCtx, reinterpret_cast<GrBackendObject>(info)); |
return false; |
@@ -2481,8 +2479,7 @@ |
desc.fConfig = rtConfig; |
desc.fWidth = desc.fHeight = 16; |
desc.fFlags = kRenderTarget_GrSurfaceFlag; |
- SkAutoTUnref<GrTexture> temp(this->createTexture(desc, |
- SkBudgeted::kNo)); |
+ SkAutoTUnref<GrTexture> temp(this->createTexture(desc, SkBudgeted::kNo, nullptr, 0)); |
if (!temp) { |
return false; |
} |