Index: src/gpu/gl/GrGLRenderTarget.cpp
diff --git a/src/gpu/gl/GrGLRenderTarget.cpp b/src/gpu/gl/GrGLRenderTarget.cpp
index 0e8bd05bc735b009b0b0b0599b0293a49a02e699..8482ecdc6cf7f53df8e0722495ba49b562349bcb 100644
--- a/src/gpu/gl/GrGLRenderTarget.cpp
+++ b/src/gpu/gl/GrGLRenderTarget.cpp
@@ -9,47 +9,48 @@
 #include "GrGpuGL.h"
-#define GPUGL static_cast<GrGpuGL*>(getGpu())
-
+#define GPUGL static_cast<GrGpuGL*>(this->getGpu())
 #define GL_CALL(X) GR_GL_CALL(GPUGL->glInterface(), X)
-void GrGLRenderTarget::init(const GrSurfaceDesc& desc,
-                            const IDDesc& idDesc,
-                            const GrGLIRect& viewport,
-                            GrGLTexID* texID) {
-    fRTFBOID = idDesc.fRTFBOID;
-    fTexFBOID = idDesc.fTexFBOID;
-    fMSColorRenderbufferID = idDesc.fMSColorRenderbufferID;
-    fViewport = viewport;
-    fTexIDObj.reset(SkSafeRef(texID));
+// Because this class is virtually derived from GrSurface we must explicitly call its constructor.
+GrGLRenderTarget::GrGLRenderTarget(GrGpuGL* gpu, const GrSurfaceDesc& desc, const IDDesc& idDesc)
+    : GrSurface(gpu, idDesc.fIsWrapped, desc)
+    , INHERITED(gpu, idDesc.fIsWrapped, desc) {
+    this->init(desc, idDesc);
     this->registerWithCache();
 }
-GrGLRenderTarget::GrGLRenderTarget(GrGpuGL* gpu,
-                                   const IDDesc& idDesc,
-                                   const GrGLIRect& viewport,
-                                   GrGLTexID* texID,
-                                   GrGLTexture* texture)
-    : INHERITED(gpu, idDesc.fIsWrapped, texture, texture->desc()) {
-    SkASSERT(texID);
-    SkASSERT(texture);
-    // FBO 0 can't also be a texture, right?
-    SkASSERT(0 != idDesc.fRTFBOID);
-    SkASSERT(0 != idDesc.fTexFBOID);
+GrGLRenderTarget::GrGLRenderTarget(GrGpuGL* gpu, const GrSurfaceDesc& desc, const IDDesc& idDesc,
+                                   Derived)
+    : GrSurface(gpu, idDesc.fIsWrapped, desc)
+    , INHERITED(gpu, idDesc.fIsWrapped, desc) {
+    this->init(desc, idDesc);
+}
+
+void GrGLRenderTarget::init(const GrSurfaceDesc& desc, const IDDesc& idDesc) {
+    fRTFBOID = idDesc.fRTFBOID;
+    fTexFBOID = idDesc.fTexFBOID;
+    fMSColorRenderbufferID = idDesc.fMSColorRenderbufferID;
-    // we assume this is true, TODO: get rid of viewport as a param.
-    SkASSERT(viewport.fWidth == texture->width());
-    SkASSERT(viewport.fHeight == texture->height());
+    fViewport.fLeft = 0;
+    fViewport.fBottom = 0;
+    fViewport.fWidth = desc.fWidth;
+    fViewport.fHeight = desc.fHeight;
-    this->init(texture->desc(), idDesc, viewport, texID);
+    // We own one color value for each MSAA sample.
+    fColorValuesPerPixel = SkTMax(1, fDesc.fSampleCnt);
+    if (fTexFBOID != fRTFBOID) {
+        // If we own the resolve buffer then that is one more sample per pixel.
+        fColorValuesPerPixel += 1;
+    }
 }
-GrGLRenderTarget::GrGLRenderTarget(GrGpuGL* gpu,
-                                   const GrSurfaceDesc& desc,
-                                   const IDDesc& idDesc,
-                                   const GrGLIRect& viewport)
-    : INHERITED(gpu, idDesc.fIsWrapped, NULL, desc) {
-    this->init(desc, idDesc, viewport, NULL);
+size_t GrGLRenderTarget::gpuMemorySize() const {
+    SkASSERT(kUnknown_GrPixelConfig != fDesc.fConfig);
+    SkASSERT(!GrPixelConfigIsCompressed(fDesc.fConfig));
+    size_t colorBytes = GrBytesPerPixel(fDesc.fConfig);
+    SkASSERT(colorBytes > 0);
+    return fColorValuesPerPixel * fDesc.fWidth * fDesc.fHeight * colorBytes;
 }
 void GrGLRenderTarget::onRelease() {
@@ -67,7 +68,6 @@ void GrGLRenderTarget::onRelease() {
     fRTFBOID = 0;
     fTexFBOID = 0;
     fMSColorRenderbufferID = 0;
-    fTexIDObj.reset(NULL);
     INHERITED::onRelease();
 }
@@ -75,9 +75,5 @@ void GrGLRenderTarget::onAbandon() {
     fRTFBOID = 0;
     fTexFBOID = 0;
     fMSColorRenderbufferID = 0;
-    if (fTexIDObj.get()) {
-        fTexIDObj->abandon();
-        fTexIDObj.reset(NULL);
-    }
     INHERITED::onAbandon();
 }