| OLD | NEW |
| 1 /* | |
| 2 * Copyright 2011 Google Inc. | |
| 3 * | |
| 4 * Use of this source code is governed by a BSD-style license that can be | |
| 5 * found in the LICENSE file. | |
| 6 */ | |
| 7 | |
| 8 | |
| 9 #include "GrGpuGL.h" | |
| 10 #include "GrGLStencilBuffer.h" | |
| 11 #include "GrGLTextureRenderTarget.h" | |
| 12 #include "GrGpuResourceCacheAccess.h" | |
| 13 #include "GrOptDrawState.h" | |
| 14 #include "GrSurfacePriv.h" | |
| 15 #include "GrTemplates.h" | |
| 16 #include "GrTexturePriv.h" | |
| 17 #include "GrTypes.h" | |
| 18 #include "SkStrokeRec.h" | |
| 19 #include "SkTemplates.h" | |
| 20 | |
| 21 #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X) | |
| 22 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X) | |
| 23 | |
| 24 #define SKIP_CACHE_CHECK true | |
| 25 | |
| 26 #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR | |
| 27 #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface) | |
| 28 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call) | |
| 29 #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface) | |
| 30 #else | |
| 31 #define CLEAR_ERROR_BEFORE_ALLOC(iface) | |
| 32 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call) | |
| 33 #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR | |
| 34 #endif | |
| 35 | |
| 36 | |
| 37 /////////////////////////////////////////////////////////////////////////////// | |
| 38 | |
| 39 | |
| 40 static const GrGLenum gXfermodeCoeff2Blend[] = { | |
| 41 GR_GL_ZERO, | |
| 42 GR_GL_ONE, | |
| 43 GR_GL_SRC_COLOR, | |
| 44 GR_GL_ONE_MINUS_SRC_COLOR, | |
| 45 GR_GL_DST_COLOR, | |
| 46 GR_GL_ONE_MINUS_DST_COLOR, | |
| 47 GR_GL_SRC_ALPHA, | |
| 48 GR_GL_ONE_MINUS_SRC_ALPHA, | |
| 49 GR_GL_DST_ALPHA, | |
| 50 GR_GL_ONE_MINUS_DST_ALPHA, | |
| 51 GR_GL_CONSTANT_COLOR, | |
| 52 GR_GL_ONE_MINUS_CONSTANT_COLOR, | |
| 53 GR_GL_CONSTANT_ALPHA, | |
| 54 GR_GL_ONE_MINUS_CONSTANT_ALPHA, | |
| 55 | |
| 56 // extended blend coeffs | |
| 57 GR_GL_SRC1_COLOR, | |
| 58 GR_GL_ONE_MINUS_SRC1_COLOR, | |
| 59 GR_GL_SRC1_ALPHA, | |
| 60 GR_GL_ONE_MINUS_SRC1_ALPHA, | |
| 61 }; | |
| 62 | |
| 63 bool GrGLGpu::BlendCoeffReferencesConstant(GrBlendCoeff coeff) { | |
| 64 static const bool gCoeffReferencesBlendConst[] = { | |
| 65 false, | |
| 66 false, | |
| 67 false, | |
| 68 false, | |
| 69 false, | |
| 70 false, | |
| 71 false, | |
| 72 false, | |
| 73 false, | |
| 74 false, | |
| 75 true, | |
| 76 true, | |
| 77 true, | |
| 78 true, | |
| 79 | |
| 80 // extended blend coeffs | |
| 81 false, | |
| 82 false, | |
| 83 false, | |
| 84 false, | |
| 85 }; | |
| 86 return gCoeffReferencesBlendConst[coeff]; | |
| 87 GR_STATIC_ASSERT(kTotalGrBlendCoeffCount == | |
| 88 SK_ARRAY_COUNT(gCoeffReferencesBlendConst)); | |
| 89 | |
| 90 GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff); | |
| 91 GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff); | |
| 92 GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff); | |
| 93 GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff); | |
| 94 GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff); | |
| 95 GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff); | |
| 96 GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff); | |
| 97 GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff); | |
| 98 GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff); | |
| 99 GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff); | |
| 100 GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff); | |
| 101 GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff); | |
| 102 GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff); | |
| 103 GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff); | |
| 104 | |
| 105 GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff); | |
| 106 GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff); | |
| 107 GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff); | |
| 108 GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff); | |
| 109 | |
| 110 // the assertion for gXfermodeCoeff2Blend has to be in GrGpu scope | |
| 111 GR_STATIC_ASSERT(kTotalGrBlendCoeffCount == | |
| 112 SK_ARRAY_COUNT(gXfermodeCoeff2Blend)); | |
| 113 } | |
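| // Added illustration (not part of the original file): the table above is | |
| // indexed directly by GrBlendCoeff, which is exactly what the static asserts | |
| // above pin down. A hypothetical lookup helper would be just: | |
| // | |
| //   static GrGLenum blend_coeff_to_gl(GrBlendCoeff coeff) { | |
| //       SkASSERT(coeff < kTotalGrBlendCoeffCount); | |
| //       return gXfermodeCoeff2Blend[coeff]; // e.g. kSA_GrBlendCoeff -> GR_GL_SRC_ALPHA | |
| //   } | |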
| 114 | |
| 115 /////////////////////////////////////////////////////////////////////////////// | |
| 116 | |
| 117 static bool gPrintStartupSpew; | |
| 118 | |
| 119 GrGLGpu::GrGLGpu(const GrGLContext& ctx, GrContext* context) | |
| 120 : GrGpu(context) | |
| 121 , fGLContext(ctx) { | |
| 122 | |
| 123 SkASSERT(ctx.isInitialized()); | |
| 124 fCaps.reset(SkRef(ctx.caps())); | |
| 125 | |
| 126 fHWBoundTextureUniqueIDs.reset(this->glCaps().maxFragmentTextureUnits()); | |
| 127 | |
| 128 GrGLClearErr(fGLContext.interface()); | |
| 129 if (gPrintStartupSpew) { | |
| 130 const GrGLubyte* vendor; | |
| 131 const GrGLubyte* renderer; | |
| 132 const GrGLubyte* version; | |
| 133 GL_CALL_RET(vendor, GetString(GR_GL_VENDOR)); | |
| 134 GL_CALL_RET(renderer, GetString(GR_GL_RENDERER)); | |
| 135 GL_CALL_RET(version, GetString(GR_GL_VERSION)); | |
| 136 SkDebugf("------------------------- create GrGLGpu %p --------------\n", | |
| 137 this); | |
| 138 SkDebugf("------ VENDOR %s\n", vendor); | |
| 139 SkDebugf("------ RENDERER %s\n", renderer); | |
| 140 SkDebugf("------ VERSION %s\n", version); | |
| 141 SkDebugf("------ EXTENSIONS\n"); | |
| 142 ctx.extensions().print(); | |
| 143 SkDebugf("\n"); | |
| 144 SkDebugf(this->glCaps().dump().c_str()); | |
| 145 } | |
| 146 | |
| 147 fProgramCache = SkNEW_ARGS(ProgramCache, (this)); | |
| 148 | |
| 149 SkASSERT(this->glCaps().maxVertexAttributes() >= GrGeometryProcessor::kMaxVertexAttribs); | |
| 150 | |
| 151 fLastSuccessfulStencilFmtIdx = 0; | |
| 152 fHWProgramID = 0; | |
| 153 | |
| 154 if (this->glCaps().pathRenderingSupport()) { | |
| 155 fPathRendering.reset(new GrGLPathRendering(this)); | |
| 156 } | |
| 157 } | |
| 158 | |
| 159 GrGLGpu::~GrGLGpu() { | |
| 160 if (0 != fHWProgramID) { | |
| 161 // detach the current program so there is no confusion on OpenGL's part | |
| 162 // that we want it to be deleted | |
| 163 SkASSERT(fHWProgramID == fCurrentProgram->programID()); | |
| 164 GL_CALL(UseProgram(0)); | |
| 165 } | |
| 166 | |
| 167 delete fProgramCache; | |
| 168 } | |
| 169 | |
| 170 void GrGLGpu::contextAbandoned() { | |
| 171 INHERITED::contextAbandoned(); | |
| 172 fProgramCache->abandon(); | |
| 173 fHWProgramID = 0; | |
| 174 if (this->glCaps().pathRenderingSupport()) { | |
| 175 this->glPathRendering()->abandonGpuResources(); | |
| 176 } | |
| 177 } | |
| 178 | |
| 179 /////////////////////////////////////////////////////////////////////////////// | |
| 180 GrPixelConfig GrGLGpu::preferredReadPixelsConfig(GrPixelConfig readConfig, | |
| 181 GrPixelConfig surfaceConfig) const { | |
| 182 if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == readConfig) { | |
| 183 return kBGRA_8888_GrPixelConfig; | |
| 184 } else if (this->glContext().isMesa() && | |
| 185 GrBytesPerPixel(readConfig) == 4 && | |
| 186 GrPixelConfigSwapRAndB(readConfig) == surfaceConfig) { | |
| 187 // Mesa 3D takes a slow path when reading back BGRA from an RGBA surface and vice-versa. | |
| 188 // Perhaps this should be guarded by some compile-time or runtime check. | |
| 189 return surfaceConfig; | |
| 190 } else if (readConfig == kBGRA_8888_GrPixelConfig | |
| 191 && !this->glCaps().readPixelsSupported( | |
| 192 this->glInterface(), | |
| 193 GR_GL_BGRA, | |
| 194 GR_GL_UNSIGNED_BYTE, | |
| 195 surfaceConfig | |
| 196 )) { | |
| 197 return kRGBA_8888_GrPixelConfig; | |
| 198 } else { | |
| 199 return readConfig; | |
| 200 } | |
| 201 } | |
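| // Worked example (added note, not in the original file): on a Mesa driver, | |
| // reading kBGRA_8888 back from a kRGBA_8888 surface hits the swapped-R/B | |
| // branch above, so we report the surface's own config as preferred: | |
| // | |
| //   preferredReadPixelsConfig(kBGRA_8888_GrPixelConfig, kRGBA_8888_GrPixelConfig) | |
| //       -> kRGBA_8888_GrPixelConfig  // the caller converts on the CPU instead | |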
| 202 | |
| 203 GrPixelConfig GrGLGpu::preferredWritePixelsConfig(GrPixelConfig writeConfig, | |
| 204 GrPixelConfig surfaceConfig) const { | |
| 205 if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == writeConfig) { | |
| 206 return kBGRA_8888_GrPixelConfig; | |
| 207 } else { | |
| 208 return writeConfig; | |
| 209 } | |
| 210 } | |
| 211 | |
| 212 bool GrGLGpu::canWriteTexturePixels(const GrTexture* texture, GrPixelConfig srcConfig) const { | |
| 213 if (kIndex_8_GrPixelConfig == srcConfig || kIndex_8_GrPixelConfig == texture->config()) { | |
| 214 return false; | |
| 215 } | |
| 216 if (srcConfig != texture->config() && kGLES_GrGLStandard == this->glStandard()) { | |
| 217 // In general ES2 requires the internal format of the texture and the format of the src | |
| 218 // pixels to match. However, it may or may not be possible to upload BGRA data to an RGBA | |
| 219 // texture. It depends upon which extension added BGRA. The Apple extension allows it | |
| 220 // (BGRA's internal format is RGBA) while the EXT extension does not (BGRA is its own | |
| 221 // internal format). | |
| 222 if (this->glCaps().isConfigTexturable(kBGRA_8888_GrPixelConfig) && | |
| 223 !this->glCaps().bgraIsInternalFormat() && | |
| 224 kBGRA_8888_GrPixelConfig == srcConfig && | |
| 225 kRGBA_8888_GrPixelConfig == texture->config()) { | |
| 226 return true; | |
| 227 } else { | |
| 228 return false; | |
| 229 } | |
| 230 } else { | |
| 231 return true; | |
| 232 } | |
| 233 } | |
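| // Added illustration of the one cross-config upload allowed above: with the | |
| // Apple-style extension (BGRA is only an external format; the internal format | |
| // stays RGBA), BGRA pixels may be uploaded into an RGBA texture: | |
| // | |
| //   canWriteTexturePixels(rgbaTex /* kRGBA_8888 */, kBGRA_8888_GrPixelConfig) -> true | |
| // | |
| // With the EXT flavor (BGRA is its own internal format) the same call is false. | |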
| 234 | |
| 235 bool GrGLGpu::fullReadPixelsIsFasterThanPartial() const { | |
| 236 return SkToBool(GR_GL_FULL_READPIXELS_FASTER_THAN_PARTIAL); | |
| 237 } | |
| 238 | |
| 239 void GrGLGpu::onResetContext(uint32_t resetBits) { | |
| 240 // we don't use the z-buffer (depth buffer) at all | |
| 241 if (resetBits & kMisc_GrGLBackendState) { | |
| 242 GL_CALL(Disable(GR_GL_DEPTH_TEST)); | |
| 243 GL_CALL(DepthMask(GR_GL_FALSE)); | |
| 244 | |
| 245 fHWDrawFace = GrDrawState::kInvalid_DrawFace; | |
| 246 fHWDitherEnabled = kUnknown_TriState; | |
| 247 | |
| 248 if (kGL_GrGLStandard == this->glStandard()) { | |
| 249 // Desktop-only state that we never change | |
| 250 if (!this->glCaps().isCoreProfile()) { | |
| 251 GL_CALL(Disable(GR_GL_POINT_SMOOTH)); | |
| 252 GL_CALL(Disable(GR_GL_LINE_SMOOTH)); | |
| 253 GL_CALL(Disable(GR_GL_POLYGON_SMOOTH)); | |
| 254 GL_CALL(Disable(GR_GL_POLYGON_STIPPLE)); | |
| 255 GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP)); | |
| 256 GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP)); | |
| 257 } | |
| 258 // The Windows NVIDIA driver has GL_ARB_imaging in the extension string when using a | |
| 259 // core profile. This seems like a bug since the core spec removes any mention of | |
| 260 // GL_ARB_imaging. | |
| 261 if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) { | |
| 262 GL_CALL(Disable(GR_GL_COLOR_TABLE)); | |
| 263 } | |
| 264 GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL)); | |
| 265 // Since ES doesn't support glPointSize at all we always use the VS to | |
| 266 // set the point size | |
| 267 GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE)); | |
| 268 | |
| 269 // We should set glPolygonMode(FRONT_AND_BACK,FILL) here, too. It isn't | |
| 270 // currently part of our gl interface. There are probably others as | |
| 271 // well. | |
| 272 } | |
| 273 | |
| 274 if (kGLES_GrGLStandard == this->glStandard() && | |
| 275 fGLContext.hasExtension("GL_ARM_shader_framebuffer_fetch")) { | |
| 276 // The ARM extension requires specifically enabling MSAA fetching per sample. | |
| 277 // On some devices this may have a perf hit. Also, multiple render targets are disabled. | |
| 278 GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE_ARM)); | |
| 279 } | |
| 280 fHWWriteToColor = kUnknown_TriState; | |
| 281 // we only ever use lines in hairline mode | |
| 282 GL_CALL(LineWidth(1)); | |
| 283 } | |
| 284 | |
| 285 if (resetBits & kMSAAEnable_GrGLBackendState) { | |
| 286 fMSAAEnabled = kUnknown_TriState; | |
| 287 } | |
| 288 | |
| 289 fHWActiveTextureUnitIdx = -1; // invalid | |
| 290 | |
| 291 if (resetBits & kTextureBinding_GrGLBackendState) { | |
| 292 for (int s = 0; s < fHWBoundTextureUniqueIDs.count(); ++s) { | |
| 293 fHWBoundTextureUniqueIDs[s] = SK_InvalidUniqueID; | |
| 294 } | |
| 295 } | |
| 296 | |
| 297 if (resetBits & kBlend_GrGLBackendState) { | |
| 298 fHWBlendState.invalidate(); | |
| 299 } | |
| 300 | |
| 301 if (resetBits & kView_GrGLBackendState) { | |
| 302 fHWScissorSettings.invalidate(); | |
| 303 fHWViewport.invalidate(); | |
| 304 } | |
| 305 | |
| 306 if (resetBits & kStencil_GrGLBackendState) { | |
| 307 fHWStencilSettings.invalidate(); | |
| 308 fHWStencilTestEnabled = kUnknown_TriState; | |
| 309 } | |
| 310 | |
| 311 // Vertex | |
| 312 if (resetBits & kVertex_GrGLBackendState) { | |
| 313 fHWGeometryState.invalidate(); | |
| 314 } | |
| 315 | |
| 316 if (resetBits & kRenderTarget_GrGLBackendState) { | |
| 317 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID; | |
| 318 } | |
| 319 | |
| 320 if (resetBits & kPathRendering_GrGLBackendState) { | |
| 321 if (this->caps()->pathRenderingSupport()) { | |
| 322 this->glPathRendering()->resetContext(); | |
| 323 } | |
| 324 } | |
| 325 | |
| 326 // we assume these values | |
| 327 if (resetBits & kPixelStore_GrGLBackendState) { | |
| 328 if (this->glCaps().unpackRowLengthSupport()) { | |
| 329 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); | |
| 330 } | |
| 331 if (this->glCaps().packRowLengthSupport()) { | |
| 332 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0)); | |
| 333 } | |
| 334 if (this->glCaps().unpackFlipYSupport()) { | |
| 335 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE)); | |
| 336 } | |
| 337 if (this->glCaps().packFlipYSupport()) { | |
| 338 GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE)); | |
| 339 } | |
| 340 } | |
| 341 | |
| 342 if (resetBits & kProgram_GrGLBackendState) { | |
| 343 fHWProgramID = 0; | |
| 344 } | |
| 345 } | |
| 346 | |
| 347 namespace { | |
| 348 | |
| 349 GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget) { | |
| 350 // By default, GrRenderTargets are GL's normal orientation so that they | |
| 351 // can be drawn to by the outside world without the client having | |
| 352 // to render upside down. | |
| 353 if (kDefault_GrSurfaceOrigin == origin) { | |
| 354 return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin; | |
| 355 } else { | |
| 356 return origin; | |
| 357 } | |
| 358 } | |
| 359 | |
| 360 } | |
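| // The mapping above, spelled out (added note, not in the original file): | |
| //   resolve_origin(kDefault_GrSurfaceOrigin, true /*RT*/) -> kBottomLeft_GrSurfaceOrigin | |
| //   resolve_origin(kDefault_GrSurfaceOrigin, false)       -> kTopLeft_GrSurfaceOrigin | |
| //   resolve_origin(kTopLeft_GrSurfaceOrigin, any)         -> kTopLeft_GrSurfaceOrigin | |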
| 361 | |
| 362 GrTexture* GrGLGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc) { | |
| 363 if (!this->configToGLFormats(desc.fConfig, false, NULL, NULL, NULL)) { | |
| 364 return NULL; | |
| 365 } | |
| 366 | |
| 367 if (0 == desc.fTextureHandle) { | |
| 368 return NULL; | |
| 369 } | |
| 370 | |
| 371 int maxSize = this->caps()->maxTextureSize(); | |
| 372 if (desc.fWidth > maxSize || desc.fHeight > maxSize) { | |
| 373 return NULL; | |
| 374 } | |
| 375 | |
| 376 GrGLTexture::IDDesc idDesc; | |
| 377 GrSurfaceDesc surfDesc; | |
| 378 | |
| 379 idDesc.fTextureID = static_cast<GrGLuint>(desc.fTextureHandle); | |
| 380 idDesc.fIsWrapped = true; | |
| 381 | |
| 382 // next line relies on GrBackendTextureDesc's flags matching GrTexture's | |
| 383 surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags; | |
| 384 surfDesc.fWidth = desc.fWidth; | |
| 385 surfDesc.fHeight = desc.fHeight; | |
| 386 surfDesc.fConfig = desc.fConfig; | |
| 387 surfDesc.fSampleCnt = desc.fSampleCnt; | |
| 388 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag); | |
| 389 // FIXME: this should be calling resolve_origin(), but Chrome code is currently | |
| 390 // assuming the old behaviour, which is that backend textures are always | |
| 391 // BottomLeft, even for non-RT's. Once Chrome is fixed, change this to: | |
| 392 // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget); | |
| 393 if (kDefault_GrSurfaceOrigin == desc.fOrigin) { | |
| 394 surfDesc.fOrigin = kBottomLeft_GrSurfaceOrigin; | |
| 395 } else { | |
| 396 surfDesc.fOrigin = desc.fOrigin; | |
| 397 } | |
| 398 | |
| 399 GrGLTexture* texture = NULL; | |
| 400 if (renderTarget) { | |
| 401 GrGLRenderTarget::IDDesc rtIDDesc; | |
| 402 if (!this->createRenderTargetObjects(surfDesc, idDesc.fTextureID, &rtIDDesc)) { | |
| 403 return NULL; | |
| 404 } | |
| 405 texture = SkNEW_ARGS(GrGLTextureRenderTarget, (this, surfDesc, idDesc, rtIDDesc)); | |
| 406 } else { | |
| 407 texture = SkNEW_ARGS(GrGLTexture, (this, surfDesc, idDesc)); | |
| 408 } | |
| 409 if (NULL == texture) { | |
| 410 return NULL; | |
| 411 } | |
| 412 | |
| 413 return texture; | |
| 414 } | |
| 415 | |
| 416 GrRenderTarget* GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc) { | |
| 417 GrGLRenderTarget::IDDesc idDesc; | |
| 418 idDesc.fRTFBOID = static_cast<GrGLuint>(wrapDesc.fRenderTargetHandle); | |
| 419 idDesc.fMSColorRenderbufferID = 0; | |
| 420 idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID; | |
| 421 idDesc.fIsWrapped = true; | |
| 422 | |
| 423 GrSurfaceDesc desc; | |
| 424 desc.fConfig = wrapDesc.fConfig; | |
| 425 desc.fFlags = kCheckAllocation_GrSurfaceFlag; | |
| 426 desc.fWidth = wrapDesc.fWidth; | |
| 427 desc.fHeight = wrapDesc.fHeight; | |
| 428 desc.fSampleCnt = wrapDesc.fSampleCnt; | |
| 429 desc.fOrigin = resolve_origin(wrapDesc.fOrigin, true); | |
| 430 | |
| 431 GrRenderTarget* tgt = SkNEW_ARGS(GrGLRenderTarget, (this, desc, idDesc)); | |
| 432 if (wrapDesc.fStencilBits) { | |
| 433 GrGLStencilBuffer::Format format; | |
| 434 format.fInternalFormat = GrGLStencilBuffer::kUnknownInternalFormat; | |
| 435 format.fPacked = false; | |
| 436 format.fStencilBits = wrapDesc.fStencilBits; | |
| 437 format.fTotalBits = wrapDesc.fStencilBits; | |
| 438 static const bool kIsSBWrapped = false; | |
| 439 GrGLStencilBuffer* sb = SkNEW_ARGS(GrGLStencilBuffer, | |
| 440 (this, | |
| 441 kIsSBWrapped, | |
| 442 0, | |
| 443 desc.fWidth, | |
| 444 desc.fHeight, | |
| 445 desc.fSampleCnt, | |
| 446 format)); | |
| 447 tgt->setStencilBuffer(sb); | |
| 448 sb->unref(); | |
| 449 } | |
| 450 return tgt; | |
| 451 } | |
| 452 | |
| 453 //////////////////////////////////////////////////////////////////////////////// | |
| 454 | |
| 455 bool GrGLGpu::onWriteTexturePixels(GrTexture* texture, | |
| 456 int left, int top, int width, int height, | |
| 457 GrPixelConfig config, const void* buffer, | |
| 458 size_t rowBytes) { | |
| 459 if (NULL == buffer) { | |
| 460 return false; | |
| 461 } | |
| 462 GrGLTexture* glTex = static_cast<GrGLTexture*>(texture); | |
| 463 | |
| 464 this->setScratchTextureUnit(); | |
| 465 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTex->textureID())); | |
| 466 | |
| 467 bool success = false; | |
| 468 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) { | |
| 469 // We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels() | |
| 470 SkASSERT(config == glTex->desc().fConfig); | |
| 471 success = this->uploadCompressedTexData(glTex->desc(), buffer, false, left, top, width, | |
| 472 height); | |
| 473 } else { | |
| 474 success = this->uploadTexData(glTex->desc(), false, left, top, width, height, config, | |
| 475 buffer, rowBytes); | |
| 476 } | |
| 477 | |
| 478 if (success) { | |
| 479 texture->texturePriv().dirtyMipMaps(true); | |
| 480 return true; | |
| 481 } | |
| 482 | |
| 483 return false; | |
| 484 } | |
| 485 | |
| 486 static bool adjust_pixel_ops_params(int surfaceWidth, | |
| 487 int surfaceHeight, | |
| 488 size_t bpp, | |
| 489 int* left, int* top, int* width, int* height, | |
| 490 const void** data, | |
| 491 size_t* rowBytes) { | |
| 492 if (!*rowBytes) { | |
| 493 *rowBytes = *width * bpp; | |
| 494 } | |
| 495 | |
| 496 SkIRect subRect = SkIRect::MakeXYWH(*left, *top, *width, *height); | |
| 497 SkIRect bounds = SkIRect::MakeWH(surfaceWidth, surfaceHeight); | |
| 498 | |
| 499 if (!subRect.intersect(bounds)) { | |
| 500 return false; | |
| 501 } | |
| 502 *data = reinterpret_cast<const void*>(reinterpret_cast<intptr_t>(*data) + | |
| 503 (subRect.fTop - *top) * *rowBytes + (subRect.fLeft - *left) * bpp); | |
| 504 | |
| 505 *left = subRect.fLeft; | |
| 506 *top = subRect.fTop; | |
| 507 *width = subRect.width(); | |
| 508 *height = subRect.height(); | |
| 509 return true; | |
| 510 } | |
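| // Worked example (added for clarity, illustrative numbers only): for a 100x100 | |
| // surface with bpp = 4 and a requested rect of left = -10, top = 0, 50x50 at | |
| // rowBytes = 200, the rect clips to (0, 0, 40, 50) and *data advances by | |
| //   (0 - 0) * 200 + (0 - (-10)) * 4 = 40 bytes, | |
| // i.e. onto the first pixel that actually lies inside the surface. | |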
| 511 | |
| 512 static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc, | |
| 513 const GrGLInterface* interface) { | |
| 514 if (SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag)) { | |
| 515 return GR_GL_GET_ERROR(interface); | |
| 516 } else { | |
| 517 return CHECK_ALLOC_ERROR(interface); | |
| 518 } | |
| 519 } | |
| 520 | |
| 521 bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc, | |
| 522 bool isNewTexture, | |
| 523 int left, int top, int width, int height, | |
| 524 GrPixelConfig dataConfig, | |
| 525 const void* data, | |
| 526 size_t rowBytes) { | |
| 527 SkASSERT(data || isNewTexture); | |
| 528 | |
| 529 // If we're uploading compressed data then we should be using uploadCompressedTexData | |
| 530 SkASSERT(!GrPixelConfigIsCompressed(dataConfig)); | |
| 531 | |
| 532 size_t bpp = GrBytesPerPixel(dataConfig); | |
| 533 if (!adjust_pixel_ops_params(desc.fWidth, desc.fHeight, bpp, &left, &top, | |
| 534 &width, &height, &data, &rowBytes)) { | |
| 535 return false; | |
| 536 } | |
| 537 size_t trimRowBytes = width * bpp; | |
| 538 | |
| 539 // in case we need a temporary, trimmed copy of the src pixels | |
| 540 GrAutoMalloc<128 * 128> tempStorage; | |
| 541 | |
| 542 // We currently lazily create MIPMAPs when we see a draw with | |
| 543 // GrTextureParams::kMipMap_FilterMode. Using texture storage requires that the | |
| 544 // MIP levels are all created when the texture is created. So for now we don't use | |
| 545 // texture storage. | |
| 546 bool useTexStorage = false && | |
| 547 isNewTexture && | |
| 548 this->glCaps().texStorageSupport(); | |
| 549 | |
| 550 if (useTexStorage && kGL_GrGLStandard == this->glStandard()) { | |
| 551 // 565 is not a sized internal format on desktop GL. So on desktop with | |
| 552 // 565 we always use an unsized internal format to let the system pick | |
| 553 // the best sized format to convert the 565 data to. Since TexStorage | |
| 554 // only allows sized internal formats we will instead use TexImage2D. | |
| 555 useTexStorage = desc.fConfig != kRGB_565_GrPixelConfig; | |
| 556 } | |
| 557 | |
| 558 GrGLenum internalFormat = 0x0; // suppress warning | |
| 559 GrGLenum externalFormat = 0x0; // suppress warning | |
| 560 GrGLenum externalType = 0x0; // suppress warning | |
| 561 | |
| 562 // glTexStorage requires sized internal formats on both desktop and ES. ES2 requires an unsized | |
| 563 // format for glTexImage, unlike ES3 and desktop. | |
| 564 bool useSizedFormat = useTexStorage; | |
| 565 if (kGL_GrGLStandard == this->glStandard() || | |
| 566 (this->glVersion() >= GR_GL_VER(3, 0) && | |
| 567 // ES3 only works with the sized BGRA8 format if "GL_APPLE_texture_format_BGRA8888" is enabled | |
| 568 (kBGRA_8888_GrPixelConfig != dataConfig || !this->glCaps().bgraIsInternalFormat()))) { | |
| 569 useSizedFormat = true; | |
| 570 } | |
| 571 | |
| 572 if (!this->configToGLFormats(dataConfig, useSizedFormat, &internalFormat, | |
| 573 &externalFormat, &externalType)) { | |
| 574 return false; | |
| 575 } | |
| 576 | |
| 577 /* | |
| 578 * check whether to allocate a temporary buffer for flipping y or | |
| 579 * because our srcData has extra bytes past each row. If so, we need | |
| 580 * to trim those off here, since GL ES may not let us specify | |
| 581 * GL_UNPACK_ROW_LENGTH. | |
| 582 */ | |
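| // Concrete example of the cases below (added note): uploading a 200-pixel-wide | |
| // RGBA sub-rect (bpp = 4) whose source rows are padded to rowBytes = 1024 gives | |
| // trimRowBytes = 800. If GR_GL_UNPACK_ROW_LENGTH is supported we set it to | |
| // 1024 / 4 = 256 pixels; otherwise each row is repacked tightly into | |
| // tempStorage on the CPU before the upload. | |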
| 583 bool restoreGLRowLength = false; | |
| 584 bool swFlipY = false; | |
| 585 bool glFlipY = false; | |
| 586 if (data) { | |
| 587 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { | |
| 588 if (this->glCaps().unpackFlipYSupport()) { | |
| 589 glFlipY = true; | |
| 590 } else { | |
| 591 swFlipY = true; | |
| 592 } | |
| 593 } | |
| 594 if (this->glCaps().unpackRowLengthSupport() && !swFlipY) { | |
| 595 // can't use this for flipping, only non-neg values allowed. :( | |
| 596 if (rowBytes != trimRowBytes) { | |
| 597 GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp); | |
| 598 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength)); | |
| 599 restoreGLRowLength = true; | |
| 600 } | |
| 601 } else { | |
| 602 if (trimRowBytes != rowBytes || swFlipY) { | |
| 603 // copy data into our new storage, skipping the trailing bytes | |
| 604 size_t trimSize = height * trimRowBytes; | |
| 605 const char* src = (const char*)data; | |
| 606 if (swFlipY) { | |
| 607 src += (height - 1) * rowBytes; | |
| 608 } | |
| 609 char* dst = (char*)tempStorage.reset(trimSize); | |
| 610 for (int y = 0; y < height; y++) { | |
| 611 memcpy(dst, src, trimRowBytes); | |
| 612 if (swFlipY) { | |
| 613 src -= rowBytes; | |
| 614 } else { | |
| 615 src += rowBytes; | |
| 616 } | |
| 617 dst += trimRowBytes; | |
| 618 } | |
| 619 // now point data to our copied version | |
| 620 data = tempStorage.get(); | |
| 621 } | |
| 622 } | |
| 623 if (glFlipY) { | |
| 624 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE)); | |
| 625 } | |
| 626 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, | |
| 627 static_cast<GrGLint>(GrUnpackAlignment(dataConfig)))); | |
| 628 } | |
| 629 bool succeeded = true; | |
| 630 if (isNewTexture && | |
| 631 0 == left && 0 == top && | |
| 632 desc.fWidth == width && desc.fHeight == height) { | |
| 633 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); | |
| 634 if (useTexStorage) { | |
| 635 // We never resize or change formats of textures. | |
| 636 GL_ALLOC_CALL(this->glInterface(), | |
| 637 TexStorage2D(GR_GL_TEXTURE_2D, | |
| 638 1, // levels | |
| 639 internalFormat, | |
| 640 desc.fWidth, desc.fHeight)); | |
| 641 } else { | |
| 642 GL_ALLOC_CALL(this->glInterface(), | |
| 643 TexImage2D(GR_GL_TEXTURE_2D, | |
| 644 0, // level | |
| 645 internalFormat, | |
| 646 desc.fWidth, desc.fHeight, | |
| 647 0, // border | |
| 648 externalFormat, externalType, | |
| 649 data)); | |
| 650 } | |
| 651 GrGLenum error = check_alloc_error(desc, this->glInterface()); | |
| 652 if (error != GR_GL_NO_ERROR) { | |
| 653 succeeded = false; | |
| 654 } else { | |
| 655 // if we have data and we used TexStorage to create the texture, we | |
| 656 // now upload with TexSubImage. | |
| 657 if (data && useTexStorage) { | |
| 658 GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D, | |
| 659 0, // level | |
| 660 left, top, | |
| 661 width, height, | |
| 662 externalFormat, externalType, | |
| 663 data)); | |
| 664 } | |
| 665 } | |
| 666 } else { | |
| 667 if (swFlipY || glFlipY) { | |
| 668 top = desc.fHeight - (top + height); | |
| 669 } | |
| 670 GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D, | |
| 671 0, // level | |
| 672 left, top, | |
| 673 width, height, | |
| 674 externalFormat, externalType, data)); | |
| 675 } | |
| 676 | |
| 677 if (restoreGLRowLength) { | |
| 678 SkASSERT(this->glCaps().unpackRowLengthSupport()); | |
| 679 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); | |
| 680 } | |
| 681 if (glFlipY) { | |
| 682 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE)); | |
| 683 } | |
| 684 return succeeded; | |
| 685 } | |
| 686 | |
| 687 // TODO: This function is using a lot of wonky semantics like, if width == -1 | |
| 688 // then set width = desc.fWidth ... blah. A better way to do it might be to | |
| 689 // create a CompressedTexData struct that takes a desc/ptr and figures out | |
| 690 // the proper upload semantics. Then users can construct this function how they | |
| 691 // see fit if they want to go against the "standard" way to do it. | |
| 692 bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc, | |
| 693 const void* data, | |
| 694 bool isNewTexture, | |
| 695 int left, int top, int width, int height) { | |
| 696 SkASSERT(data || isNewTexture); | |
| 697 | |
| 698 // No support for software flip y, yet... | |
| 699 SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin); | |
| 700 | |
| 701 if (-1 == width) { | |
| 702 width = desc.fWidth; | |
| 703 } | |
| 704 #ifdef SK_DEBUG | |
| 705 else { | |
| 706 SkASSERT(width <= desc.fWidth); | |
| 707 } | |
| 708 #endif | |
| 709 | |
| 710 if (-1 == height) { | |
| 711 height = desc.fHeight; | |
| 712 } | |
| 713 #ifdef SK_DEBUG | |
| 714 else { | |
| 715 SkASSERT(height <= desc.fHeight); | |
| 716 } | |
| 717 #endif | |
| 718 | |
| 719 // Make sure that the width and height that we pass to OpenGL | |
| 720 // are multiples of the block size. | |
| 721 size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height); | |
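| // For instance (illustrative numbers): with a 4x4-block format storing 8 bytes | |
| // per block, such as ETC1, a 20x20 update rounds up to 5x5 blocks, so | |
| // dataSize = 25 * 8 = 200 bytes. | |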
| 722 | |
| 723 // We only need the internal format for compressed 2D textures. | |
| 724 GrGLenum internalFormat = 0; | |
| 725 if (!this->configToGLFormats(desc.fConfig, false, &internalFormat, NULL, NULL)) { | |
| 726 return false; | |
| 727 } | |
| 728 | |
| 729 if (isNewTexture) { | |
| 730 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); | |
| 731 GL_ALLOC_CALL(this->glInterface(), | |
| 732 CompressedTexImage2D(GR_GL_TEXTURE_2D, | |
| 733 0, // level | |
| 734 internalFormat, | |
| 735 width, height, | |
| 736 0, // border | |
| 737 SkToInt(dataSize), | |
| 738 data)); | |
| 739 GrGLenum error = check_alloc_error(desc, this->glInterface()); | |
| 740 if (error != GR_GL_NO_ERROR) { | |
| 741 return false; | |
| 742 } | |
| 743 } else { | |
| 744 // Paletted textures can't be updated. | |
| 745 if (GR_GL_PALETTE8_RGBA8 == internalFormat) { | |
| 746 return false; | |
| 747 } | |
| 748 GL_CALL(CompressedTexSubImage2D(GR_GL_TEXTURE_2D, | |
| 749 0, // level | |
| 750 left, top, | |
| 751 width, height, | |
| 752 internalFormat, | |
| 753 SkToInt(dataSize), | |
| 754 data)); | |
| 755 } | |
| 756 | |
| 757 return true; | |
| 758 } | |
| 759 | |
| 760 static bool renderbuffer_storage_msaa(GrGLContext& ctx, | |
| 761 int sampleCount, | |
| 762 GrGLenum format, | |
| 763 int width, int height) { | |
| 764 CLEAR_ERROR_BEFORE_ALLOC(ctx.interface()); | |
| 765 SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType()); | |
| 766 switch (ctx.caps()->msFBOType()) { | |
| 767 case GrGLCaps::kDesktop_ARB_MSFBOType: | |
| 768 case GrGLCaps::kDesktop_EXT_MSFBOType: | |
| 769 case GrGLCaps::kES_3_0_MSFBOType: | |
| 770 GL_ALLOC_CALL(ctx.interface(), | |
| 771 RenderbufferStorageMultisample(GR_GL_RENDERBUFFER, | |
| 772 sampleCount, | |
| 773 format, | |
| 774 width, height)); | |
| 775 break; | |
| 776 case GrGLCaps::kES_Apple_MSFBOType: | |
| 777 GL_ALLOC_CALL(ctx.interface(), | |
| 778 RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER, | |
| 779 sampleCount, | |
| 780 format, | |
| 781 width, height)); | |
| 782 break; | |
| 783 case GrGLCaps::kES_EXT_MsToTexture_MSFBOType: | |
| 784 case GrGLCaps::kES_IMG_MsToTexture_MSFBOType: | |
| 785 GL_ALLOC_CALL(ctx.interface(), | |
| 786 RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER, | |
| 787 sampleCount, | |
| 788 format, | |
| 789 width, height)); | |
| 790 break; | |
| 791 case GrGLCaps::kNone_MSFBOType: | |
| 792 SkFAIL("Shouldn't be here if we don't support multisampled renderbuffers."); | |
| 793 break; | |
| 794 } | |
| 795 return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface())); | |
| 796 } | |
| 797 | |
| 798 bool GrGLGpu::createRenderTargetObjects(const GrSurfaceDesc& desc, GrGLuint texID, | |
| 799 GrGLRenderTarget::IDDesc* idDesc) { | |
| 800 idDesc->fMSColorRenderbufferID = 0; | |
| 801 idDesc->fRTFBOID = 0; | |
| 802 idDesc->fTexFBOID = 0; | |
| 803 idDesc->fIsWrapped = false; | |
| 804 | |
| 805 GrGLenum status; | |
| 806 | |
| 807 GrGLenum msColorFormat = 0; // suppress warning | |
| 808 | |
| 809 if (desc.fSampleCnt > 0 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) { | |
| 810 goto FAILED; | |
| 811 } | |
| 812 | |
| 813 GL_CALL(GenFramebuffers(1, &idDesc->fTexFBOID)); | |
| 814 if (!idDesc->fTexFBOID) { | |
| 815 goto FAILED; | |
| 816 } | |
| 817 | |
| 818 | |
| 819 // If we are using multisampling we will create two FBOs. We render to one and then resolve to | |
| 820 // the texture bound to the other. The exception is the IMG multisample extension. With this | |
| 821 // extension the texture is multisampled when rendered to and then auto-resolves when it is | |
| 822 // rendered from. | |
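| // Sketch of the resolve step implied by the comment above (illustration only; | |
| // the actual resolve is performed elsewhere, not in this function): | |
| // | |
| //   GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, idDesc->fRTFBOID)); | |
| //   GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, idDesc->fTexFBOID)); | |
| //   GL_CALL(BlitFramebuffer(0, 0, w, h, 0, 0, w, h, | |
| //                           GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); | |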
| 823 if (desc.fSampleCnt > 0 && this->glCaps().usesMSAARenderBuffers()) { | |
| 824 GL_CALL(GenFramebuffers(1, &idDesc->fRTFBOID)); | |
| 825 GL_CALL(GenRenderbuffers(1, &idDesc->fMSColorRenderbufferID)); | |
| 826 if (!idDesc->fRTFBOID || | |
| 827 !idDesc->fMSColorRenderbufferID || | |
| 828 !this->configToGLFormats(desc.fConfig, | |
| 829 // ES2 and ES3 require sized internal formats for rb storage. | |
| 830 kGLES_GrGLStandard == this->glStandard(), | |
| 831 &msColorFormat, | |
| 832 NULL, | |
| 833 NULL)) { | |
| 834 goto FAILED; | |
| 835 } | |
| 836 } else { | |
| 837 idDesc->fRTFBOID = idDesc->fTexFBOID; | |
| 838 } | |
| 839 | |
| 840 // below here we may bind the FBO | |
| 841 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID; | |
| 842 if (idDesc->fRTFBOID != idDesc->fTexFBOID) { | |
| 843 SkASSERT(desc.fSampleCnt > 0); | |
| 844 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, idDesc->fMSColorRenderbufferID)); | |
| 845 if (!renderbuffer_storage_msaa(fGLContext, | |
| 846 desc.fSampleCnt, | |
| 847 msColorFormat, | |
| 848 desc.fWidth, desc.fHeight)) { | |
| 849 goto FAILED; | |
| 850 } | |
| 851 fGPUStats.incRenderTargetBinds(); | |
| 852 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fRTFBOID)); | |
| 853 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, | |
| 854 GR_GL_COLOR_ATTACHMENT0, | |
| 855 GR_GL_RENDERBUFFER, | |
| 856 idDesc->fMSColorRenderbufferID)); | |
| 857 if ((desc.fFlags & kCheckAllocation_GrSurfaceFlag) || | |
| 858 !this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) { | |
| 859 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); | |
| 860 if (status != GR_GL_FRAMEBUFFER_COMPLETE) { | |
| 861 goto FAILED; | |
| 862 } | |
| 863 fGLContext.caps()->markConfigAsValidColorAttachment(desc.fConfig); | |
| 864 } | |
| 865 } | |
| 866 fGPUStats.incRenderTargetBinds(); | |
| 867 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fTexFBOID)); | |
| 868 | |
| 869 if (this->glCaps().usesImplicitMSAAResolve() && desc.fSampleCnt > 0) { | |
| 870 GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER, | |
| 871 GR_GL_COLOR_ATTACHMENT0, | |
| 872 GR_GL_TEXTURE_2D, | |
| 873 texID, 0, desc.fSampleCnt)); | |
| 874 } else { | |
| 875 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, | |
| 876 GR_GL_COLOR_ATTACHMENT0, | |
| 877 GR_GL_TEXTURE_2D, | |
| 878 texID, 0)); | |
| 879 } | |
| 880 if ((desc.fFlags & kCheckAllocation_GrSurfaceFlag) || | |
| 881 !this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) { | |
| 882 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); | |
| 883 if (status != GR_GL_FRAMEBUFFER_COMPLETE) { | |
| 884 goto FAILED; | |
| 885 } | |
| 886 fGLContext.caps()->markConfigAsValidColorAttachment(desc.fConfig); | |
| 887 } | |
| 888 | |
| 889 return true; | |
| 890 | |
| 891 FAILED: | |
| 892 if (idDesc->fMSColorRenderbufferID) { | |
| 893 GL_CALL(DeleteRenderbuffers(1, &idDesc->fMSColorRenderbufferID)); | |
| 894 } | |
| 895 if (idDesc->fRTFBOID != idDesc->fTexFBOID) { | |
| 896 GL_CALL(DeleteFramebuffers(1, &idDesc->fRTFBOID)); | |
| 897 } | |
| 898 if (idDesc->fTexFBOID) { | |
| 899 GL_CALL(DeleteFramebuffers(1, &idDesc->fTexFBOID)); | |
| 900 } | |
| 901 return false; | |
| 902 } | |
| 903 | |
| 904 // good to set a break-point here to know when createTexture fails | |
| 905 static GrTexture* return_null_texture() { | |
| 906 // SkDEBUGFAIL("null texture"); | |
| 907 return NULL; | |
| 908 } | |
| 909 | |
| 910 #if 0 && defined(SK_DEBUG) | |
| 911 static size_t as_size_t(int x) { | |
| 912 return x; | |
| 913 } | |
| 914 #endif | |
| 915 | |
| 916 GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& origDesc, | |
| 917 const void* srcData, | |
| 918 size_t rowBytes) { | |
| 919 | |
| 920 GrSurfaceDesc desc = origDesc; | |
| 921 GrGLRenderTarget::IDDesc rtIDDesc; | |
| 922 | |
| 923 // Attempt to catch un- or wrongly initialized sample counts. | |
| 924 SkASSERT(desc.fSampleCnt >= 0 && desc.fSampleCnt <= 64); | |
| 925 // We fail if the MSAA was requested and is not available. | |
| 926 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) { | |
| 927 //SkDebugf("MSAA RT requested but not supported on this platform."); | |
| 928 return return_null_texture(); | |
| 929 } | |
| 930 | |
| 931 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag); | |
| 932 | |
| 933 // If the sample count exceeds the max then we clamp it. | |
| 934 desc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount()); | |
| 935 desc.fOrigin = resolve_origin(desc.fOrigin, renderTarget); | |
| 936 | |
| 937 rtIDDesc.fMSColorRenderbufferID = 0; | |
| 938 rtIDDesc.fRTFBOID = 0; | |
| 939 rtIDDesc.fTexFBOID = 0; | |
| 940 rtIDDesc.fIsWrapped = false; | |
| 941 | |
| 942 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) { | |
| 943 //SkDebugf("MSAA RT requested but not supported on this platform."); | |
| 944 return return_null_texture(); | |
| 945 } | |
| 946 | |
| 947 if (renderTarget) { | |
| 948 int maxRTSize = this->caps()->maxRenderTargetSize(); | |
| 949 if (desc.fWidth > maxRTSize || desc.fHeight > maxRTSize) { | |
| 950 return return_null_texture(); | |
| 951 } | |
| 952 } else { | |
| 953 int maxSize = this->caps()->maxTextureSize(); | |
| 954 if (desc.fWidth > maxSize || desc.fHeight > maxSize) { | |
| 955 return return_null_texture(); | |
| 956 } | |
| 957 } | |
| 958 | |
| 959 GrGLTexture::IDDesc idDesc; | |
| 960 GL_CALL(GenTextures(1, &idDesc.fTextureID)); | |
| 961 idDesc.fIsWrapped = false; | |
| 962 | |
| 963 if (!idDesc.fTextureID) { | |
| 964 return return_null_texture(); | |
| 965 } | |
| 966 | |
| 967 this->setScratchTextureUnit(); | |
| 968 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, idDesc.fTextureID)); | |
| 969 | |
| 970 if (renderTarget && this->glCaps().textureUsageSupport()) { | |
| 971 // provides a hint about how this texture will be used | |
| 972 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, | |
| 973 GR_GL_TEXTURE_USAGE, | |
| 974 GR_GL_FRAMEBUFFER_ATTACHMENT)); | |
| 975 } | |
| 976 | |
| 977 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some | |
| 978 // drivers have a bug where an FBO won't be complete if it includes a | |
| 979 // texture that is not mipmap complete (considering the filter in use). | |
| 980 GrGLTexture::TexParams initialTexParams; | |
| 981 // we only set a subset here so invalidate first | |
| 982 initialTexParams.invalidate(); | |
| 983 initialTexParams.fMinFilter = GR_GL_NEAREST; | |
| 984 initialTexParams.fMagFilter = GR_GL_NEAREST; | |
| 985 initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE; | |
| 986 initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE; | |
| 987 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, | |
| 988 GR_GL_TEXTURE_MAG_FILTER, | |
| 989 initialTexParams.fMagFilter)); | |
| 990 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, | |
| 991 GR_GL_TEXTURE_MIN_FILTER, | |
| 992 initialTexParams.fMinFilter)); | |
| 993 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, | |
| 994 GR_GL_TEXTURE_WRAP_S, | |
| 995 initialTexParams.fWrapS)); | |
| 996 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, | |
| 997 GR_GL_TEXTURE_WRAP_T, | |
| 998 initialTexParams.fWrapT)); | |
| 999 if (!this->uploadTexData(desc, true, 0, 0, | |
| 1000 desc.fWidth, desc.fHeight, | |
| 1001 desc.fConfig, srcData, rowBytes)) { | |
| 1002 GL_CALL(DeleteTextures(1, &idDesc.fTextureID)); | |
| 1003 return return_null_texture(); | |
| 1004 } | |
| 1005 | |
| 1006 GrGLTexture* tex; | |
| 1007 if (renderTarget) { | |
| 1008 // unbind the texture from the texture unit before binding it to the frame buffer | |
| 1009 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0)); | |
| 1010 | |
| 1011 if (!this->createRenderTargetObjects(desc, idDesc.fTextureID, &rtIDDesc)) { | |
| 1012 GL_CALL(DeleteTextures(1, &idDesc.fTextureID)); | |
| 1013 return return_null_texture(); | |
| 1014 } | |
| 1015 tex = SkNEW_ARGS(GrGLTextureRenderTarget, (this, desc, idDesc, rtIDDesc)); | |
| 1016 } else { | |
| 1017 tex = SkNEW_ARGS(GrGLTexture, (this, desc, idDesc)); | |
| 1018 } | |
| 1019 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp()); | |
| 1020 #ifdef TRACE_TEXTURE_CREATION | |
| 1021 SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n", | |
| 1022 idDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig); | |
| 1023 #endif | |
| 1024 return tex; | |
| 1025 } | |
| 1026 | |
| 1027 GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& origDesc, const void* srcData) { | |
| 1028 | |
| 1029 if (SkToBool(origDesc.fFlags & kRenderTarget_GrSurfaceFlag) || origDesc.fSampleCnt > 0) { | |
| 1030 return return_null_texture(); | |
| 1031 } | |
| 1032 | |
| 1033 // Make sure that we're not flipping Y. | |
| 1034 GrSurfaceOrigin texOrigin = resolve_origin(origDesc.fOrigin, false); | |
| 1035 if (kBottomLeft_GrSurfaceOrigin == texOrigin) { | |
| 1036 return return_null_texture(); | |
| 1037 } | |
| 1038 GrSurfaceDesc desc = origDesc; | |
| 1039 desc.fOrigin = texOrigin; | |
| 1040 | |
| 1041 int maxSize = this->caps()->maxTextureSize(); | |
| 1042 if (desc.fWidth > maxSize || desc.fHeight > maxSize) { | |
| 1043 return return_null_texture(); | |
| 1044 } | |
| 1045 | |
| 1046 GrGLTexture::IDDesc idDesc; | |
| 1047 GL_CALL(GenTextures(1, &idDesc.fTextureID)); | |
| 1048 idDesc.fIsWrapped = false; | |
| 1049 | |
| 1050 if (!idDesc.fTextureID) { | |
| 1051 return return_null_texture(); | |
| 1052 } | |
| 1053 | |
| 1054 this->setScratchTextureUnit(); | |
| 1055 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, idDesc.fTextureID)); | |
| 1056 | |
| 1057 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some | |
| 1058 // drivers have a bug where an FBO won't be complete if it includes a | |
| 1059 // texture that is not mipmap complete (considering the filter in use). | |
| 1060 GrGLTexture::TexParams initialTexParams; | |
| 1061 // we only set a subset here so invalidate first | |
| 1062 initialTexParams.invalidate(); | |
| 1063 initialTexParams.fMinFilter = GR_GL_NEAREST; | |
| 1064 initialTexParams.fMagFilter = GR_GL_NEAREST; | |
| 1065 initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE; | |
| 1066 initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE; | |
| 1067 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, | |
| 1068 GR_GL_TEXTURE_MAG_FILTER, | |
| 1069 initialTexParams.fMagFilter)); | |
| 1070 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, | |
| 1071 GR_GL_TEXTURE_MIN_FILTER, | |
| 1072 initialTexParams.fMinFilter)); | |
| 1073 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, | |
| 1074 GR_GL_TEXTURE_WRAP_S, | |
| 1075 initialTexParams.fWrapS)); | |
| 1076 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, | |
| 1077 GR_GL_TEXTURE_WRAP_T, | |
| 1078 initialTexParams.fWrapT)); | |
| 1079 | |
| 1080 if (!this->uploadCompressedTexData(desc, srcData)) { | |
| 1081 GL_CALL(DeleteTextures(1, &idDesc.fTextureID)); | |
| 1082 return return_null_texture(); | |
| 1083 } | |
| 1084 | |
| 1085 GrGLTexture* tex; | |
| 1086 tex = SkNEW_ARGS(GrGLTexture, (this, desc, idDesc)); | |
| 1087 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp()); | |
| 1088 #ifdef TRACE_TEXTURE_CREATION | |
| 1089 SkDebugf("--- new compressed texture [%d] size=(%d %d) config=%d\n", | |
| 1090 idDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig); | |
| 1091 #endif | |
| 1092 return tex; | |
| 1093 } | |
| 1094 | |
| 1095 namespace { | |
| 1096 | |
| 1097 const GrGLuint kUnknownBitCount = GrGLStencilBuffer::kUnknownBitCount; | |
| 1098 | |
| 1099 void inline get_stencil_rb_sizes(const GrGLInterface* gl, | |
| 1100 GrGLStencilBuffer::Format* format) { | |
| 1101 | |
| 1102 // we shouldn't ever know one size and not the other | |
| 1103 SkASSERT((kUnknownBitCount == format->fStencilBits) == | |
| 1104 (kUnknownBitCount == format->fTotalBits)); | |
| 1105 if (kUnknownBitCount == format->fStencilBits) { | |
| 1106 GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER, | |
| 1107 GR_GL_RENDERBUFFER_STENCIL_SIZE, | |
| 1108 (GrGLint*)&format->fStencilBits); | |
| 1109 if (format->fPacked) { | |
| 1110 GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER, | |
| 1111 GR_GL_RENDERBUFFER_DEPTH_SIZE, | |
| 1112 (GrGLint*)&format->fTotalBits); | |
| 1113 format->fTotalBits += format->fStencilBits; | |
| 1114 } else { | |
| 1115 format->fTotalBits = format->fStencilBits; | |
| 1116 } | |
| 1117 } | |
| 1118 } | |
| 1119 } | |
| 1120 | |
| 1121 bool GrGLGpu::createStencilBufferForRenderTarget(GrRenderTarget* rt, int width, int height) { | |
| 1122 | |
| 1123 // All internally created RTs are also textures. We don't create | |
| 1124 // SBs for a client's standalone RT (that is, an RT that isn't also a texture). | |
| 1125 SkASSERT(rt->asTexture()); | |
| 1126 SkASSERT(width >= rt->width()); | |
| 1127 SkASSERT(height >= rt->height()); | |
| 1128 | |
| 1129 int samples = rt->numSamples(); | |
| 1130 GrGLuint sbID = 0; | |
| 1131 | |
| 1132 int stencilFmtCnt = this->glCaps().stencilFormats().count(); | |
| 1133 for (int i = 0; i < stencilFmtCnt; ++i) { | |
| 1134 if (!sbID) { | |
| 1135 GL_CALL(GenRenderbuffers(1, &sbID)); | |
| 1136 } | |
| 1137 if (!sbID) { | |
| 1138 return false; | |
| 1139 } | |
| 1140 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbID)); | |
| 1141 // we start with the last stencil format that succeeded in hopes | |
| 1142 // that we won't go through this loop more than once after the | |
| 1143 // first (painful) stencil creation. | |
| 1144 int sIdx = (i + fLastSuccessfulStencilFmtIdx) % stencilFmtCnt; | |
| 1145 const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[sIdx]; | |
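| // E.g. (added note): with stencilFmtCnt = 4 and fLastSuccessfulStencilFmtIdx = 2, | |
| // the loop probes formats in the order 2, 3, 0, 1, so the format that worked | |
| // last time is tried first on later stencil buffer creations. | |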
| 1146 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); | |
| 1147 // we do this "if" so that we don't call the multisample | |
| 1148 // version on a GL that doesn't have an MSAA extension. | |
| 1149 bool created; | |
| 1150 if (samples > 0) { | |
| 1151 created = renderbuffer_storage_msaa(fGLContext, | |
| 1152 samples, | |
| 1153 sFmt.fInternalFormat, | |
| 1154 width, height); | |
| 1155 } else { | |
| 1156 GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER, | |
| 1157 sFmt.fInternalFormat, | |
| 1158 width, height)); | |
| 1159 created = (GR_GL_NO_ERROR == check_alloc_error(rt->desc(), this->glInterface())); | |
| 1160 } | |
| 1161 if (created) { | |
| 1162 // After sized formats we attempt an unsized format and take | |
| 1163 // whatever sizes GL gives us. In that case we query for the size. | |
| 1164 GrGLStencilBuffer::Format format = sFmt; | |
| 1165 get_stencil_rb_sizes(this->glInterface(), &format); | |
| 1166 static const bool kIsWrapped = false; | |
| 1167 SkAutoTUnref<GrStencilBuffer> sb(SkNEW_ARGS(GrGLStencilBuffer, | |
| 1168 (this, kIsWrapped, sbID, width, height, | |
| 1169 samples, format))); | |
| 1170 if (this->attachStencilBufferToRenderTarget(sb, rt)) { | |
| 1171 fLastSuccessfulStencilFmtIdx = sIdx; | |
| 1172 rt->setStencilBuffer(sb); | |
| 1173 return true; | |
| 1174 } | |
| 1175 // Remove the scratch key from this resource so we don't grab it from the cache ever | |
| 1176 // again. | |
| 1177 sb->cacheAccess().removeScratchKey(); | |
| 1178 // Set this to 0 since we handed the valid ID off to the failed stencil buffer resource. | |
| 1179 sbID = 0; | |
| 1180 } | |
| 1181 } | |
| 1182 GL_CALL(DeleteRenderbuffers(1, &sbID)); | |
| 1183 return false; | |
| 1184 } | |
| 1185 | |
| 1186 bool GrGLGpu::attachStencilBufferToRenderTarget(GrStencilBuffer* sb, GrRenderTarget* rt) { | |
| 1187 GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt); | |
| 1188 | |
| 1189 GrGLuint fbo = glrt->renderFBOID(); | |
| 1190 | |
| 1191 if (NULL == sb) { | |
| 1192 if (rt->getStencilBuffer()) { | |
| 1193 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, | |
| 1194 GR_GL_STENCIL_ATTACHMENT, | |
| 1195 GR_GL_RENDERBUFFER, 0)); | |
| 1196 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, | |
| 1197 GR_GL_DEPTH_ATTACHMENT, | |
| 1198 GR_GL_RENDERBUFFER, 0)); | |
| 1199 #ifdef SK_DEBUG | |
| 1200 GrGLenum status; | |
| 1201 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); | |
| 1202 SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status); | |
| 1203 #endif | |
| 1204 } | |
| 1205 return true; | |
| 1206 } else { | |
| 1207 GrGLStencilBuffer* glsb = static_cast<GrGLStencilBuffer*>(sb); | |
| 1208 GrGLuint rb = glsb->renderbufferID(); | |
| 1209 | |
| 1210 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID; | |
| 1211 fGPUStats.incRenderTargetBinds(); | |
| 1212 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fbo)); | |
| 1213 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, | |
| 1214 GR_GL_STENCIL_ATTACHMENT, | |
| 1215 GR_GL_RENDERBUFFER, rb)); | |
| 1216 if (glsb->format().fPacked) { | |
| 1217 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, | |
| 1218 GR_GL_DEPTH_ATTACHMENT, | |
| 1219 GR_GL_RENDERBUFFER, rb)); | |
| 1220 } else { | |
| 1221 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, | |
| 1222 GR_GL_DEPTH_ATTACHMENT, | |
| 1223 GR_GL_RENDERBUFFER, 0)); | |
| 1224 } | |
| 1225 | |
| 1226 GrGLenum status; | |
| 1227 if (!this->glCaps().isColorConfigAndStencilFormatVerified(rt->config(), glsb->format())) { | |
| 1228 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); | |
| 1229 if (status != GR_GL_FRAMEBUFFER_COMPLETE) { | |
| 1230 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, | |
| 1231 GR_GL_STENCIL_ATTACHMENT, | |
| 1232 GR_GL_RENDERBUFFER, 0)); | |
| 1233 if (glsb->format().fPacked) { | |
| 1234 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, | |
| 1235 GR_GL_DEPTH_ATTACHMENT, | |
| 1236 GR_GL_RENDERBUFFER, 0)); | |
| 1237 } | |
| 1238 return false; | |
| 1239 } else { | |
| 1240 fGLContext.caps()->markColorConfigAndStencilFormatAsVerified( | |
| 1241 rt->config(), | |
| 1242 glsb->format()); | |
| 1243 } | |
| 1244 } | |
| 1245 return true; | |
| 1246 } | |
| 1247 } | |
| 1248 | |
| 1249 //////////////////////////////////////////////////////////////////////////////// | |
| 1250 | |
| 1251 GrVertexBuffer* GrGLGpu::onCreateVertexBuffer(size_t size, bool dynamic) { | |
| 1252 GrGLVertexBuffer::Desc desc; | |
| 1253 desc.fDynamic = dynamic; | |
| 1254 desc.fSizeInBytes = size; | |
| 1255 desc.fIsWrapped = false; | |
| 1256 | |
| 1257 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) { | |
| 1258 desc.fID = 0; | |
| 1259 GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc)); | |
| 1260 return vertexBuffer; | |
| 1261 } else { | |
| 1262 GL_CALL(GenBuffers(1, &desc.fID)); | |
| 1263 if (desc.fID) { | |
| 1264 fHWGeometryState.setVertexBufferID(this, desc.fID); | |
| 1265 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); | |
| 1266 // make sure driver can allocate memory for this buffer | |
| 1267 GL_ALLOC_CALL(this->glInterface(), | |
| 1268 BufferData(GR_GL_ARRAY_BUFFER, | |
| 1269 (GrGLsizeiptr) desc.fSizeInBytes, | |
| 1270 NULL, // data ptr | |
| 1271 desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW)); | |
| 1272 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) { | |
| 1273 GL_CALL(DeleteBuffers(1, &desc.fID)); | |
| 1274 this->notifyVertexBufferDelete(desc.fID); | |
| 1275 return NULL; | |
| 1276 } | |
| 1277 GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc)); | |
| 1278 return vertexBuffer; | |
| 1279 } | |
| 1280 return NULL; | |
| 1281 } | |
| 1282 } | |
| 1283 | |
| 1284 GrIndexBuffer* GrGLGpu::onCreateIndexBuffer(size_t size, bool dynamic) { | |
| 1285 GrGLIndexBuffer::Desc desc; | |
| 1286 desc.fDynamic = dynamic; | |
| 1287 desc.fSizeInBytes = size; | |
| 1288 desc.fIsWrapped = false; | |
| 1289 | |
| 1290 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) { | |
| 1291 desc.fID = 0; | |
| 1292 GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc)); | |
| 1293 return indexBuffer; | |
| 1294 } else { | |
| 1295 GL_CALL(GenBuffers(1, &desc.fID)); | |
| 1296 if (desc.fID) { | |
| 1297 fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, desc.fID); | |
| 1298 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); | |
| 1299 // make sure driver can allocate memory for this buffer | |
| 1300 GL_ALLOC_CALL(this->glInterface(), | |
| 1301 BufferData(GR_GL_ELEMENT_ARRAY_BUFFER, | |
| 1302 (GrGLsizeiptr) desc.fSizeInBytes, | |
| 1303 NULL, // data ptr | |
| 1304 desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW)); | |
| 1305 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) { | |
| 1306 GL_CALL(DeleteBuffers(1, &desc.fID)); | |
| 1307 this->notifyIndexBufferDelete(desc.fID); | |
| 1308 return NULL; | |
| 1309 } | |
| 1310 GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc)); | |
| 1311 return indexBuffer; | |
| 1312 } | |
| 1313 return NULL; | |
| 1314 } | |
| 1315 } | |
| 1316 | |
| 1317 void GrGLGpu::flushScissor(const GrScissorState& scissorState, | |
| 1318 const GrGLIRect& rtViewport, | |
| 1319 GrSurfaceOrigin rtOrigin) { | |
| 1320 if (scissorState.fEnabled) { | |
| 1321 GrGLIRect scissor; | |
| 1322 scissor.setRelativeTo(rtViewport, | |
| 1323 scissorState.fRect.fLeft, | |
| 1324 scissorState.fRect.fTop, | |
| 1325 scissorState.fRect.width(), | |
| 1326 scissorState.fRect.height(), | |
| 1327 rtOrigin); | |
| 1328 // if the scissor fully contains the viewport then we fall through and | |
| 1329 // disable the scissor test. | |
| 1330 if (!scissor.contains(rtViewport)) { | |
| 1331 if (fHWScissorSettings.fRect != scissor) { | |
| 1332 scissor.pushToGLScissor(this->glInterface()); | |
| 1333 fHWScissorSettings.fRect = scissor; | |
| 1334 } | |
| 1335 if (kYes_TriState != fHWScissorSettings.fEnabled) { | |
| 1336 GL_CALL(Enable(GR_GL_SCISSOR_TEST)); | |
| 1337 fHWScissorSettings.fEnabled = kYes_TriState; | |
| 1338 } | |
| 1339 return; | |
| 1340 } | |
| 1341 } | |
| 1342 | |
| 1343 // See fall through note above | |
| 1344 this->disableScissor(); | |
| 1345 } | |
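| // Example of the fall-through above (added note): with a 100x100 viewport and a | |
| // scissor rect of (0, 0, 200, 200), the scissor contains the whole viewport, so | |
| // instead of programming a redundant scissor we disable GL_SCISSOR_TEST. | |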
| 1346 | |
| 1347 bool GrGLGpu::flushGLState(const GrOptDrawState& optState) { | |
| 1348 GrXferProcessor::BlendInfo blendInfo; | |
| 1349 optState.getXferProcessor()->getBlendInfo(&blendInfo); | |
| 1350 | |
| 1351 this->flushDither(optState.isDitherState()); | |
| 1352 this->flushColorWrite(blendInfo.fWriteColor); | |
| 1353 this->flushDrawFace(optState.getDrawFace()); | |
| 1354 | |
| 1355 fCurrentProgram.reset(fProgramCache->getProgram(optState)); | |
| 1356 if (NULL == fCurrentProgram.get()) { | |
| 1357 SkDEBUGFAIL("Failed to create program!"); | |
| 1358 return false; | |
| 1359 } | |
| 1360 | |
| 1361 fCurrentProgram.get()->ref(); | |
| 1362 | |
| 1363 GrGLuint programID = fCurrentProgram->programID(); | |
| 1364 if (fHWProgramID != programID) { | |
| 1365 GL_CALL(UseProgram(programID)); | |
| 1366 fHWProgramID = programID; | |
| 1367 } | |
| 1368 | |
| 1369 if (blendInfo.fWriteColor) { | |
| 1370 this->flushBlend(blendInfo); | |
| 1371 } | |
| 1372 | |
| 1373 fCurrentProgram->setData(optState); | |
| 1374 | |
| 1375 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(optState.getRenderTarget()); | |
| 1376 this->flushStencil(optState.getStencil()); | |
| 1377 this->flushScissor(optState.getScissorState(), glRT->getViewport(), glRT->origin()); | |
| 1378 this->flushHWAAState(glRT, optState.isHWAntialiasState(), | |
| 1379 kDrawLines_DrawType == optState.drawType()); | |
| 1380 | |
| 1381 // This must come after textures are flushed because a texture may need | |
| 1382 // to be msaa-resolved (which will modify bound FBO state). | |
| 1383 this->flushRenderTarget(glRT, NULL); | |
| 1384 | |
| 1385 return true; | |
| 1386 } | |
| 1387 | |
| 1388 void GrGLGpu::setupGeometry(const GrOptDrawState& optState, | |
| 1389 const GrDrawTarget::DrawInfo& info, | |
| 1390 size_t* indexOffsetInBytes) { | |
| 1391 GrGLVertexBuffer* vbuf; | |
| 1392 vbuf = (GrGLVertexBuffer*) info.vertexBuffer(); | |
| 1393 | |
| 1394 SkASSERT(vbuf); | |
| 1395 SkASSERT(!vbuf->isMapped()); | |
| 1396 | |
| 1397 GrGLIndexBuffer* ibuf = NULL; | |
| 1398 if (info.isIndexed()) { | |
| 1399 SkASSERT(indexOffsetInBytes); | |
| 1400 | |
| 1401 *indexOffsetInBytes = 0; | |
| 1402 ibuf = (GrGLIndexBuffer*)info.indexBuffer(); | |
| 1403 | |
| 1404 SkASSERT(ibuf); | |
| 1405 SkASSERT(!ibuf->isMapped()); | |
| 1406 *indexOffsetInBytes += ibuf->baseOffset(); | |
| 1407 } | |
| 1408 GrGLAttribArrayState* attribState = | |
| 1409 fHWGeometryState.bindArrayAndBuffersToDraw(this, vbuf, ibuf); | |
| 1410 | |
| 1411 if (fCurrentProgram->hasVertexShader()) { | |
| 1412 const GrGeometryProcessor* gp = optState.getGeometryProcessor(); | |
| 1413 | |
| 1414 GrGLsizei stride = static_cast<GrGLsizei>(gp->getVertexStride()); | |
| 1415 | |
| 1416 size_t vertexOffsetInBytes = stride * info.startVertex(); | |
| 1417 | |
| 1418 vertexOffsetInBytes += vbuf->baseOffset(); | |
| 1419 | |
| 1420 const SkTArray<GrGeometryProcessor::GrAttribute, true>& attribs = gp->getAttribs(); | |
| 1421 int vaCount = attribs.count(); | |
| 1422 uint32_t usedAttribArraysMask = 0; | |
| 1423 size_t offset = 0; | |
| 1424 | |
| 1425 for (int attribIndex = 0; attribIndex < vaCount; attribIndex++) { | |
| 1426 usedAttribArraysMask |= (1 << attribIndex); | |
| 1427 GrVertexAttribType attribType = attribs[attribIndex].fType; | |
| 1428 attribState->set(this, | |
| 1429 attribIndex, | |
| 1430 vbuf, | |
| 1431 GrGLAttribTypeToLayout(attribType).fCount, | |
| 1432 GrGLAttribTypeToLayout(attribType).fType, | |
| 1433 GrGLAttribTypeToLayout(attribType).fNormalized, | |
| 1434 stride, | |
| 1435                              reinterpret_cast<GrGLvoid*>(vertexOffsetInBytes + offset)); | |
| 1436 offset += attribs[attribIndex].fOffset; | |
| 1437 } | |
| 1438 attribState->disableUnusedArrays(this, usedAttribArraysMask); | |
| 1439 } | |
| 1440 } | |
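| | |
| // Editorial note: a worked example of the offset math above, with illustrative numbers. With a | |
| // vertex stride of 20 bytes, info.startVertex() == 4, and a VBO baseOffset of 0, every attribute | |
| // pointer starts from vertexOffsetInBytes == 80; attribute N is then placed at 80 plus the sum of | |
| // the fOffset values of attributes 0..N-1, since 'offset' accumulates as the loop advances. | |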
| 1441 | |
| 1442 void GrGLGpu::buildProgramDesc(const GrOptDrawState& optState, | |
| 1443 const GrProgramDesc::DescInfo& descInfo, | |
| 1444 GrGpu::DrawType drawType, | |
| 1445 GrProgramDesc* desc) { | |
| 1446     if (!GrGLProgramDescBuilder::Build(optState, descInfo, drawType, this, desc)) { | |
| 1447 SkDEBUGFAIL("Failed to generate GL program descriptor"); | |
| 1448 } | |
| 1449 } | |
| 1450 | |
| 1451 void GrGLGpu::disableScissor() { | |
| 1452 if (kNo_TriState != fHWScissorSettings.fEnabled) { | |
| 1453 GL_CALL(Disable(GR_GL_SCISSOR_TEST)); | |
| 1454 fHWScissorSettings.fEnabled = kNo_TriState; | |
| 1455 return; | |
| 1456 } | |
| 1457 } | |
| 1458 | |
| 1459 void GrGLGpu::onClear(GrRenderTarget* target, const SkIRect* rect, GrColor color, | |
| 1460 bool canIgnoreRect) { | |
| 1461 // parent class should never let us get here with no RT | |
| 1462 SkASSERT(target); | |
| 1463 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); | |
| 1464 | |
| 1465 if (canIgnoreRect && this->glCaps().fullClearIsFree()) { | |
| 1466 rect = NULL; | |
| 1467 } | |
| 1468 | |
| 1469 SkIRect clippedRect; | |
| 1470 if (rect) { | |
| 1471 // flushScissor expects rect to be clipped to the target. | |
| 1472 clippedRect = *rect; | |
| 1473 SkIRect rtRect = SkIRect::MakeWH(target->width(), target->height()); | |
| 1474 if (clippedRect.intersect(rtRect)) { | |
| 1475 rect = &clippedRect; | |
| 1476 } else { | |
| 1477 return; | |
| 1478 } | |
| 1479 } | |
| 1480 | |
| 1481 this->flushRenderTarget(glRT, rect); | |
| 1482 GrScissorState scissorState; | |
| 1483 scissorState.fEnabled = SkToBool(rect); | |
| 1484 if (scissorState.fEnabled) { | |
| 1485 scissorState.fRect = *rect; | |
| 1486 } | |
| 1487 this->flushScissor(scissorState, glRT->getViewport(), glRT->origin()); | |
| 1488 | |
| 1489 GrGLfloat r, g, b, a; | |
| 1490 static const GrGLfloat scale255 = 1.f / 255.f; | |
| 1491 a = GrColorUnpackA(color) * scale255; | |
| 1492 GrGLfloat scaleRGB = scale255; | |
| 1493 r = GrColorUnpackR(color) * scaleRGB; | |
| 1494 g = GrColorUnpackG(color) * scaleRGB; | |
| 1495 b = GrColorUnpackB(color) * scaleRGB; | |
| 1496 | |
| 1497 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE)); | |
| 1498 fHWWriteToColor = kYes_TriState; | |
| 1499 GL_CALL(ClearColor(r, g, b, a)); | |
| 1500 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT)); | |
| 1501 } | |
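| | |
| // Editorial note: GrColorUnpackA() and friends return raw 8-bit channels, so the scale above maps | |
| // them into [0, 1] for glClearColor; e.g. an alpha byte of 0x40 becomes 64 / 255 ~= 0.251f. | |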
| 1502 | |
| 1503 void GrGLGpu::discard(GrRenderTarget* renderTarget) { | |
| 1504 SkASSERT(renderTarget); | |
| 1505 if (!this->caps()->discardRenderTargetSupport()) { | |
| 1506 return; | |
| 1507 } | |
| 1508 | |
| 1509 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget); | |
| 1510 if (renderTarget->getUniqueID() != fHWBoundRenderTargetUniqueID) { | |
| 1511 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID; | |
| 1512 fGPUStats.incRenderTargetBinds(); | |
| 1513 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, glRT->renderFBOID())); | |
| 1514 } | |
| 1515 switch (this->glCaps().invalidateFBType()) { | |
| 1516 case GrGLCaps::kNone_InvalidateFBType: | |
| 1517 SkFAIL("Should never get here."); | |
| 1518 break; | |
| 1519 case GrGLCaps::kInvalidate_InvalidateFBType: | |
| 1520 if (0 == glRT->renderFBOID()) { | |
| 1521                 // When rendering to the default framebuffer the legal values for attachments | |
| 1522                 // are GL_COLOR, GL_DEPTH, GL_STENCIL, ... rather than the various FBO attachment | |
| 1523 // types. | |
| 1524 static const GrGLenum attachments[] = { GR_GL_COLOR }; | |
| 1525                 GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments), | |
| 1526 attachments)); | |
| 1527 } else { | |
| 1528                 static const GrGLenum attachments[] = { GR_GL_COLOR_ATTACHMENT0 }; | |
| 1529                 GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments), | |
| 1530 attachments)); | |
| 1531 } | |
| 1532 break; | |
| 1533 case GrGLCaps::kDiscard_InvalidateFBType: { | |
| 1534 if (0 == glRT->renderFBOID()) { | |
| 1535                 // When rendering to the default framebuffer the legal values for attachments | |
| 1536                 // are GL_COLOR, GL_DEPTH, GL_STENCIL, ... rather than the various FBO attachment | |
| 1537 // types. See glDiscardFramebuffer() spec. | |
| 1538 static const GrGLenum attachments[] = { GR_GL_COLOR }; | |
| 1539                 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments), | |
| 1540 attachments)); | |
| 1541 } else { | |
| 1542                 static const GrGLenum attachments[] = { GR_GL_COLOR_ATTACHMENT0 }; | |
| 1543                 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments), | |
| 1544 attachments)); | |
| 1545 } | |
| 1546 break; | |
| 1547 } | |
| 1548 } | |
| 1549 renderTarget->flagAsResolved(); | |
| 1550 } | |
| 1551 | |
| 1552 | |
| 1553 void GrGLGpu::clearStencil(GrRenderTarget* target) { | |
| 1554 if (NULL == target) { | |
| 1555 return; | |
| 1556 } | |
| 1557 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); | |
| 1558 this->flushRenderTarget(glRT, &SkIRect::EmptyIRect()); | |
| 1559 | |
| 1560 this->disableScissor(); | |
| 1561 | |
| 1562 GL_CALL(StencilMask(0xffffffff)); | |
| 1563 GL_CALL(ClearStencil(0)); | |
| 1564 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT)); | |
| 1565 fHWStencilSettings.invalidate(); | |
| 1566 } | |
| 1567 | |
| 1568 void GrGLGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) { | |
| 1569 SkASSERT(target); | |
| 1570 | |
| 1571 // this should only be called internally when we know we have a | |
| 1572 // stencil buffer. | |
| 1573 SkASSERT(target->getStencilBuffer()); | |
| 1574 GrGLint stencilBitCount = target->getStencilBuffer()->bits(); | |
| 1575 #if 0 | |
| 1576 SkASSERT(stencilBitCount > 0); | |
| 1577 GrGLint clipStencilMask = (1 << (stencilBitCount - 1)); | |
| 1578 #else | |
| 1579 // we could just clear the clip bit but when we go through | |
| 1580 // ANGLE a partial stencil mask will cause clears to be | |
| 1581 // turned into draws. Our contract on GrDrawTarget says that | |
| 1582 // changing the clip between stencil passes may or may not | |
| 1583 // zero the client's clip bits. So we just clear the whole thing. | |
| 1584 static const GrGLint clipStencilMask = ~0; | |
| 1585 #endif | |
| 1586 GrGLint value; | |
| 1587 if (insideClip) { | |
| 1588 value = (1 << (stencilBitCount - 1)); | |
| 1589 } else { | |
| 1590 value = 0; | |
| 1591 } | |
| 1592 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); | |
| 1593 this->flushRenderTarget(glRT, &SkIRect::EmptyIRect()); | |
| 1594 | |
| 1595 GrScissorState scissorState; | |
| 1596 scissorState.fEnabled = true; | |
| 1597 scissorState.fRect = rect; | |
| 1598 this->flushScissor(scissorState, glRT->getViewport(), glRT->origin()); | |
| 1599 | |
| 1600 GL_CALL(StencilMask((uint32_t) clipStencilMask)); | |
| 1601 GL_CALL(ClearStencil(value)); | |
| 1602 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT)); | |
| 1603 fHWStencilSettings.invalidate(); | |
| 1604 } | |
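| | |
| // Editorial note: a worked example of the clip-bit math above. With an 8-bit stencil buffer, | |
| // stencilBitCount == 8 and the clip bit is 1 << 7 == 0x80; insideClip then clears the buffer to | |
| // 0x80 (clip bit set) and !insideClip clears it to 0, while clipStencilMask == ~0 writes all bits. | |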
| 1605 | |
| 1606 bool GrGLGpu::readPixelsWillPayForYFlip(GrRenderTarget* renderTarget, | |
| 1607 int left, int top, | |
| 1608 int width, int height, | |
| 1609 GrPixelConfig config, | |
| 1610 size_t rowBytes) const { | |
| 1611     // If this render target is already TopLeft, we don't need to flip. | |
| 1612 if (kTopLeft_GrSurfaceOrigin == renderTarget->origin()) { | |
| 1613 return false; | |
| 1614 } | |
| 1615 | |
| 1616 // if GL can do the flip then we'll never pay for it. | |
| 1617 if (this->glCaps().packFlipYSupport()) { | |
| 1618 return false; | |
| 1619 } | |
| 1620 | |
| 1621 // If we have to do memcpy to handle non-trim rowBytes then we | |
| 1622 // get the flip for free. Otherwise it costs. | |
| 1623 if (this->glCaps().packRowLengthSupport()) { | |
| 1624 return true; | |
| 1625 } | |
| 1626 // If we have to do memcpys to handle rowBytes then y-flip is free | |
| 1627 // Note the rowBytes might be tight to the passed in data, but if data | |
| 1628 // gets clipped in x to the target the rowBytes will no longer be tight. | |
| 1629 if (left >= 0 && (left + width) < renderTarget->width()) { | |
| 1630 return 0 == rowBytes || | |
| 1631 GrBytesPerPixel(config) * width == rowBytes; | |
| 1632 } else { | |
| 1633 return false; | |
| 1634 } | |
| 1635 } | |
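| | |
| // Editorial note: an illustrative example of the tightness test above, assuming no | |
| // PACK_ROW_LENGTH support. For a 100-pixel-wide kRGBA_8888 read, tight rowBytes is 400; passing | |
| // rowBytes of 0 or 400 needs no per-row repack, so a y-flip would be an extra cost (return true), | |
| // while rowBytes of 512 already forces a per-row memcpy and the flip rides along free (false). | |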
| 1636 | |
| 1637 bool GrGLGpu::onReadPixels(GrRenderTarget* target, | |
| 1638 int left, int top, | |
| 1639 int width, int height, | |
| 1640 GrPixelConfig config, | |
| 1641 void* buffer, | |
| 1642 size_t rowBytes) { | |
| 1643 // We cannot read pixels into a compressed buffer | |
| 1644 if (GrPixelConfigIsCompressed(config)) { | |
| 1645 return false; | |
| 1646 } | |
| 1647 | |
| 1648 GrGLenum format = 0; | |
| 1649 GrGLenum type = 0; | |
| 1650 bool flipY = kBottomLeft_GrSurfaceOrigin == target->origin(); | |
| 1651 if (!this->configToGLFormats(config, false, NULL, &format, &type)) { | |
| 1652 return false; | |
| 1653 } | |
| 1654 size_t bpp = GrBytesPerPixel(config); | |
| 1655 if (!adjust_pixel_ops_params(target->width(), target->height(), bpp, | |
| 1656 &left, &top, &width, &height, | |
| 1657 const_cast<const void**>(&buffer), | |
| 1658 &rowBytes)) { | |
| 1659 return false; | |
| 1660 } | |
| 1661 | |
| 1662 // resolve the render target if necessary | |
| 1663 GrGLRenderTarget* tgt = static_cast<GrGLRenderTarget*>(target); | |
| 1664 switch (tgt->getResolveType()) { | |
| 1665 case GrGLRenderTarget::kCantResolve_ResolveType: | |
| 1666 return false; | |
| 1667 case GrGLRenderTarget::kAutoResolves_ResolveType: | |
| 1668             this->flushRenderTarget(static_cast<GrGLRenderTarget*>(target), &SkIRect::EmptyIRect()); | |
| 1669 break; | |
| 1670 case GrGLRenderTarget::kCanResolve_ResolveType: | |
| 1671 this->onResolveRenderTarget(tgt); | |
| 1672 // we don't track the state of the READ FBO ID. | |
| 1673 fGPUStats.incRenderTargetBinds(); | |
| 1674 GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, | |
| 1675 tgt->textureFBOID())); | |
| 1676 break; | |
| 1677 default: | |
| 1678 SkFAIL("Unknown resolve type"); | |
| 1679 } | |
| 1680 | |
| 1681 const GrGLIRect& glvp = tgt->getViewport(); | |
| 1682 | |
| 1683 // the read rect is viewport-relative | |
| 1684 GrGLIRect readRect; | |
| 1685 readRect.setRelativeTo(glvp, left, top, width, height, target->origin()); | |
| 1686 | |
| 1687 size_t tightRowBytes = bpp * width; | |
| 1688 if (0 == rowBytes) { | |
| 1689 rowBytes = tightRowBytes; | |
| 1690 } | |
| 1691 size_t readDstRowBytes = tightRowBytes; | |
| 1692 void* readDst = buffer; | |
| 1693 | |
| 1694 // determine if GL can read using the passed rowBytes or if we need | |
| 1695 // a scratch buffer. | |
| 1696 GrAutoMalloc<32 * sizeof(GrColor)> scratch; | |
| 1697 if (rowBytes != tightRowBytes) { | |
| 1698 if (this->glCaps().packRowLengthSupport()) { | |
| 1699 SkASSERT(!(rowBytes % sizeof(GrColor))); | |
| 1700 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, | |
| 1701                                 static_cast<GrGLint>(rowBytes / sizeof(GrColor)))); | |
| 1702 readDstRowBytes = rowBytes; | |
| 1703 } else { | |
| 1704 scratch.reset(tightRowBytes * height); | |
| 1705 readDst = scratch.get(); | |
| 1706 } | |
| 1707 } | |
| 1708 if (flipY && this->glCaps().packFlipYSupport()) { | |
| 1709 GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 1)); | |
| 1710 } | |
| 1711 GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom, | |
| 1712 readRect.fWidth, readRect.fHeight, | |
| 1713 format, type, readDst)); | |
| 1714 if (readDstRowBytes != tightRowBytes) { | |
| 1715 SkASSERT(this->glCaps().packRowLengthSupport()); | |
| 1716 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0)); | |
| 1717 } | |
| 1718 if (flipY && this->glCaps().packFlipYSupport()) { | |
| 1719 GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 0)); | |
| 1720 flipY = false; | |
| 1721 } | |
| 1722 | |
| 1723 // now reverse the order of the rows, since GL's are bottom-to-top, but our | |
| 1724 // API presents top-to-bottom. We must preserve the padding contents. Note | |
| 1725 // that the above readPixels did not overwrite the padding. | |
| 1726 if (readDst == buffer) { | |
| 1727 SkASSERT(rowBytes == readDstRowBytes); | |
| 1728 if (flipY) { | |
| 1729 scratch.reset(tightRowBytes); | |
| 1730 void* tmpRow = scratch.get(); | |
| 1731 // flip y in-place by rows | |
| 1732 const int halfY = height >> 1; | |
| 1733 char* top = reinterpret_cast<char*>(buffer); | |
| 1734 char* bottom = top + (height - 1) * rowBytes; | |
| 1735 for (int y = 0; y < halfY; y++) { | |
| 1736 memcpy(tmpRow, top, tightRowBytes); | |
| 1737 memcpy(top, bottom, tightRowBytes); | |
| 1738 memcpy(bottom, tmpRow, tightRowBytes); | |
| 1739 top += rowBytes; | |
| 1740 bottom -= rowBytes; | |
| 1741 } | |
| 1742 } | |
| 1743 } else { | |
| 1744 SkASSERT(readDst != buffer); SkASSERT(rowBytes != tightRowBytes); | |
| 1745 // copy from readDst to buffer while flipping y | |
| 1746 // const int halfY = height >> 1; | |
| 1747 const char* src = reinterpret_cast<const char*>(readDst); | |
| 1748 char* dst = reinterpret_cast<char*>(buffer); | |
| 1749 if (flipY) { | |
| 1750 dst += (height-1) * rowBytes; | |
| 1751 } | |
| 1752 for (int y = 0; y < height; y++) { | |
| 1753 memcpy(dst, src, tightRowBytes); | |
| 1754 src += readDstRowBytes; | |
| 1755 if (!flipY) { | |
| 1756 dst += rowBytes; | |
| 1757 } else { | |
| 1758 dst -= rowBytes; | |
| 1759 } | |
| 1760 } | |
| 1761 } | |
| 1762 return true; | |
| 1763 } | |
| 1764 | |
| 1765 void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, const SkIRect* bound) { | |
| 1766 | |
| 1767 SkASSERT(target); | |
| 1768 | |
| 1769 uint32_t rtID = target->getUniqueID(); | |
| 1770 if (fHWBoundRenderTargetUniqueID != rtID) { | |
| 1771 fGPUStats.incRenderTargetBinds(); | |
| 1772 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, target->renderFBOID())); | |
| 1773 #ifdef SK_DEBUG | |
| 1774 // don't do this check in Chromium -- this is causing | |
| 1775 // lots of repeated command buffer flushes when the compositor is | |
| 1776 // rendering with Ganesh, which is really slow; even too slow for | |
| 1777 // Debug mode. | |
| 1778 if (!this->glContext().isChromium()) { | |
| 1779 GrGLenum status; | |
| 1780 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); | |
| 1781 if (status != GR_GL_FRAMEBUFFER_COMPLETE) { | |
| 1782                 SkDebugf("GrGLGpu::flushRenderTarget glCheckFramebufferStatus %x\n", status); | |
| 1783 } | |
| 1784 } | |
| 1785 #endif | |
| 1786 fHWBoundRenderTargetUniqueID = rtID; | |
| 1787 const GrGLIRect& vp = target->getViewport(); | |
| 1788 if (fHWViewport != vp) { | |
| 1789 vp.pushToGLViewport(this->glInterface()); | |
| 1790 fHWViewport = vp; | |
| 1791 } | |
| 1792 } | |
| 1793 if (NULL == bound || !bound->isEmpty()) { | |
| 1794 target->flagAsNeedingResolve(bound); | |
| 1795 } | |
| 1796 | |
| 1797 GrTexture *texture = target->asTexture(); | |
| 1798 if (texture) { | |
| 1799 texture->texturePriv().dirtyMipMaps(true); | |
| 1800 } | |
| 1801 } | |
| 1802 | |
| 1803 GrGLenum gPrimitiveType2GLMode[] = { | |
| 1804 GR_GL_TRIANGLES, | |
| 1805 GR_GL_TRIANGLE_STRIP, | |
| 1806 GR_GL_TRIANGLE_FAN, | |
| 1807 GR_GL_POINTS, | |
| 1808 GR_GL_LINES, | |
| 1809 GR_GL_LINE_STRIP | |
| 1810 }; | |
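| | |
| // Editorial note: this table assumes GrPrimitiveType is ordered kTriangles, kTriangleStrip, | |
| // kTriangleFan, kPoints, kLines, kLineStrip; unlike the other lookup tables in this file it has | |
| // no GR_STATIC_ASSERTs, and the SkASSERT in onDraw() below only guards the upper bound. | |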
| 1811 | |
| 1812 #define SWAP_PER_DRAW 0 | |
| 1813 | |
| 1814 #if SWAP_PER_DRAW | |
| 1815 #if defined(SK_BUILD_FOR_MAC) | |
| 1816 #include <AGL/agl.h> | |
| 1817 #elif defined(SK_BUILD_FOR_WIN32) | |
| 1818 #include <gl/GL.h> | |
| 1819 void SwapBuf() { | |
| 1820 DWORD procID = GetCurrentProcessId(); | |
| 1821 HWND hwnd = GetTopWindow(GetDesktopWindow()); | |
| 1822     while (hwnd) { | |
| 1823 DWORD wndProcID = 0; | |
| 1824 GetWindowThreadProcessId(hwnd, &wndProcID); | |
| 1825         if (wndProcID == procID) { | |
| 1826 SwapBuffers(GetDC(hwnd)); | |
| 1827 } | |
| 1828 hwnd = GetNextWindow(hwnd, GW_HWNDNEXT); | |
| 1829 } | |
| 1830 } | |
| 1831 #endif | |
| 1832 #endif | |
| 1833 | |
| 1834 void GrGLGpu::onDraw(const GrOptDrawState& ds, const GrDrawTarget::DrawInfo& info) { | |
| 1835 if (!this->flushGLState(ds)) { | |
| 1836 return; | |
| 1837 } | |
| 1838 | |
| 1839 size_t indexOffsetInBytes; | |
| 1840 this->setupGeometry(ds, info, &indexOffsetInBytes); | |
| 1841 | |
| 1842     SkASSERT((size_t)info.primitiveType() < SK_ARRAY_COUNT(gPrimitiveType2GLMode)); | |
| 1843 | |
| 1844 if (info.isIndexed()) { | |
| 1845 GrGLvoid* indices = | |
| 1846             reinterpret_cast<GrGLvoid*>(indexOffsetInBytes + sizeof(uint16_t) * info.startIndex()); | |
| 1847 // info.startVertex() was accounted for by setupGeometry. | |
| 1848 GL_CALL(DrawElements(gPrimitiveType2GLMode[info.primitiveType()], | |
| 1849 info.indexCount(), | |
| 1850 GR_GL_UNSIGNED_SHORT, | |
| 1851 indices)); | |
| 1852 } else { | |
| 1853         // Pass 0 for parameter first. We have to adjust glVertexAttribPointer() to account for | |
| 1854         // startVertex in the DrawElements case. So we always rely on setupGeometry to have | |
| 1855 // accounted for startVertex. | |
| 1856         GL_CALL(DrawArrays(gPrimitiveType2GLMode[info.primitiveType()], 0, info.vertexCount())); | |
| 1857 } | |
| 1858 #if SWAP_PER_DRAW | |
| 1859 glFlush(); | |
| 1860 #if defined(SK_BUILD_FOR_MAC) | |
| 1861 aglSwapBuffers(aglGetCurrentContext()); | |
| 1862 int set_a_break_pt_here = 9; | |
| 1863 aglSwapBuffers(aglGetCurrentContext()); | |
| 1864 #elif defined(SK_BUILD_FOR_WIN32) | |
| 1865 SwapBuf(); | |
| 1866 int set_a_break_pt_here = 9; | |
| 1867 SwapBuf(); | |
| 1868 #endif | |
| 1869 #endif | |
| 1870 } | |
| 1871 | |
| 1872 void GrGLGpu::onStencilPath(const GrPath* path, const StencilPathState& state) { | |
| 1873 this->flushColorWrite(false); | |
| 1874 this->flushDrawFace(GrDrawState::kBoth_DrawFace); | |
| 1875 | |
| 1876 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(state.fRenderTarget); | |
| 1877 SkISize size = SkISize::Make(rt->width(), rt->height()); | |
| 1878     this->glPathRendering()->setProjectionMatrix(*state.fViewMatrix, size, rt->origin()); | |
| 1879 this->flushScissor(*state.fScissor, rt->getViewport(), rt->origin()); | |
| 1880 this->flushHWAAState(rt, state.fUseHWAA, false); | |
| 1881 this->flushRenderTarget(rt, NULL); | |
| 1882 | |
| 1883 fPathRendering->stencilPath(path, *state.fStencil); | |
| 1884 } | |
| 1885 | |
| 1886 void GrGLGpu::onDrawPath(const GrOptDrawState& ds, const GrPath* path, | |
| 1887 const GrStencilSettings& stencil) { | |
| 1888 if (!this->flushGLState(ds)) { | |
| 1889 return; | |
| 1890 } | |
| 1891 fPathRendering->drawPath(path, stencil); | |
| 1892 } | |
| 1893 | |
| 1894 void GrGLGpu::onDrawPaths(const GrOptDrawState& ds, | |
| 1895 const GrPathRange* pathRange, | |
| 1896 const void* indices, | |
| 1897 GrDrawTarget::PathIndexType indexType, | |
| 1898 const float transformValues[], | |
| 1899 GrDrawTarget::PathTransformType transformType, | |
| 1900 int count, | |
| 1901 const GrStencilSettings& stencil) { | |
| 1902 if (!this->flushGLState(ds)) { | |
| 1903 return; | |
| 1904 } | |
| 1905 fPathRendering->drawPaths(pathRange, indices, indexType, transformValues, | |
| 1906 transformType, count, stencil); | |
| 1907 } | |
| 1908 | |
| 1909 void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target) { | |
| 1910 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target); | |
| 1911 if (rt->needsResolve()) { | |
| 1912         // Some extensions automatically resolve the texture when it is read. | |
| 1913 if (this->glCaps().usesMSAARenderBuffers()) { | |
| 1914 SkASSERT(rt->textureFBOID() != rt->renderFBOID()); | |
| 1915 fGPUStats.incRenderTargetBinds(); | |
| 1916 fGPUStats.incRenderTargetBinds(); | |
| 1917 GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID())); | |
| 1918             GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID())); | |
| 1919 // make sure we go through flushRenderTarget() since we've modified | |
| 1920 // the bound DRAW FBO ID. | |
| 1921 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID; | |
| 1922 const GrGLIRect& vp = rt->getViewport(); | |
| 1923 const SkIRect dirtyRect = rt->getResolveRect(); | |
| 1924 GrGLIRect r; | |
| 1925 r.setRelativeTo(vp, dirtyRect.fLeft, dirtyRect.fTop, | |
| 1926                             dirtyRect.width(), dirtyRect.height(), target->origin()); | |
| 1927 | |
| 1928 if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) { | |
| 1929 // Apple's extension uses the scissor as the blit bounds. | |
| 1930 GrScissorState scissorState; | |
| 1931 scissorState.fEnabled = true; | |
| 1932 scissorState.fRect = dirtyRect; | |
| 1933                 this->flushScissor(scissorState, rt->getViewport(), rt->origin()); | |
| 1934 GL_CALL(ResolveMultisampleFramebuffer()); | |
| 1935 } else { | |
| 1936 int right = r.fLeft + r.fWidth; | |
| 1937 int top = r.fBottom + r.fHeight; | |
| 1938 | |
| 1939 // BlitFrameBuffer respects the scissor, so disable it. | |
| 1940 this->disableScissor(); | |
| 1941 GL_CALL(BlitFramebuffer(r.fLeft, r.fBottom, right, top, | |
| 1942 r.fLeft, r.fBottom, right, top, | |
| 1943 GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); | |
| 1944 } | |
| 1945 } | |
| 1946 rt->flagAsResolved(); | |
| 1947 } | |
| 1948 } | |
| 1949 | |
| 1950 namespace { | |
| 1951 | |
| 1952 | |
| 1953 GrGLenum gr_to_gl_stencil_op(GrStencilOp op) { | |
| 1954 static const GrGLenum gTable[] = { | |
| 1955 GR_GL_KEEP, // kKeep_StencilOp | |
| 1956 GR_GL_REPLACE, // kReplace_StencilOp | |
| 1957 GR_GL_INCR_WRAP, // kIncWrap_StencilOp | |
| 1958 GR_GL_INCR, // kIncClamp_StencilOp | |
| 1959 GR_GL_DECR_WRAP, // kDecWrap_StencilOp | |
| 1960 GR_GL_DECR, // kDecClamp_StencilOp | |
| 1961 GR_GL_ZERO, // kZero_StencilOp | |
| 1962 GR_GL_INVERT, // kInvert_StencilOp | |
| 1963 }; | |
| 1964 GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kStencilOpCount); | |
| 1965 GR_STATIC_ASSERT(0 == kKeep_StencilOp); | |
| 1966 GR_STATIC_ASSERT(1 == kReplace_StencilOp); | |
| 1967 GR_STATIC_ASSERT(2 == kIncWrap_StencilOp); | |
| 1968 GR_STATIC_ASSERT(3 == kIncClamp_StencilOp); | |
| 1969 GR_STATIC_ASSERT(4 == kDecWrap_StencilOp); | |
| 1970 GR_STATIC_ASSERT(5 == kDecClamp_StencilOp); | |
| 1971 GR_STATIC_ASSERT(6 == kZero_StencilOp); | |
| 1972 GR_STATIC_ASSERT(7 == kInvert_StencilOp); | |
| 1973 SkASSERT((unsigned) op < kStencilOpCount); | |
| 1974 return gTable[op]; | |
| 1975 } | |
| 1976 | |
| 1977 void set_gl_stencil(const GrGLInterface* gl, | |
| 1978 const GrStencilSettings& settings, | |
| 1979 GrGLenum glFace, | |
| 1980 GrStencilSettings::Face grFace) { | |
| 1981 GrGLenum glFunc = GrToGLStencilFunc(settings.func(grFace)); | |
| 1982 GrGLenum glFailOp = gr_to_gl_stencil_op(settings.failOp(grFace)); | |
| 1983 GrGLenum glPassOp = gr_to_gl_stencil_op(settings.passOp(grFace)); | |
| 1984 | |
| 1985 GrGLint ref = settings.funcRef(grFace); | |
| 1986 GrGLint mask = settings.funcMask(grFace); | |
| 1987 GrGLint writeMask = settings.writeMask(grFace); | |
| 1988 | |
| 1989 if (GR_GL_FRONT_AND_BACK == glFace) { | |
| 1990 // we call the combined func just in case separate stencil is not | |
| 1991 // supported. | |
| 1992 GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask)); | |
| 1993 GR_GL_CALL(gl, StencilMask(writeMask)); | |
| 1994 GR_GL_CALL(gl, StencilOp(glFailOp, glPassOp, glPassOp)); | |
| 1995 } else { | |
| 1996 GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask)); | |
| 1997 GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask)); | |
| 1998 GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, glPassOp, glPassOp)); | |
| 1999 } | |
| 2000 } | |
| 2001 } | |
| 2002 | |
| 2003 void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings) { | |
| 2004 if (fHWStencilSettings != stencilSettings) { | |
| 2005 if (stencilSettings.isDisabled()) { | |
| 2006 if (kNo_TriState != fHWStencilTestEnabled) { | |
| 2007 GL_CALL(Disable(GR_GL_STENCIL_TEST)); | |
| 2008 fHWStencilTestEnabled = kNo_TriState; | |
| 2009 } | |
| 2010 } else { | |
| 2011 if (kYes_TriState != fHWStencilTestEnabled) { | |
| 2012 GL_CALL(Enable(GR_GL_STENCIL_TEST)); | |
| 2013 fHWStencilTestEnabled = kYes_TriState; | |
| 2014 } | |
| 2015 } | |
| 2016 if (!stencilSettings.isDisabled()) { | |
| 2017 if (this->caps()->twoSidedStencilSupport()) { | |
| 2018 set_gl_stencil(this->glInterface(), | |
| 2019 stencilSettings, | |
| 2020 GR_GL_FRONT, | |
| 2021 GrStencilSettings::kFront_Face); | |
| 2022 set_gl_stencil(this->glInterface(), | |
| 2023 stencilSettings, | |
| 2024 GR_GL_BACK, | |
| 2025 GrStencilSettings::kBack_Face); | |
| 2026 } else { | |
| 2027 set_gl_stencil(this->glInterface(), | |
| 2028 stencilSettings, | |
| 2029 GR_GL_FRONT_AND_BACK, | |
| 2030 GrStencilSettings::kFront_Face); | |
| 2031 } | |
| 2032 } | |
| 2033 fHWStencilSettings = stencilSettings; | |
| 2034 } | |
| 2035 } | |
| 2036 | |
| 2037 void GrGLGpu::flushHWAAState(GrRenderTarget* rt, bool useHWAA, bool isLineDraw) { | |
| 2038 // At least some ATI Linux drivers will render GL_LINES incorrectly when MSAA state is enabled but | |
| 2039 // the target is not multisampled: single-pixel-wide lines are rendered thicker than one pixel. | |
| 2040 #if 0 | |
| 2041 // Replace RT_HAS_MSAA with this definition once this driver bug is no longer a relevant concern | |
| 2042 #define RT_HAS_MSAA rt->isMultisampled() | |
| 2043 #else | |
| 2044 #define RT_HAS_MSAA (rt->isMultisampled() || isLineDraw) | |
| 2045 #endif | |
| 2046 | |
| 2047 if (kGL_GrGLStandard == this->glStandard()) { | |
| 2048 if (RT_HAS_MSAA) { | |
| 2049 if (useHWAA) { | |
| 2050 if (kYes_TriState != fMSAAEnabled) { | |
| 2051 GL_CALL(Enable(GR_GL_MULTISAMPLE)); | |
| 2052 fMSAAEnabled = kYes_TriState; | |
| 2053 } | |
| 2054 } else { | |
| 2055 if (kNo_TriState != fMSAAEnabled) { | |
| 2056 GL_CALL(Disable(GR_GL_MULTISAMPLE)); | |
| 2057 fMSAAEnabled = kNo_TriState; | |
| 2058 } | |
| 2059 } | |
| 2060 } | |
| 2061 } | |
| 2062 } | |
| 2063 | |
| 2064 void GrGLGpu::flushBlend(const GrXferProcessor::BlendInfo& blendInfo) { | |
| 2065 // Any optimization to disable blending should have already been applied and | |
| 2066 // tweaked the coeffs to (1, 0). | |
| 2067 | |
| 2068 GrBlendCoeff srcCoeff = blendInfo.fSrcBlend; | |
| 2069 GrBlendCoeff dstCoeff = blendInfo.fDstBlend; | |
| 2070     bool blendOff = kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff; | |
| 2071 if (blendOff) { | |
| 2072 if (kNo_TriState != fHWBlendState.fEnabled) { | |
| 2073 GL_CALL(Disable(GR_GL_BLEND)); | |
| 2074 fHWBlendState.fEnabled = kNo_TriState; | |
| 2075 } | |
| 2076 } else { | |
| 2077 if (kYes_TriState != fHWBlendState.fEnabled) { | |
| 2078 GL_CALL(Enable(GR_GL_BLEND)); | |
| 2079 fHWBlendState.fEnabled = kYes_TriState; | |
| 2080 } | |
| 2081 if (fHWBlendState.fSrcCoeff != srcCoeff || | |
| 2082 fHWBlendState.fDstCoeff != dstCoeff) { | |
| 2083 GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff], | |
| 2084 gXfermodeCoeff2Blend[dstCoeff])); | |
| 2085 fHWBlendState.fSrcCoeff = srcCoeff; | |
| 2086 fHWBlendState.fDstCoeff = dstCoeff; | |
| 2087 } | |
| 2088 GrColor blendConst = blendInfo.fBlendConstant; | |
| 2089 if ((BlendCoeffReferencesConstant(srcCoeff) || | |
| 2090 BlendCoeffReferencesConstant(dstCoeff)) && | |
| 2091 (!fHWBlendState.fConstColorValid || | |
| 2092 fHWBlendState.fConstColor != blendConst)) { | |
| 2093 GrGLfloat c[4]; | |
| 2094 GrColorToRGBAFloat(blendConst, c); | |
| 2095 GL_CALL(BlendColor(c[0], c[1], c[2], c[3])); | |
| 2096 fHWBlendState.fConstColor = blendConst; | |
| 2097 fHWBlendState.fConstColorValid = true; | |
| 2098 } | |
| 2099 } | |
| 2100 } | |
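| | |
| // Editorial note: GrColorToRGBAFloat() above maps each 8-bit channel of blendConst to a float in | |
| // [0, 1] (value / 255) before glBlendColor is called; e.g. a channel byte of 0x80 becomes ~0.502f. | |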
| 2101 | |
| 2102 static inline GrGLenum tile_to_gl_wrap(SkShader::TileMode tm) { | |
| 2103 static const GrGLenum gWrapModes[] = { | |
| 2104 GR_GL_CLAMP_TO_EDGE, | |
| 2105 GR_GL_REPEAT, | |
| 2106 GR_GL_MIRRORED_REPEAT | |
| 2107 }; | |
| 2108 GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes)); | |
| 2109 GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode); | |
| 2110 GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode); | |
| 2111 GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode); | |
| 2112 return gWrapModes[tm]; | |
| 2113 } | |
| 2114 | |
| 2115 void GrGLGpu::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTexture* texture) { | |
| 2116 SkASSERT(texture); | |
| 2117 | |
| 2118     // If we created a rt/tex and rendered to it without using a texture and now we're texturing | |
| 2119     // from the rt it will still be the last bound texture, but it needs resolving. So keep this | |
| 2120     // out of the "last != next" check. | |
| 2121     GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget()); | |
| 2122 if (texRT) { | |
| 2123 this->onResolveRenderTarget(texRT); | |
| 2124 } | |
| 2125 | |
| 2126 uint32_t textureID = texture->getUniqueID(); | |
| 2127 if (fHWBoundTextureUniqueIDs[unitIdx] != textureID) { | |
| 2128 this->setTextureUnit(unitIdx); | |
| 2129 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, texture->textureID())); | |
| 2130 fHWBoundTextureUniqueIDs[unitIdx] = textureID; | |
| 2131 } | |
| 2132 | |
| 2133 ResetTimestamp timestamp; | |
| 2134     const GrGLTexture::TexParams& oldTexParams = texture->getCachedTexParams(&timestamp); | |
| 2135 bool setAll = timestamp < this->getResetTimestamp(); | |
| 2136 GrGLTexture::TexParams newTexParams; | |
| 2137 | |
| 2138 static GrGLenum glMinFilterModes[] = { | |
| 2139 GR_GL_NEAREST, | |
| 2140 GR_GL_LINEAR, | |
| 2141 GR_GL_LINEAR_MIPMAP_LINEAR | |
| 2142 }; | |
| 2143 static GrGLenum glMagFilterModes[] = { | |
| 2144 GR_GL_NEAREST, | |
| 2145 GR_GL_LINEAR, | |
| 2146 GR_GL_LINEAR | |
| 2147 }; | |
| 2148 GrTextureParams::FilterMode filterMode = params.filterMode(); | |
| 2149 | |
| 2150 if (GrTextureParams::kMipMap_FilterMode == filterMode) { | |
| 2151         if (!this->caps()->mipMapSupport() || GrPixelConfigIsCompressed(texture->config())) { | |
| 2152 filterMode = GrTextureParams::kBilerp_FilterMode; | |
| 2153 } | |
| 2154 } | |
| 2155 | |
| 2156 newTexParams.fMinFilter = glMinFilterModes[filterMode]; | |
| 2157 newTexParams.fMagFilter = glMagFilterModes[filterMode]; | |
| 2158 | |
| 2159 if (GrTextureParams::kMipMap_FilterMode == filterMode && | |
| 2160 texture->texturePriv().mipMapsAreDirty()) { | |
| 2161 GL_CALL(GenerateMipmap(GR_GL_TEXTURE_2D)); | |
| 2162 texture->texturePriv().dirtyMipMaps(false); | |
| 2163 } | |
| 2164 | |
| 2165 newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX()); | |
| 2166 newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY()); | |
| 2167 memcpy(newTexParams.fSwizzleRGBA, | |
| 2168            GrGLShaderBuilder::GetTexParamSwizzle(texture->config(), this->glCaps()), | |
| 2169 sizeof(newTexParams.fSwizzleRGBA)); | |
| 2170 if (setAll || newTexParams.fMagFilter != oldTexParams.fMagFilter) { | |
| 2171 this->setTextureUnit(unitIdx); | |
| 2172 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, | |
| 2173 GR_GL_TEXTURE_MAG_FILTER, | |
| 2174 newTexParams.fMagFilter)); | |
| 2175 } | |
| 2176 if (setAll || newTexParams.fMinFilter != oldTexParams.fMinFilter) { | |
| 2177 this->setTextureUnit(unitIdx); | |
| 2178 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, | |
| 2179 GR_GL_TEXTURE_MIN_FILTER, | |
| 2180 newTexParams.fMinFilter)); | |
| 2181 } | |
| 2182 if (setAll || newTexParams.fWrapS != oldTexParams.fWrapS) { | |
| 2183 this->setTextureUnit(unitIdx); | |
| 2184 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, | |
| 2185 GR_GL_TEXTURE_WRAP_S, | |
| 2186 newTexParams.fWrapS)); | |
| 2187 } | |
| 2188 if (setAll || newTexParams.fWrapT != oldTexParams.fWrapT) { | |
| 2189 this->setTextureUnit(unitIdx); | |
| 2190 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, | |
| 2191 GR_GL_TEXTURE_WRAP_T, | |
| 2192 newTexParams.fWrapT)); | |
| 2193 } | |
| 2194 if (this->glCaps().textureSwizzleSupport() && | |
| 2195 (setAll || memcmp(newTexParams.fSwizzleRGBA, | |
| 2196 oldTexParams.fSwizzleRGBA, | |
| 2197 sizeof(newTexParams.fSwizzleRGBA)))) { | |
| 2198 this->setTextureUnit(unitIdx); | |
| 2199 if (this->glStandard() == kGLES_GrGLStandard) { | |
| 2200 // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA. | |
| 2201 const GrGLenum* swizzle = newTexParams.fSwizzleRGBA; | |
| 2202             GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_R, swizzle[0])); | |
| 2203             GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_G, swizzle[1])); | |
| 2204             GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_B, swizzle[2])); | |
| 2205             GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_A, swizzle[3])); | |
| 2206 } else { | |
| 2207             GR_STATIC_ASSERT(sizeof(newTexParams.fSwizzleRGBA[0]) == sizeof(GrGLint)); | |
| 2208             const GrGLint* swizzle = reinterpret_cast<const GrGLint*>(newTexParams.fSwizzleRGBA); | |
| 2209             GL_CALL(TexParameteriv(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_RGBA, swizzle)); | |
| 2210 } | |
| 2211 } | |
| 2212 texture->setCachedTexParams(newTexParams, this->getResetTimestamp()); | |
| 2213 } | |
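| | |
| // Editorial note on the caching scheme above: getCachedTexParams() reports the timestamp at which | |
| // the params were last pushed to GL. If the context has been reset since then (timestamp older | |
| // than getResetTimestamp()), setAll forces every parameter to be re-sent; otherwise only the | |
| // fields that differ from the cached TexParams are updated. | |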
| 2214 | |
| 2215 void GrGLGpu::flushDither(bool dither) { | |
| 2216 if (dither) { | |
| 2217 if (kYes_TriState != fHWDitherEnabled) { | |
| 2218 GL_CALL(Enable(GR_GL_DITHER)); | |
| 2219 fHWDitherEnabled = kYes_TriState; | |
| 2220 } | |
| 2221 } else { | |
| 2222 if (kNo_TriState != fHWDitherEnabled) { | |
| 2223 GL_CALL(Disable(GR_GL_DITHER)); | |
| 2224 fHWDitherEnabled = kNo_TriState; | |
| 2225 } | |
| 2226 } | |
| 2227 } | |
| 2228 | |
| 2229 void GrGLGpu::flushColorWrite(bool writeColor) { | |
| 2230 if (!writeColor) { | |
| 2231 if (kNo_TriState != fHWWriteToColor) { | |
| 2232 GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE, | |
| 2233 GR_GL_FALSE, GR_GL_FALSE)); | |
| 2234 fHWWriteToColor = kNo_TriState; | |
| 2235 } | |
| 2236 } else { | |
| 2237 if (kYes_TriState != fHWWriteToColor) { | |
| 2238 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE)); | |
| 2239 fHWWriteToColor = kYes_TriState; | |
| 2240 } | |
| 2241 } | |
| 2242 } | |
| 2243 | |
| 2244 void GrGLGpu::flushDrawFace(GrDrawState::DrawFace face) { | |
| 2245 if (fHWDrawFace != face) { | |
| 2246 switch (face) { | |
| 2247 case GrDrawState::kCCW_DrawFace: | |
| 2248 GL_CALL(Enable(GR_GL_CULL_FACE)); | |
| 2249 GL_CALL(CullFace(GR_GL_BACK)); | |
| 2250 break; | |
| 2251 case GrDrawState::kCW_DrawFace: | |
| 2252 GL_CALL(Enable(GR_GL_CULL_FACE)); | |
| 2253 GL_CALL(CullFace(GR_GL_FRONT)); | |
| 2254 break; | |
| 2255 case GrDrawState::kBoth_DrawFace: | |
| 2256 GL_CALL(Disable(GR_GL_CULL_FACE)); | |
| 2257 break; | |
| 2258 default: | |
| 2259 SkFAIL("Unknown draw face."); | |
| 2260 } | |
| 2261 fHWDrawFace = face; | |
| 2262 } | |
| 2263 } | |
| 2264 | |
| 2265 bool GrGLGpu::configToGLFormats(GrPixelConfig config, | |
| 2266 bool getSizedInternalFormat, | |
| 2267 GrGLenum* internalFormat, | |
| 2268 GrGLenum* externalFormat, | |
| 2269 GrGLenum* externalType) { | |
| 2270 GrGLenum dontCare; | |
| 2271 if (NULL == internalFormat) { | |
| 2272 internalFormat = &dontCare; | |
| 2273 } | |
| 2274 if (NULL == externalFormat) { | |
| 2275 externalFormat = &dontCare; | |
| 2276 } | |
| 2277 if (NULL == externalType) { | |
| 2278 externalType = &dontCare; | |
| 2279 } | |
| 2280 | |
| 2281     if (!this->glCaps().isConfigTexturable(config)) { | |
| 2282 return false; | |
| 2283 } | |
| 2284 | |
| 2285 switch (config) { | |
| 2286 case kRGBA_8888_GrPixelConfig: | |
| 2287 *internalFormat = GR_GL_RGBA; | |
| 2288 *externalFormat = GR_GL_RGBA; | |
| 2289 if (getSizedInternalFormat) { | |
| 2290 *internalFormat = GR_GL_RGBA8; | |
| 2291 } else { | |
| 2292 *internalFormat = GR_GL_RGBA; | |
| 2293 } | |
| 2294 *externalType = GR_GL_UNSIGNED_BYTE; | |
| 2295 break; | |
| 2296 case kBGRA_8888_GrPixelConfig: | |
| 2297 if (this->glCaps().bgraIsInternalFormat()) { | |
| 2298 if (getSizedInternalFormat) { | |
| 2299 *internalFormat = GR_GL_BGRA8; | |
| 2300 } else { | |
| 2301 *internalFormat = GR_GL_BGRA; | |
| 2302 } | |
| 2303 } else { | |
| 2304 if (getSizedInternalFormat) { | |
| 2305 *internalFormat = GR_GL_RGBA8; | |
| 2306 } else { | |
| 2307 *internalFormat = GR_GL_RGBA; | |
| 2308 } | |
| 2309 } | |
| 2310 *externalFormat = GR_GL_BGRA; | |
| 2311 *externalType = GR_GL_UNSIGNED_BYTE; | |
| 2312 break; | |
| 2313 case kSRGBA_8888_GrPixelConfig: | |
| 2314 *internalFormat = GR_GL_SRGB_ALPHA; | |
| 2315 *externalFormat = GR_GL_SRGB_ALPHA; | |
| 2316 if (getSizedInternalFormat) { | |
| 2317 *internalFormat = GR_GL_SRGB8_ALPHA8; | |
| 2318 } else { | |
| 2319 *internalFormat = GR_GL_SRGB_ALPHA; | |
| 2320 } | |
| 2321 *externalType = GR_GL_UNSIGNED_BYTE; | |
| 2322 break; | |
| 2323 case kRGB_565_GrPixelConfig: | |
| 2324 *internalFormat = GR_GL_RGB; | |
| 2325 *externalFormat = GR_GL_RGB; | |
| 2326 if (getSizedInternalFormat) { | |
| 2327 if (!this->glCaps().ES2CompatibilitySupport()) { | |
| 2328 *internalFormat = GR_GL_RGB5; | |
| 2329 } else { | |
| 2330 *internalFormat = GR_GL_RGB565; | |
| 2331 } | |
| 2332 } else { | |
| 2333 *internalFormat = GR_GL_RGB; | |
| 2334 } | |
| 2335 *externalType = GR_GL_UNSIGNED_SHORT_5_6_5; | |
| 2336 break; | |
| 2337 case kRGBA_4444_GrPixelConfig: | |
| 2338 *internalFormat = GR_GL_RGBA; | |
| 2339 *externalFormat = GR_GL_RGBA; | |
| 2340 if (getSizedInternalFormat) { | |
| 2341 *internalFormat = GR_GL_RGBA4; | |
| 2342 } else { | |
| 2343 *internalFormat = GR_GL_RGBA; | |
| 2344 } | |
| 2345 *externalType = GR_GL_UNSIGNED_SHORT_4_4_4_4; | |
| 2346 break; | |
| 2347 case kIndex_8_GrPixelConfig: | |
| 2348 // no sized/unsized internal format distinction here | |
| 2349 *internalFormat = GR_GL_PALETTE8_RGBA8; | |
| 2350 break; | |
| 2351 case kAlpha_8_GrPixelConfig: | |
| 2352 if (this->glCaps().textureRedSupport()) { | |
| 2353 *internalFormat = GR_GL_RED; | |
| 2354 *externalFormat = GR_GL_RED; | |
| 2355 if (getSizedInternalFormat) { | |
| 2356 *internalFormat = GR_GL_R8; | |
| 2357 } else { | |
| 2358 *internalFormat = GR_GL_RED; | |
| 2359 } | |
| 2360 *externalType = GR_GL_UNSIGNED_BYTE; | |
| 2361 } else { | |
| 2362 *internalFormat = GR_GL_ALPHA; | |
| 2363 *externalFormat = GR_GL_ALPHA; | |
| 2364 if (getSizedInternalFormat) { | |
| 2365 *internalFormat = GR_GL_ALPHA8; | |
| 2366 } else { | |
| 2367 *internalFormat = GR_GL_ALPHA; | |
| 2368 } | |
| 2369 *externalType = GR_GL_UNSIGNED_BYTE; | |
| 2370 } | |
| 2371 break; | |
| 2372 case kETC1_GrPixelConfig: | |
| 2373 *internalFormat = GR_GL_COMPRESSED_RGB8_ETC1; | |
| 2374 break; | |
| 2375 case kLATC_GrPixelConfig: | |
| 2376 switch(this->glCaps().latcAlias()) { | |
| 2377 case GrGLCaps::kLATC_LATCAlias: | |
| 2378 *internalFormat = GR_GL_COMPRESSED_LUMINANCE_LATC1; | |
| 2379 break; | |
| 2380 case GrGLCaps::kRGTC_LATCAlias: | |
| 2381 *internalFormat = GR_GL_COMPRESSED_RED_RGTC1; | |
| 2382 break; | |
| 2383 case GrGLCaps::k3DC_LATCAlias: | |
| 2384 *internalFormat = GR_GL_COMPRESSED_3DC_X; | |
| 2385 break; | |
| 2386 } | |
| 2387 break; | |
| 2388 case kR11_EAC_GrPixelConfig: | |
| 2389 *internalFormat = GR_GL_COMPRESSED_R11; | |
| 2390 break; | |
| 2391 | |
| 2392 case kASTC_12x12_GrPixelConfig: | |
| 2393 *internalFormat = GR_GL_COMPRESSED_RGBA_ASTC_12x12; | |
| 2394 break; | |
| 2395 | |
| 2396 case kRGBA_float_GrPixelConfig: | |
| 2397 *internalFormat = GR_GL_RGBA32F; | |
| 2398 *externalFormat = GR_GL_RGBA; | |
| 2399 *externalType = GR_GL_FLOAT; | |
| 2400 break; | |
| 2401 | |
| 2402 case kAlpha_half_GrPixelConfig: | |
| 2403 if (this->glCaps().textureRedSupport()) { | |
| 2404 if (getSizedInternalFormat) { | |
| 2405 *internalFormat = GR_GL_R16F; | |
| 2406 } else { | |
| 2407 *internalFormat = GR_GL_RED; | |
| 2408 } | |
| 2409 *externalFormat = GR_GL_RED; | |
| 2410 } else { | |
| 2411 if (getSizedInternalFormat) { | |
| 2412 *internalFormat = GR_GL_ALPHA16F; | |
| 2413 } else { | |
| 2414 *internalFormat = GR_GL_ALPHA; | |
| 2415 } | |
| 2416 *externalFormat = GR_GL_ALPHA; | |
| 2417 } | |
| 2418             if (kGL_GrGLStandard == this->glStandard() || this->glVersion() >= GR_GL_VER(3, 0)) { | |
| 2419 *externalType = GR_GL_HALF_FLOAT; | |
| 2420 } else { | |
| 2421 *externalType = GR_GL_HALF_FLOAT_OES; | |
| 2422 } | |
| 2423 break; | |
| 2424 | |
| 2425 default: | |
| 2426 return false; | |
| 2427 } | |
| 2428 return true; | |
| 2429 } | |
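| | |
| // Editorial sketch (hypothetical call site) of querying the unsized formats for a readback, as | |
| // onReadPixels() does above: | |
| // | |
| //     GrGLenum format, type; | |
| //     if (this->configToGLFormats(kRGBA_8888_GrPixelConfig, false, NULL, &format, &type)) { | |
| //         // format == GR_GL_RGBA, type == GR_GL_UNSIGNED_BYTE | |
| //     } | |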
| 2430 | |
| 2431 void GrGLGpu::setTextureUnit(int unit) { | |
| 2432 SkASSERT(unit >= 0 && unit < fHWBoundTextureUniqueIDs.count()); | |
| 2433 if (unit != fHWActiveTextureUnitIdx) { | |
| 2434 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit)); | |
| 2435 fHWActiveTextureUnitIdx = unit; | |
| 2436 } | |
| 2437 } | |
| 2438 | |
| 2439 void GrGLGpu::setScratchTextureUnit() { | |
| 2440     // Bind the last texture unit since it is the least likely to be used by GrGLProgram. | |
| 2441 int lastUnitIdx = fHWBoundTextureUniqueIDs.count() - 1; | |
| 2442 if (lastUnitIdx != fHWActiveTextureUnitIdx) { | |
| 2443 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx)); | |
| 2444 fHWActiveTextureUnitIdx = lastUnitIdx; | |
| 2445 } | |
| 2446     // Clear out this field so that if a program does use this unit it will rebind the correct | |
| 2447     // texture. | |
| 2448 fHWBoundTextureUniqueIDs[lastUnitIdx] = SK_InvalidUniqueID; | |
| 2449 } | |
| 2450 | |
| 2451 namespace { | |
| 2452 // Determines whether glBlitFramebuffer could be used between src and dst. | |
| 2453 inline bool can_blit_framebuffer(const GrSurface* dst, | |
| 2454 const GrSurface* src, | |
| 2455 const GrGLGpu* gpu, | |
| 2456 bool* wouldNeedTempFBO = NULL) { | |
| 2457     if (gpu->glCaps().isConfigRenderable(dst->config(), dst->desc().fSampleCnt > 0) && | |
| 2458         gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) && | |
| 2459         gpu->glCaps().usesMSAARenderBuffers()) { | |
| 2460         // ES3 doesn't allow framebuffer blits when the src has MSAA and the configs don't match | |
| 2461         // or the rects are not the same (not just the same size but have the same edges). | |
| 2462 if (GrGLCaps::kES_3_0_MSFBOType == gpu->glCaps().msFBOType() && | |
| 2463 (src->desc().fSampleCnt > 0 || src->config() != dst->config())) { | |
| 2464 return false; | |
| 2465 } | |
| 2466 if (wouldNeedTempFBO) { | |
| 2467             *wouldNeedTempFBO = NULL == dst->asRenderTarget() || NULL == src->asRenderTarget(); | |
| 2468 } | |
| 2469 return true; | |
| 2470 } else { | |
| 2471 return false; | |
| 2472 } | |
| 2473 } | |
| 2474 | |
| 2475 inline bool can_copy_texsubimage(const GrSurface* dst, | |
| 2476 const GrSurface* src, | |
| 2477 const GrGLGpu* gpu, | |
| 2478 bool* wouldNeedTempFBO = NULL) { | |
| 2479     // Table 3.9 of the ES2 spec indicates the supported formats with CopyTexSubImage | |
| 2480     // and BGRA isn't in the spec. There doesn't appear to be any extension that adds it. Perhaps | |
| 2481     // many drivers would allow it to work, but ANGLE does not. | |
| 2482     if (kGLES_GrGLStandard == gpu->glStandard() && gpu->glCaps().bgraIsInternalFormat() && | |
| 2483         (kBGRA_8888_GrPixelConfig == dst->config() || kBGRA_8888_GrPixelConfig == src->config())) { | |
| 2484 return false; | |
| 2485 } | |
| 2486     const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget()); | |
| 2487     // If dst is multisampled (and uses an extension where there is a separate MSAA renderbuffer) | |
| 2488 // then we don't want to copy to the texture but to the MSAA buffer. | |
| 2489 if (dstRT && dstRT->renderFBOID() != dstRT->textureFBOID()) { | |
| 2490 return false; | |
| 2491 } | |
| 2492     const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget()); | |
| 2493     // If the src is multisampled (and uses an extension where there is a separate MSAA | |
| 2494 // renderbuffer) then it is an invalid operation to call CopyTexSubImage | |
| 2495 if (srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) { | |
| 2496 return false; | |
| 2497 } | |
| 2498     if (gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) && | |
| 2499 dst->asTexture() && | |
| 2500 dst->origin() == src->origin() && | |
| 2501 !GrPixelConfigIsCompressed(src->config())) { | |
| 2502 if (wouldNeedTempFBO) { | |
| 2503 *wouldNeedTempFBO = NULL == src->asRenderTarget(); | |
| 2504 } | |
| 2505 return true; | |
| 2506 } else { | |
| 2507 return false; | |
| 2508 } | |
| 2509 } | |
| 2510 | |
| 2511 } | |
| 2512 | |
| 2513 // If a temporary FBO was created, its non-zero ID is returned. The viewport that the copy rect is | |
| 2514 // relative to is output. | |
| 2515 GrGLuint GrGLGpu::bindSurfaceAsFBO(GrSurface* surface, GrGLenum fboTarget, GrGLIRect* viewport) { | |
| 2516     GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget()); | |
| 2517 GrGLuint tempFBOID; | |
| 2518 if (NULL == rt) { | |
| 2519 SkASSERT(surface->asTexture()); | |
| 2520         GrGLuint texID = static_cast<GrGLTexture*>(surface->asTexture())->textureID(); | |
| 2521 GR_GL_CALL(this->glInterface(), GenFramebuffers(1, &tempFBOID)); | |
| 2522 fGPUStats.incRenderTargetBinds(); | |
| 2523 GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, tempFBOID)); | |
| 2524 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget, | |
| 2525                                                              GR_GL_COLOR_ATTACHMENT0, | |
| 2526 GR_GL_TEXTURE_2D, | |
| 2527 texID, | |
| 2528 0)); | |
| 2529 viewport->fLeft = 0; | |
| 2530 viewport->fBottom = 0; | |
| 2531 viewport->fWidth = surface->width(); | |
| 2532 viewport->fHeight = surface->height(); | |
| 2533 } else { | |
| 2534 tempFBOID = 0; | |
| 2535 fGPUStats.incRenderTargetBinds(); | |
| 2536         GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, rt->renderFBOID())); | |
| 2537 *viewport = rt->getViewport(); | |
| 2538 } | |
| 2539 return tempFBOID; | |
| 2540 } | |
| 2541 | |
| 2542 bool GrGLGpu::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) { | |
| 2543     // In here we look for opportunities to use CopyTexSubImage, or fbo blit. If neither is | |
| 2544     // possible we return false and fall back to creating a render target dst for render-to- | |
| 2545     // texture. This code prefers CopyTexSubImage to fbo blit and avoids triggering temporary fbo | |
| 2546     // creation. It isn't clear that avoiding temporary fbo creation is actually optimal. | |
| 2547 | |
| 2548 // Check for format issues with glCopyTexSubImage2D | |
| 2549     if (kGLES_GrGLStandard == this->glStandard() && this->glCaps().bgraIsInternalFormat() && | |
| 2550         kBGRA_8888_GrPixelConfig == src->config()) { | |
| 2551         // glCopyTexSubImage2D doesn't work with this config. If the bgra can be used with fbo blit | |
| 2552 // then we set up for that, otherwise fail. | |
| 2553 if (this->caps()->isConfigRenderable(kBGRA_8888_GrPixelConfig, false)) { | |
| 2554 desc->fOrigin = kDefault_GrSurfaceOrigin; | |
| 2555             desc->fFlags = kRenderTarget_GrSurfaceFlag | kNoStencil_GrSurfaceFlag; | |
| 2556 desc->fConfig = kBGRA_8888_GrPixelConfig; | |
| 2557 return true; | |
| 2558 } | |
| 2559 return false; | |
| 2560 } else if (NULL == src->asRenderTarget()) { | |
| 2561         // CopyTexSubImage2D or fbo blit would require creating a temp fbo for the src. | |
| 2562 return false; | |
| 2563 } | |
| 2564 | |
| 2565     const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget()); | |
| 2566 if (srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) { | |
| 2567         // It's illegal to call CopyTexSubImage2D on a MSAA renderbuffer. Set up for FBO blit or | |
| 2568 // fail. | |
| 2569 if (this->caps()->isConfigRenderable(src->config(), false)) { | |
| 2570 desc->fOrigin = kDefault_GrSurfaceOrigin; | |
| 2571             desc->fFlags = kRenderTarget_GrSurfaceFlag | kNoStencil_GrSurfaceFlag; | |
| 2572 desc->fConfig = src->config(); | |
| 2573 return true; | |
| 2574 } | |
| 2575 return false; | |
| 2576 } | |
| 2577 | |
| 2578 // We'll do a CopyTexSubImage. Make the dst a plain old texture. | |
| 2579 desc->fConfig = src->config(); | |
| 2580 desc->fOrigin = src->origin(); | |
| 2581 desc->fFlags = kNone_GrSurfaceFlags; | |
| 2582 return true; | |
| 2583 } | |
| 2584 | |
| 2585 bool GrGLGpu::copySurface(GrSurface* dst, | |
| 2586 GrSurface* src, | |
| 2587 const SkIRect& srcRect, | |
| 2588 const SkIPoint& dstPoint) { | |
| 2589 bool copied = false; | |
| 2590 if (can_copy_texsubimage(dst, src, this)) { | |
| 2591 GrGLuint srcFBO; | |
| 2592 GrGLIRect srcVP; | |
| 2593 srcFBO = this->bindSurfaceAsFBO(src, GR_GL_FRAMEBUFFER, &srcVP); | |
| 2594 GrGLTexture* dstTex = static_cast<GrGLTexture*>(dst->asTexture()); | |
| 2595 SkASSERT(dstTex); | |
| 2596 // We modified the bound FBO | |
| 2597 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID; | |
| 2598 GrGLIRect srcGLRect; | |
| 2599 srcGLRect.setRelativeTo(srcVP, | |
| 2600 srcRect.fLeft, | |
| 2601 srcRect.fTop, | |
| 2602 srcRect.width(), | |
| 2603 srcRect.height(), | |
| 2604 src->origin()); | |
| 2605 | |
| 2606 this->setScratchTextureUnit(); | |
| 2607 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, dstTex->textureID())); | |
| 2608 GrGLint dstY; | |
| 2609 if (kBottomLeft_GrSurfaceOrigin == dst->origin()) { | |
| 2610 dstY = dst->height() - (dstPoint.fY + srcGLRect.fHeight); | |
| 2611 } else { | |
| 2612 dstY = dstPoint.fY; | |
| 2613 } | |
| 2614 GL_CALL(CopyTexSubImage2D(GR_GL_TEXTURE_2D, 0, | |
| 2615 dstPoint.fX, dstY, | |
| 2616 srcGLRect.fLeft, srcGLRect.fBottom, | |
| 2617 srcGLRect.fWidth, srcGLRect.fHeight)); | |
| 2618 copied = true; | |
| 2619 if (srcFBO) { | |
| 2620 GL_CALL(DeleteFramebuffers(1, &srcFBO)); | |
| 2621 } | |
| 2622 } else if (can_blit_framebuffer(dst, src, this)) { | |
| 2623 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, | |
| 2624 srcRect.width(), srcRect.height()); | |
| 2625 bool selfOverlap = false; | |
| 2626 if (dst == src) { | |
| 2627 selfOverlap = SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect); | |
| 2628 } | |
| 2629 | |
| 2630 if (!selfOverlap) { | |
| 2631 GrGLuint dstFBO; | |
| 2632 GrGLuint srcFBO; | |
| 2633 GrGLIRect dstVP; | |
| 2634 GrGLIRect srcVP; | |
| 2635             dstFBO = this->bindSurfaceAsFBO(dst, GR_GL_DRAW_FRAMEBUFFER, &dstVP); | |
| 2636             srcFBO = this->bindSurfaceAsFBO(src, GR_GL_READ_FRAMEBUFFER, &srcVP); | |
| 2637 // We modified the bound FBO | |
| 2638 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID; | |
| 2639 GrGLIRect srcGLRect; | |
| 2640 GrGLIRect dstGLRect; | |
| 2641 srcGLRect.setRelativeTo(srcVP, | |
| 2642 srcRect.fLeft, | |
| 2643 srcRect.fTop, | |
| 2644 srcRect.width(), | |
| 2645 srcRect.height(), | |
| 2646 src->origin()); | |
| 2647 dstGLRect.setRelativeTo(dstVP, | |
| 2648 dstRect.fLeft, | |
| 2649 dstRect.fTop, | |
| 2650 dstRect.width(), | |
| 2651 dstRect.height(), | |
| 2652 dst->origin()); | |
| 2653 | |
| 2654 // BlitFrameBuffer respects the scissor, so disable it. | |
| 2655 this->disableScissor(); | |
| 2656 | |
| 2657 GrGLint srcY0; | |
| 2658 GrGLint srcY1; | |
| 2659 // Does the blit need to y-mirror or not? | |
| 2660 if (src->origin() == dst->origin()) { | |
| 2661 srcY0 = srcGLRect.fBottom; | |
| 2662 srcY1 = srcGLRect.fBottom + srcGLRect.fHeight; | |
| 2663 } else { | |
| 2664 srcY0 = srcGLRect.fBottom + srcGLRect.fHeight; | |
| 2665 srcY1 = srcGLRect.fBottom; | |
| 2666 } | |
| 2667 GL_CALL(BlitFramebuffer(srcGLRect.fLeft, | |
| 2668 srcY0, | |
| 2669 srcGLRect.fLeft + srcGLRect.fWidth, | |
| 2670 srcY1, | |
| 2671 dstGLRect.fLeft, | |
| 2672 dstGLRect.fBottom, | |
| 2673 dstGLRect.fLeft + dstGLRect.fWidth, | |
| 2674 dstGLRect.fBottom + dstGLRect.fHeight, | |
| 2675 GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); | |
| 2676 if (dstFBO) { | |
| 2677 GL_CALL(DeleteFramebuffers(1, &dstFBO)); | |
| 2678 } | |
| 2679 if (srcFBO) { | |
| 2680 GL_CALL(DeleteFramebuffers(1, &srcFBO)); | |
| 2681 } | |
| 2682 copied = true; | |
| 2683 } | |
| 2684 } | |
| 2685 return copied; | |
| 2686 } | |
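| | |
| // Editorial note: a worked example of the dstY flip in the CopyTexSubImage path above, with | |
| // illustrative numbers. Copying a 16-pixel-tall rect to dstPoint.fY == 10 on a 100-pixel-tall | |
| // bottom-left-origin dst gives dstY = 100 - (10 + 16) = 74, i.e. top-left row 10 re-expressed in | |
| // GL's bottom-up coordinates. | |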
| 2687 | |
| 2688 bool GrGLGpu::canCopySurface(const GrSurface* dst, | |
| 2689 const GrSurface* src, | |
| 2690 const SkIRect& srcRect, | |
| 2691 const SkIPoint& dstPoint) { | |
| 2692     // This mirrors the logic in onCopySurface. We prefer that our base class makes the copy if we | |
| 2693     // would need to create a temp fbo. TODO: verify the assumption that temp fbos are expensive; | |
| 2694     // it may not be true at all. | |
| 2695 bool wouldNeedTempFBO = false; | |
| 2696     if (can_copy_texsubimage(dst, src, this, &wouldNeedTempFBO) && !wouldNeedTempFBO) { | |
| 2697 return true; | |
| 2698 } | |
| 2699     if (can_blit_framebuffer(dst, src, this, &wouldNeedTempFBO) && !wouldNeedTempFBO) { | |
| 2700 if (dst == src) { | |
| 2701 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, | |
| 2702                                                 srcRect.width(), srcRect.height()); | |
| 2703             if (!SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) { | |
| 2704 return true; | |
| 2705 } | |
| 2706 } else { | |
| 2707 return true; | |
| 2708 } | |
| 2709 } | |
| 2710 return false; | |
| 2711 } | |
| 2712 | |
| 2713 void GrGLGpu::didAddGpuTraceMarker() { | |
| 2714 if (this->caps()->gpuTracingSupport()) { | |
| 2715 const GrTraceMarkerSet& markerArray = this->getActiveTraceMarkers(); | |
| 2716 SkString markerString = markerArray.toStringLast(); | |
| 2717 GL_CALL(PushGroupMarker(0, markerString.c_str())); | |
| 2718 } | |
| 2719 } | |
| 2720 | |
| 2721 void GrGLGpu::didRemoveGpuTraceMarker() { | |
| 2722 if (this->caps()->gpuTracingSupport()) { | |
| 2723 GL_CALL(PopGroupMarker()); | |
| 2724 } | |
| 2725 } | |
| 2726 | |
| 2727 /////////////////////////////////////////////////////////////////////////////// | |
| 2728 | |
| 2729 GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBuffersToDraw( | |
| 2730 GrGLGpu* gpu, | |
| 2731 const GrGLVertexBuffer* vbuffer, | |
| 2732                                                             const GrGLIndexBuffer* ibuffer) { | |
| 2733 SkASSERT(vbuffer); | |
| 2734 GrGLAttribArrayState* attribState; | |
| 2735 | |
| 2736     // We use a vertex array if we're on a core profile and the verts are in a VBO. | |
| 2737 if (gpu->glCaps().isCoreProfile() && !vbuffer->isCPUBacked()) { | |
| 2738 if (NULL == fVBOVertexArray || fVBOVertexArray->wasDestroyed()) { | |
| 2739 SkSafeUnref(fVBOVertexArray); | |
| 2740 GrGLuint arrayID; | |
| 2741 GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID)); | |
| 2742 int attrCount = gpu->glCaps().maxVertexAttributes(); | |
| 2743             fVBOVertexArray = SkNEW_ARGS(GrGLVertexArray, (gpu, arrayID, attrCount)); | |
| 2744 } | |
| 2745 attribState = fVBOVertexArray->bindWithIndexBuffer(ibuffer); | |
| 2746 } else { | |
| 2747 if (ibuffer) { | |
| 2748             this->setIndexBufferIDOnDefaultVertexArray(gpu, ibuffer->bufferID()); | |
| 2749 } else { | |
| 2750 this->setVertexArrayID(gpu, 0); | |
| 2751 } | |
| 2752 int attrCount = gpu->glCaps().maxVertexAttributes(); | |
| 2753 if (fDefaultVertexArrayAttribState.count() != attrCount) { | |
| 2754 fDefaultVertexArrayAttribState.resize(attrCount); | |
| 2755 } | |
| 2756 attribState = &fDefaultVertexArrayAttribState; | |
| 2757 } | |
| 2758 return attribState; | |
| 2759 } | |
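| | |
| // Editorial note on the branch above: core profile GL removes the default (zero) vertex array, | |
| // hence the lazily created fVBOVertexArray for VBO-backed draws; the zero vertex array remains | |
| // usable on compatibility and ES contexts and for CPU-backed vertex data. | |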