Chromium Code Reviews

Side by Side Diff: src/gpu/gl/GrGpuGL.cpp

Issue 808593003: Rename GrGpuGL to GrGLGpu for consistency (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: rebase Created 6 years ago
1 /* 1 /*
2 * Copyright 2011 Google Inc. 2 * Copyright 2011 Google Inc.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license that can be 4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file. 5 * found in the LICENSE file.
6 */ 6 */
7 7
8 8
9 #include "GrGpuGL.h" 9 #include "GrGpuGL.h"
10 #include "GrGLStencilBuffer.h" 10 #include "GrGLStencilBuffer.h"
(...skipping 42 matching lines...)
53 GR_GL_CONSTANT_ALPHA, 53 GR_GL_CONSTANT_ALPHA,
54 GR_GL_ONE_MINUS_CONSTANT_ALPHA, 54 GR_GL_ONE_MINUS_CONSTANT_ALPHA,
55 55
56 // extended blend coeffs 56 // extended blend coeffs
57 GR_GL_SRC1_COLOR, 57 GR_GL_SRC1_COLOR,
58 GR_GL_ONE_MINUS_SRC1_COLOR, 58 GR_GL_ONE_MINUS_SRC1_COLOR,
59 GR_GL_SRC1_ALPHA, 59 GR_GL_SRC1_ALPHA,
60 GR_GL_ONE_MINUS_SRC1_ALPHA, 60 GR_GL_ONE_MINUS_SRC1_ALPHA,
61 }; 61 };
62 62
63 bool GrGpuGL::BlendCoeffReferencesConstant(GrBlendCoeff coeff) { 63 bool GrGLGpu::BlendCoeffReferencesConstant(GrBlendCoeff coeff) {
64 static const bool gCoeffReferencesBlendConst[] = { 64 static const bool gCoeffReferencesBlendConst[] = {
65 false, 65 false,
66 false, 66 false,
67 false, 67 false,
68 false, 68 false,
69 false, 69 false,
70 false, 70 false,
71 false, 71 false,
72 false, 72 false,
73 false, 73 false,
(...skipping 35 matching lines...)
109 109
110 // assertion for gXfermodeCoeff2Blend has to be in GrGpu scope 110 // assertion for gXfermodeCoeff2Blend has to be in GrGpu scope
111 GR_STATIC_ASSERT(kTotalGrBlendCoeffCount == 111 GR_STATIC_ASSERT(kTotalGrBlendCoeffCount ==
112 SK_ARRAY_COUNT(gXfermodeCoeff2Blend)); 112 SK_ARRAY_COUNT(gXfermodeCoeff2Blend));
113 } 113 }
114 114
115 /////////////////////////////////////////////////////////////////////////////// 115 ///////////////////////////////////////////////////////////////////////////////
116 116
117 static bool gPrintStartupSpew; 117 static bool gPrintStartupSpew;
118 118
119 GrGpuGL::GrGpuGL(const GrGLContext& ctx, GrContext* context) 119 GrGLGpu::GrGLGpu(const GrGLContext& ctx, GrContext* context)
120 : GrGpu(context) 120 : GrGpu(context)
121 , fGLContext(ctx) { 121 , fGLContext(ctx) {
122 122
123 SkASSERT(ctx.isInitialized()); 123 SkASSERT(ctx.isInitialized());
124 fCaps.reset(SkRef(ctx.caps())); 124 fCaps.reset(SkRef(ctx.caps()));
125 125
126 fHWBoundTextureUniqueIDs.reset(this->glCaps().maxFragmentTextureUnits()); 126 fHWBoundTextureUniqueIDs.reset(this->glCaps().maxFragmentTextureUnits());
127 127
128 GrGLClearErr(fGLContext.interface()); 128 GrGLClearErr(fGLContext.interface());
129 if (gPrintStartupSpew) { 129 if (gPrintStartupSpew) {
130 const GrGLubyte* vendor; 130 const GrGLubyte* vendor;
131 const GrGLubyte* renderer; 131 const GrGLubyte* renderer;
132 const GrGLubyte* version; 132 const GrGLubyte* version;
133 GL_CALL_RET(vendor, GetString(GR_GL_VENDOR)); 133 GL_CALL_RET(vendor, GetString(GR_GL_VENDOR));
134 GL_CALL_RET(renderer, GetString(GR_GL_RENDERER)); 134 GL_CALL_RET(renderer, GetString(GR_GL_RENDERER));
135 GL_CALL_RET(version, GetString(GR_GL_VERSION)); 135 GL_CALL_RET(version, GetString(GR_GL_VERSION));
136 SkDebugf("------------------------- create GrGpuGL %p --------------\n", 136 SkDebugf("------------------------- create GrGLGpu %p --------------\n",
137 this); 137 this);
138 SkDebugf("------ VENDOR %s\n", vendor); 138 SkDebugf("------ VENDOR %s\n", vendor);
139 SkDebugf("------ RENDERER %s\n", renderer); 139 SkDebugf("------ RENDERER %s\n", renderer);
140 SkDebugf("------ VERSION %s\n", version); 140 SkDebugf("------ VERSION %s\n", version);
141 SkDebugf("------ EXTENSIONS\n"); 141 SkDebugf("------ EXTENSIONS\n");
142 ctx.extensions().print(); 142 ctx.extensions().print();
143 SkDebugf("\n"); 143 SkDebugf("\n");
144 SkDebugf(this->glCaps().dump().c_str()); 144 SkDebugf(this->glCaps().dump().c_str());
145 } 145 }
146 146
147 fProgramCache = SkNEW_ARGS(ProgramCache, (this)); 147 fProgramCache = SkNEW_ARGS(ProgramCache, (this));
148 148
149 SkASSERT(this->glCaps().maxVertexAttributes() >= GrGeometryProcessor::kMaxVertexAttribs); 149 SkASSERT(this->glCaps().maxVertexAttributes() >= GrGeometryProcessor::kMaxVertexAttribs);
150 150
151 fLastSuccessfulStencilFmtIdx = 0; 151 fLastSuccessfulStencilFmtIdx = 0;
152 fHWProgramID = 0; 152 fHWProgramID = 0;
153 153
154 if (this->glCaps().pathRenderingSupport()) { 154 if (this->glCaps().pathRenderingSupport()) {
155 fPathRendering.reset(new GrGLPathRendering(this)); 155 fPathRendering.reset(new GrGLPathRendering(this));
156 } 156 }
157 } 157 }
158 158
159 GrGpuGL::~GrGpuGL() { 159 GrGLGpu::~GrGLGpu() {
160 if (0 != fHWProgramID) { 160 if (0 != fHWProgramID) {
161 // detach the current program so there is no confusion on OpenGL's part 161 // detach the current program so there is no confusion on OpenGL's part
162 // that we want it to be deleted 162 // that we want it to be deleted
163 SkASSERT(fHWProgramID == fCurrentProgram->programID()); 163 SkASSERT(fHWProgramID == fCurrentProgram->programID());
164 GL_CALL(UseProgram(0)); 164 GL_CALL(UseProgram(0));
165 } 165 }
166 166
167 delete fProgramCache; 167 delete fProgramCache;
168 } 168 }
169 169
170 void GrGpuGL::contextAbandoned() { 170 void GrGLGpu::contextAbandoned() {
171 INHERITED::contextAbandoned(); 171 INHERITED::contextAbandoned();
172 fProgramCache->abandon(); 172 fProgramCache->abandon();
173 fHWProgramID = 0; 173 fHWProgramID = 0;
174 if (this->glCaps().pathRenderingSupport()) { 174 if (this->glCaps().pathRenderingSupport()) {
175 this->glPathRendering()->abandonGpuResources(); 175 this->glPathRendering()->abandonGpuResources();
176 } 176 }
177 } 177 }
178 178
179 /////////////////////////////////////////////////////////////////////////////// 179 ///////////////////////////////////////////////////////////////////////////////
180 GrPixelConfig GrGpuGL::preferredReadPixelsConfig(GrPixelConfig readConfig, 180 GrPixelConfig GrGLGpu::preferredReadPixelsConfig(GrPixelConfig readConfig,
181 GrPixelConfig surfaceConfig) const { 181 GrPixelConfig surfaceConfig) const {
182 if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == readConfig) { 182 if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == readConfig) {
183 return kBGRA_8888_GrPixelConfig; 183 return kBGRA_8888_GrPixelConfig;
184 } else if (this->glContext().isMesa() && 184 } else if (this->glContext().isMesa() &&
185 GrBytesPerPixel(readConfig) == 4 && 185 GrBytesPerPixel(readConfig) == 4 &&
186 GrPixelConfigSwapRAndB(readConfig) == surfaceConfig) { 186 GrPixelConfigSwapRAndB(readConfig) == surfaceConfig) {
187 // Mesa 3D takes a slow path when reading back BGRA from an RGBA surface and vice-versa. 187 // Mesa 3D takes a slow path when reading back BGRA from an RGBA surface and vice-versa.
188 // Perhaps this should be guarded by some compiletime or runtime check. 188 // Perhaps this should be guarded by some compiletime or runtime check.
189 return surfaceConfig; 189 return surfaceConfig;
190 } else if (readConfig == kBGRA_8888_GrPixelConfig 190 } else if (readConfig == kBGRA_8888_GrPixelConfig
191 && !this->glCaps().readPixelsSupported( 191 && !this->glCaps().readPixelsSupported(
192 this->glInterface(), 192 this->glInterface(),
193 GR_GL_BGRA, 193 GR_GL_BGRA,
194 GR_GL_UNSIGNED_BYTE, 194 GR_GL_UNSIGNED_BYTE,
195 surfaceConfig 195 surfaceConfig
196 )) { 196 )) {
197 return kRGBA_8888_GrPixelConfig; 197 return kRGBA_8888_GrPixelConfig;
198 } else { 198 } else {
199 return readConfig; 199 return readConfig;
200 } 200 }
201 } 201 }
202 202
203 GrPixelConfig GrGpuGL::preferredWritePixelsConfig(GrPixelConfig writeConfig, 203 GrPixelConfig GrGLGpu::preferredWritePixelsConfig(GrPixelConfig writeConfig,
204 GrPixelConfig surfaceConfig) const { 204 GrPixelConfig surfaceConfig) const {
205 if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == writeConfig) { 205 if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == writeConfig) {
206 return kBGRA_8888_GrPixelConfig; 206 return kBGRA_8888_GrPixelConfig;
207 } else { 207 } else {
208 return writeConfig; 208 return writeConfig;
209 } 209 }
210 } 210 }
211 211
212 bool GrGpuGL::canWriteTexturePixels(const GrTexture* texture, GrPixelConfig srcConfig) const { 212 bool GrGLGpu::canWriteTexturePixels(const GrTexture* texture, GrPixelConfig srcConfig) const {
213 if (kIndex_8_GrPixelConfig == srcConfig || kIndex_8_GrPixelConfig == texture->config()) { 213 if (kIndex_8_GrPixelConfig == srcConfig || kIndex_8_GrPixelConfig == texture->config()) {
214 return false; 214 return false;
215 } 215 }
216 if (srcConfig != texture->config() && kGLES_GrGLStandard == this->glStandard()) { 216 if (srcConfig != texture->config() && kGLES_GrGLStandard == this->glStandard()) {
217 // In general ES2 requires the internal format of the texture and the format of the src 217 // In general ES2 requires the internal format of the texture and the format of the src
218 // pixels to match. However, it may or may not be possible to upload BGRA data to a RGBA 218 // pixels to match. However, it may or may not be possible to upload BGRA data to a RGBA
219 // texture. It depends upon which extension added BGRA. The Apple extension allows it 219 // texture. It depends upon which extension added BGRA. The Apple extension allows it
220 // (BGRA's internal format is RGBA) while the EXT extension does not (BGRA is its own 220 // (BGRA's internal format is RGBA) while the EXT extension does not (BGRA is its own
221 // internal format). 221 // internal format).
222 if (this->glCaps().isConfigTexturable(kBGRA_8888_GrPixelConfig) && 222 if (this->glCaps().isConfigTexturable(kBGRA_8888_GrPixelConfig) &&
223 !this->glCaps().bgraIsInternalFormat() && 223 !this->glCaps().bgraIsInternalFormat() &&
224 kBGRA_8888_GrPixelConfig == srcConfig && 224 kBGRA_8888_GrPixelConfig == srcConfig &&
225 kRGBA_8888_GrPixelConfig == texture->config()) { 225 kRGBA_8888_GrPixelConfig == texture->config()) {
226 return true; 226 return true;
227 } else { 227 } else {
228 return false; 228 return false;
229 } 229 }
230 } else { 230 } else {
231 return true; 231 return true;
232 } 232 }
233 } 233 }
234 234
235 bool GrGpuGL::fullReadPixelsIsFasterThanPartial() const { 235 bool GrGLGpu::fullReadPixelsIsFasterThanPartial() const {
236 return SkToBool(GR_GL_FULL_READPIXELS_FASTER_THAN_PARTIAL); 236 return SkToBool(GR_GL_FULL_READPIXELS_FASTER_THAN_PARTIAL);
237 } 237 }
238 238
239 void GrGpuGL::onResetContext(uint32_t resetBits) { 239 void GrGLGpu::onResetContext(uint32_t resetBits) {
240 // we don't use the zb at all 240 // we don't use the zb at all
241 if (resetBits & kMisc_GrGLBackendState) { 241 if (resetBits & kMisc_GrGLBackendState) {
242 GL_CALL(Disable(GR_GL_DEPTH_TEST)); 242 GL_CALL(Disable(GR_GL_DEPTH_TEST));
243 GL_CALL(DepthMask(GR_GL_FALSE)); 243 GL_CALL(DepthMask(GR_GL_FALSE));
244 244
245 fHWDrawFace = GrDrawState::kInvalid_DrawFace; 245 fHWDrawFace = GrDrawState::kInvalid_DrawFace;
246 fHWDitherEnabled = kUnknown_TriState; 246 fHWDitherEnabled = kUnknown_TriState;
247 247
248 if (kGL_GrGLStandard == this->glStandard()) { 248 if (kGL_GrGLStandard == this->glStandard()) {
249 // Desktop-only state that we never change 249 // Desktop-only state that we never change
(...skipping 102 matching lines...)
352 // to render upside down. 352 // to render upside down.
353 if (kDefault_GrSurfaceOrigin == origin) { 353 if (kDefault_GrSurfaceOrigin == origin) {
354 return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin; 354 return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin;
355 } else { 355 } else {
356 return origin; 356 return origin;
357 } 357 }
358 } 358 }
359 359
360 } 360 }
361 361
362 GrTexture* GrGpuGL::onWrapBackendTexture(const GrBackendTextureDesc& desc) { 362 GrTexture* GrGLGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc) {
363 if (!this->configToGLFormats(desc.fConfig, false, NULL, NULL, NULL)) { 363 if (!this->configToGLFormats(desc.fConfig, false, NULL, NULL, NULL)) {
364 return NULL; 364 return NULL;
365 } 365 }
366 366
367 if (0 == desc.fTextureHandle) { 367 if (0 == desc.fTextureHandle) {
368 return NULL; 368 return NULL;
369 } 369 }
370 370
371 int maxSize = this->caps()->maxTextureSize(); 371 int maxSize = this->caps()->maxTextureSize();
372 if (desc.fWidth > maxSize || desc.fHeight > maxSize) { 372 if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
(...skipping 33 matching lines...)
406 } else { 406 } else {
407 texture = SkNEW_ARGS(GrGLTexture, (this, surfDesc, idDesc)); 407 texture = SkNEW_ARGS(GrGLTexture, (this, surfDesc, idDesc));
408 } 408 }
409 if (NULL == texture) { 409 if (NULL == texture) {
410 return NULL; 410 return NULL;
411 } 411 }
412 412
413 return texture; 413 return texture;
414 } 414 }
415 415
416 GrRenderTarget* GrGpuGL::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc) { 416 GrRenderTarget* GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc) {
417 GrGLRenderTarget::IDDesc idDesc; 417 GrGLRenderTarget::IDDesc idDesc;
418 idDesc.fRTFBOID = static_cast<GrGLuint>(wrapDesc.fRenderTargetHandle); 418 idDesc.fRTFBOID = static_cast<GrGLuint>(wrapDesc.fRenderTargetHandle);
419 idDesc.fMSColorRenderbufferID = 0; 419 idDesc.fMSColorRenderbufferID = 0;
420 idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID; 420 idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
421 421
422 GrSurfaceDesc desc; 422 GrSurfaceDesc desc;
423 desc.fConfig = wrapDesc.fConfig; 423 desc.fConfig = wrapDesc.fConfig;
424 desc.fFlags = kCheckAllocation_GrSurfaceFlag; 424 desc.fFlags = kCheckAllocation_GrSurfaceFlag;
425 desc.fWidth = wrapDesc.fWidth; 425 desc.fWidth = wrapDesc.fWidth;
426 desc.fHeight = wrapDesc.fHeight; 426 desc.fHeight = wrapDesc.fHeight;
(...skipping 17 matching lines...)
444 desc.fSampleCnt, 444 desc.fSampleCnt,
445 format)); 445 format));
446 tgt->setStencilBuffer(sb); 446 tgt->setStencilBuffer(sb);
447 sb->unref(); 447 sb->unref();
448 } 448 }
449 return tgt; 449 return tgt;
450 } 450 }
451 451
452 //////////////////////////////////////////////////////////////////////////////// 452 ////////////////////////////////////////////////////////////////////////////////
453 453
454 bool GrGpuGL::onWriteTexturePixels(GrTexture* texture, 454 bool GrGLGpu::onWriteTexturePixels(GrTexture* texture,
455 int left, int top, int width, int height, 455 int left, int top, int width, int height,
456 GrPixelConfig config, const void* buffer, 456 GrPixelConfig config, const void* buffer,
457 size_t rowBytes) { 457 size_t rowBytes) {
458 if (NULL == buffer) { 458 if (NULL == buffer) {
459 return false; 459 return false;
460 } 460 }
461 GrGLTexture* glTex = static_cast<GrGLTexture*>(texture); 461 GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);
462 462
463 this->setScratchTextureUnit(); 463 this->setScratchTextureUnit();
464 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTex->textureID())); 464 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTex->textureID()));
465 465
466 bool success = false; 466 bool success = false;
467 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) { 467 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
468 // We check that config == desc.fConfig in GrGpuGL::canWriteTexturePixels() 468 // We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels()
469 SkASSERT(config == glTex->desc().fConfig); 469 SkASSERT(config == glTex->desc().fConfig);
470 success = this->uploadCompressedTexData(glTex->desc(), buffer, false, left, top, width, 470 success = this->uploadCompressedTexData(glTex->desc(), buffer, false, left, top, width,
471 height); 471 height);
472 } else { 472 } else {
473 success = this->uploadTexData(glTex->desc(), false, left, top, width, height, config, 473 success = this->uploadTexData(glTex->desc(), false, left, top, width, height, config,
474 buffer, rowBytes); 474 buffer, rowBytes);
475 } 475 }
476 476
477 if (success) { 477 if (success) {
478 texture->texturePriv().dirtyMipMaps(true); 478 texture->texturePriv().dirtyMipMaps(true);
(...skipping 31 matching lines...)
510 510
511 static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc, 511 static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc,
512 const GrGLInterface* interface) { 512 const GrGLInterface* interface) {
513 if (SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag)) { 513 if (SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag)) {
514 return GR_GL_GET_ERROR(interface); 514 return GR_GL_GET_ERROR(interface);
515 } else { 515 } else {
516 return CHECK_ALLOC_ERROR(interface); 516 return CHECK_ALLOC_ERROR(interface);
517 } 517 }
518 } 518 }
519 519
520 bool GrGpuGL::uploadTexData(const GrSurfaceDesc& desc, 520 bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
521 bool isNewTexture, 521 bool isNewTexture,
522 int left, int top, int width, int height, 522 int left, int top, int width, int height,
523 GrPixelConfig dataConfig, 523 GrPixelConfig dataConfig,
524 const void* data, 524 const void* data,
525 size_t rowBytes) { 525 size_t rowBytes) {
526 SkASSERT(data || isNewTexture); 526 SkASSERT(data || isNewTexture);
527 527
528 // If we're uploading compressed data then we should be using uploadCompressedTexData 528 // If we're uploading compressed data then we should be using uploadCompressedTexData
529 SkASSERT(!GrPixelConfigIsCompressed(dataConfig)); 529 SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
530 530
(...skipping 151 matching lines...)
682 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE)); 682 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
683 } 683 }
684 return succeeded; 684 return succeeded;
685 } 685 }
686 686
687 // TODO: This function is using a lot of wonky semantics like, if width == -1 687 // TODO: This function is using a lot of wonky semantics like, if width == -1
688 // then set width = desc.fWidth ... blah. A better way to do it might be to 688 // then set width = desc.fWidth ... blah. A better way to do it might be to
689 // create a CompressedTexData struct that takes a desc/ptr and figures out 689 // create a CompressedTexData struct that takes a desc/ptr and figures out
690 // the proper upload semantics. Then users can construct this function how they 690 // the proper upload semantics. Then users can construct this function how they
691 // see fit if they want to go against the "standard" way to do it. 691 // see fit if they want to go against the "standard" way to do it.
692 bool GrGpuGL::uploadCompressedTexData(const GrSurfaceDesc& desc, 692 bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
693 const void* data, 693 const void* data,
694 bool isNewTexture, 694 bool isNewTexture,
695 int left, int top, int width, int height) { 695 int left, int top, int width, int height) {
696 SkASSERT(data || isNewTexture); 696 SkASSERT(data || isNewTexture);
697 697
698 // No support for software flip y, yet... 698 // No support for software flip y, yet...
699 SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin); 699 SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin);
700 700
701 if (-1 == width) { 701 if (-1 == width) {
702 width = desc.fWidth; 702 width = desc.fWidth;
(...skipping 85 matching lines...)
788 format, 788 format,
789 width, height)); 789 width, height));
790 break; 790 break;
791 case GrGLCaps::kNone_MSFBOType: 791 case GrGLCaps::kNone_MSFBOType:
792 SkFAIL("Shouldn't be here if we don't support multisampled renderbuffers."); 792 SkFAIL("Shouldn't be here if we don't support multisampled renderbuffers.");
793 break; 793 break;
794 } 794 }
795 return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));; 795 return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));;
796 } 796 }
797 797
798 bool GrGpuGL::createRenderTargetObjects(const GrSurfaceDesc& desc, GrGLuint texID, 798 bool GrGLGpu::createRenderTargetObjects(const GrSurfaceDesc& desc, GrGLuint texID,
799 GrGLRenderTarget::IDDesc* idDesc) { 799 GrGLRenderTarget::IDDesc* idDesc) {
800 idDesc->fMSColorRenderbufferID = 0; 800 idDesc->fMSColorRenderbufferID = 0;
801 idDesc->fRTFBOID = 0; 801 idDesc->fRTFBOID = 0;
802 idDesc->fTexFBOID = 0; 802 idDesc->fTexFBOID = 0;
803 idDesc->fIsWrapped = false; 803 idDesc->fIsWrapped = false;
804 804
805 GrGLenum status; 805 GrGLenum status;
806 806
807 GrGLenum msColorFormat = 0; // suppress warning 807 GrGLenum msColorFormat = 0; // suppress warning
808 808
(...skipping 97 matching lines...)
906 // SkDEBUGFAIL("null texture"); 906 // SkDEBUGFAIL("null texture");
907 return NULL; 907 return NULL;
908 } 908 }
909 909
910 #if 0 && defined(SK_DEBUG) 910 #if 0 && defined(SK_DEBUG)
911 static size_t as_size_t(int x) { 911 static size_t as_size_t(int x) {
912 return x; 912 return x;
913 } 913 }
914 #endif 914 #endif
915 915
916 GrTexture* GrGpuGL::onCreateTexture(const GrSurfaceDesc& origDesc, 916 GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& origDesc,
917 const void* srcData, 917 const void* srcData,
918 size_t rowBytes) { 918 size_t rowBytes) {
919 919
920 GrSurfaceDesc desc = origDesc; 920 GrSurfaceDesc desc = origDesc;
921 GrGLRenderTarget::IDDesc rtIDDesc; 921 GrGLRenderTarget::IDDesc rtIDDesc;
922 922
923 // Attempt to catch un- or wrongly initialized sample counts; 923 // Attempt to catch un- or wrongly initialized sample counts;
924 SkASSERT(desc.fSampleCnt >= 0 && desc.fSampleCnt <= 64); 924 SkASSERT(desc.fSampleCnt >= 0 && desc.fSampleCnt <= 64);
925 // We fail if the MSAA was requested and is not available. 925 // We fail if the MSAA was requested and is not available.
926 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) { 926 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
(...skipping 90 matching lines...)
1017 tex = SkNEW_ARGS(GrGLTexture, (this, desc, idDesc)); 1017 tex = SkNEW_ARGS(GrGLTexture, (this, desc, idDesc));
1018 } 1018 }
1019 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp()); 1019 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
1020 #ifdef TRACE_TEXTURE_CREATION 1020 #ifdef TRACE_TEXTURE_CREATION
1021 SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n", 1021 SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n",
1022 glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig); 1022 glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
1023 #endif 1023 #endif
1024 return tex; 1024 return tex;
1025 } 1025 }
1026 1026
1027 GrTexture* GrGpuGL::onCreateCompressedTexture(const GrSurfaceDesc& origDesc, const void* srcData) { 1027 GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& origDesc, const void* srcData) {
1028 1028
1029 if(SkToBool(origDesc.fFlags & kRenderTarget_GrSurfaceFlag) || origDesc.fSampleCnt > 0) { 1029 if(SkToBool(origDesc.fFlags & kRenderTarget_GrSurfaceFlag) || origDesc.fSampleCnt > 0) {
1030 return return_null_texture(); 1030 return return_null_texture();
1031 } 1031 }
1032 1032
1033 // Make sure that we're not flipping Y. 1033 // Make sure that we're not flipping Y.
1034 GrSurfaceOrigin texOrigin = resolve_origin(origDesc.fOrigin, false); 1034 GrSurfaceOrigin texOrigin = resolve_origin(origDesc.fOrigin, false);
1035 if (kBottomLeft_GrSurfaceOrigin == texOrigin) { 1035 if (kBottomLeft_GrSurfaceOrigin == texOrigin) {
1036 return return_null_texture(); 1036 return return_null_texture();
1037 } 1037 }
(...skipping 73 matching lines...)
1111 GR_GL_RENDERBUFFER_DEPTH_SIZE, 1111 GR_GL_RENDERBUFFER_DEPTH_SIZE,
1112 (GrGLint*)&format->fTotalBits); 1112 (GrGLint*)&format->fTotalBits);
1113 format->fTotalBits += format->fStencilBits; 1113 format->fTotalBits += format->fStencilBits;
1114 } else { 1114 } else {
1115 format->fTotalBits = format->fStencilBits; 1115 format->fTotalBits = format->fStencilBits;
1116 } 1116 }
1117 } 1117 }
1118 } 1118 }
1119 } 1119 }
1120 1120
1121 bool GrGpuGL::createStencilBufferForRenderTarget(GrRenderTarget* rt, int width, int height) { 1121 bool GrGLGpu::createStencilBufferForRenderTarget(GrRenderTarget* rt, int width, int height) {
1122 1122
1123 // All internally created RTs are also textures. We don't create 1123 // All internally created RTs are also textures. We don't create
1124 // SBs for a client's standalone RT (that is a RT that isn't also a texture). 1124 // SBs for a client's standalone RT (that is a RT that isn't also a texture).
1125 SkASSERT(rt->asTexture()); 1125 SkASSERT(rt->asTexture());
1126 SkASSERT(width >= rt->width()); 1126 SkASSERT(width >= rt->width());
1127 SkASSERT(height >= rt->height()); 1127 SkASSERT(height >= rt->height());
1128 1128
1129 int samples = rt->numSamples(); 1129 int samples = rt->numSamples();
1130 GrGLuint sbID = 0; 1130 GrGLuint sbID = 0;
1131 1131
(...skipping 44 matching lines...)
1176 // again. 1176 // again.
1177 sb->cacheAccess().removeScratchKey(); 1177 sb->cacheAccess().removeScratchKey();
1178 // Set this to 0 since we handed the valid ID off to the failed stencil buffer resource. 1178 // Set this to 0 since we handed the valid ID off to the failed stencil buffer resource.
1179 sbID = 0; 1179 sbID = 0;
1180 } 1180 }
1181 } 1181 }
1182 GL_CALL(DeleteRenderbuffers(1, &sbID)); 1182 GL_CALL(DeleteRenderbuffers(1, &sbID));
1183 return false; 1183 return false;
1184 } 1184 }
1185 1185
1186 bool GrGpuGL::attachStencilBufferToRenderTarget(GrStencilBuffer* sb, GrRenderTarget* rt) { 1186 bool GrGLGpu::attachStencilBufferToRenderTarget(GrStencilBuffer* sb, GrRenderTarget* rt) {
1187 GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt); 1187 GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt);
1188 1188
1189 GrGLuint fbo = glrt->renderFBOID(); 1189 GrGLuint fbo = glrt->renderFBOID();
1190 1190
1191 if (NULL == sb) { 1191 if (NULL == sb) {
1192 if (rt->getStencilBuffer()) { 1192 if (rt->getStencilBuffer()) {
1193 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, 1193 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1194 GR_GL_STENCIL_ATTACHMENT, 1194 GR_GL_STENCIL_ATTACHMENT,
1195 GR_GL_RENDERBUFFER, 0)); 1195 GR_GL_RENDERBUFFER, 0));
1196 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, 1196 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
(...skipping 44 matching lines...)
1241 rt->config(), 1241 rt->config(),
1242 glsb->format()); 1242 glsb->format());
1243 } 1243 }
1244 } 1244 }
1245 return true; 1245 return true;
1246 } 1246 }
1247 } 1247 }
1248 1248
1249 //////////////////////////////////////////////////////////////////////////////// 1249 ////////////////////////////////////////////////////////////////////////////////
1250 1250
1251 GrVertexBuffer* GrGpuGL::onCreateVertexBuffer(size_t size, bool dynamic) { 1251 GrVertexBuffer* GrGLGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
1252 GrGLVertexBuffer::Desc desc; 1252 GrGLVertexBuffer::Desc desc;
1253 desc.fDynamic = dynamic; 1253 desc.fDynamic = dynamic;
1254 desc.fSizeInBytes = size; 1254 desc.fSizeInBytes = size;
1255 desc.fIsWrapped = false; 1255 desc.fIsWrapped = false;
1256 1256
1257 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) { 1257 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
1258 desc.fID = 0; 1258 desc.fID = 0;
1259 GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc)); 1259 GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
1260 return vertexBuffer; 1260 return vertexBuffer;
1261 } else { 1261 } else {
(...skipping 12 matching lines...)
1274 this->notifyVertexBufferDelete(desc.fID); 1274 this->notifyVertexBufferDelete(desc.fID);
1275 return NULL; 1275 return NULL;
1276 } 1276 }
1277 GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc)); 1277 GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
1278 return vertexBuffer; 1278 return vertexBuffer;
1279 } 1279 }
1280 return NULL; 1280 return NULL;
1281 } 1281 }
1282 } 1282 }
1283 1283
1284 GrIndexBuffer* GrGpuGL::onCreateIndexBuffer(size_t size, bool dynamic) { 1284 GrIndexBuffer* GrGLGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
1285 GrGLIndexBuffer::Desc desc; 1285 GrGLIndexBuffer::Desc desc;
1286 desc.fDynamic = dynamic; 1286 desc.fDynamic = dynamic;
1287 desc.fSizeInBytes = size; 1287 desc.fSizeInBytes = size;
1288 desc.fIsWrapped = false; 1288 desc.fIsWrapped = false;
1289 1289
1290 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) { 1290 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
1291 desc.fID = 0; 1291 desc.fID = 0;
1292 GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc)); 1292 GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
1293 return indexBuffer; 1293 return indexBuffer;
1294 } else { 1294 } else {
(...skipping 12 matching lines...)
1307 this->notifyIndexBufferDelete(desc.fID); 1307 this->notifyIndexBufferDelete(desc.fID);
1308 return NULL; 1308 return NULL;
1309 } 1309 }
1310 GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc)); 1310 GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
1311 return indexBuffer; 1311 return indexBuffer;
1312 } 1312 }
1313 return NULL; 1313 return NULL;
1314 } 1314 }
1315 } 1315 }
1316 1316
1317 void GrGpuGL::flushScissor(const GrClipMaskManager::ScissorState& scissorState, 1317 void GrGLGpu::flushScissor(const GrClipMaskManager::ScissorState& scissorState,
1318 const GrGLIRect& rtViewport, 1318 const GrGLIRect& rtViewport,
1319 GrSurfaceOrigin rtOrigin) { 1319 GrSurfaceOrigin rtOrigin) {
1320 if (scissorState.fEnabled) { 1320 if (scissorState.fEnabled) {
1321 GrGLIRect scissor; 1321 GrGLIRect scissor;
1322 scissor.setRelativeTo(rtViewport, 1322 scissor.setRelativeTo(rtViewport,
1323 scissorState.fRect.fLeft, 1323 scissorState.fRect.fLeft,
1324 scissorState.fRect.fTop, 1324 scissorState.fRect.fTop,
1325 scissorState.fRect.width(), 1325 scissorState.fRect.width(),
1326 scissorState.fRect.height(), 1326 scissorState.fRect.height(),
1327 rtOrigin); 1327 rtOrigin);
1328 // if the scissor fully contains the viewport then we fall through and 1328 // if the scissor fully contains the viewport then we fall through and
1329 // disable the scissor test. 1329 // disable the scissor test.
1330 if (!scissor.contains(rtViewport)) { 1330 if (!scissor.contains(rtViewport)) {
1331 if (fHWScissorSettings.fRect != scissor) { 1331 if (fHWScissorSettings.fRect != scissor) {
1332 scissor.pushToGLScissor(this->glInterface()); 1332 scissor.pushToGLScissor(this->glInterface());
1333 fHWScissorSettings.fRect = scissor; 1333 fHWScissorSettings.fRect = scissor;
1334 } 1334 }
1335 if (kYes_TriState != fHWScissorSettings.fEnabled) { 1335 if (kYes_TriState != fHWScissorSettings.fEnabled) {
1336 GL_CALL(Enable(GR_GL_SCISSOR_TEST)); 1336 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
1337 fHWScissorSettings.fEnabled = kYes_TriState; 1337 fHWScissorSettings.fEnabled = kYes_TriState;
1338 } 1338 }
1339 return; 1339 return;
1340 } 1340 }
1341 } 1341 }
1342 1342
1343 // See fall through note above 1343 // See fall through note above
1344 this->disableScissor(); 1344 this->disableScissor();
1345 } 1345 }
1346 1346
1347 bool GrGpuGL::flushGraphicsState(const GrOptDrawState& optState) { 1347 bool GrGLGpu::flushGraphicsState(const GrOptDrawState& optState) {
1348 // GrGpu::setupClipAndFlushState should have already checked this and bailed if not true. 1348 // GrGpu::setupClipAndFlushState should have already checked this and bailed if not true.
1349 SkASSERT(optState.getRenderTarget()); 1349 SkASSERT(optState.getRenderTarget());
1350 1350
1351 if (kStencilPath_DrawType == optState.drawType()) { 1351 if (kStencilPath_DrawType == optState.drawType()) {
1352 const GrRenderTarget* rt = optState.getRenderTarget(); 1352 const GrRenderTarget* rt = optState.getRenderTarget();
1353 SkISize size; 1353 SkISize size;
1354 size.set(rt->width(), rt->height()); 1354 size.set(rt->width(), rt->height());
1355 this->glPathRendering()->setProjectionMatrix(optState.getViewMatrix(), size, rt->origin()); 1355 this->glPathRendering()->setProjectionMatrix(optState.getViewMatrix(), size, rt->origin());
1356 } else { 1356 } else {
1357 this->flushMiscFixedFunctionState(optState); 1357 this->flushMiscFixedFunctionState(optState);
(...skipping 22 matching lines...)
1380 this->flushScissor(optState.getScissorState(), glRT->getViewport(), glRT->origin()); 1380 this->flushScissor(optState.getScissorState(), glRT->getViewport(), glRT->origin());
1381 this->flushAAState(optState); 1381 this->flushAAState(optState);
1382 1382
1383 // This must come after textures are flushed because a texture may need 1383 // This must come after textures are flushed because a texture may need
1384 // to be msaa-resolved (which will modify bound FBO state). 1384 // to be msaa-resolved (which will modify bound FBO state).
1385 this->flushRenderTarget(glRT, NULL); 1385 this->flushRenderTarget(glRT, NULL);
1386 1386
1387 return true; 1387 return true;
1388 } 1388 }
1389 1389
1390 void GrGpuGL::setupGeometry(const GrOptDrawState& optState, 1390 void GrGLGpu::setupGeometry(const GrOptDrawState& optState,
1391 const GrDrawTarget::DrawInfo& info, 1391 const GrDrawTarget::DrawInfo& info,
1392 size_t* indexOffsetInBytes) { 1392 size_t* indexOffsetInBytes) {
1393 GrGLVertexBuffer* vbuf; 1393 GrGLVertexBuffer* vbuf;
1394 vbuf = (GrGLVertexBuffer*) info.vertexBuffer(); 1394 vbuf = (GrGLVertexBuffer*) info.vertexBuffer();
1395 1395
1396 SkASSERT(vbuf); 1396 SkASSERT(vbuf);
1397 SkASSERT(!vbuf->isMapped()); 1397 SkASSERT(!vbuf->isMapped());
1398 1398
1399 GrGLIndexBuffer* ibuf = NULL; 1399 GrGLIndexBuffer* ibuf = NULL;
1400 if (info.isIndexed()) { 1400 if (info.isIndexed()) {
(...skipping 33 matching lines...)
1434 GrGLAttribTypeToLayout(attribType).fType, 1434 GrGLAttribTypeToLayout(attribType).fType,
1435 GrGLAttribTypeToLayout(attribType).fNormalized, 1435 GrGLAttribTypeToLayout(attribType).fNormalized,
1436 stride, 1436 stride,
1437 reinterpret_cast<GrGLvoid*>(vertexOffsetInBytes + offset)); 1437 reinterpret_cast<GrGLvoid*>(vertexOffsetInBytes + offset));
1438 offset += attribs[attribIndex].fOffset; 1438 offset += attribs[attribIndex].fOffset;
1439 } 1439 }
1440 attribState->disableUnusedArrays(this, usedAttribArraysMask); 1440 attribState->disableUnusedArrays(this, usedAttribArraysMask);
1441 } 1441 }
1442 } 1442 }
1443 1443
1444 void GrGpuGL::buildProgramDesc(const GrOptDrawState& optState, 1444 void GrGLGpu::buildProgramDesc(const GrOptDrawState& optState,
1445 const GrProgramDesc::DescInfo& descInfo, 1445 const GrProgramDesc::DescInfo& descInfo,
1446 GrGpu::DrawType drawType, 1446 GrGpu::DrawType drawType,
1447 GrProgramDesc* desc) { 1447 GrProgramDesc* desc) {
1448 if (!GrGLProgramDescBuilder::Build(optState, descInfo, drawType, this, desc)) { 1448 if (!GrGLProgramDescBuilder::Build(optState, descInfo, drawType, this, desc)) {
1449 SkDEBUGFAIL("Failed to generate GL program descriptor"); 1449 SkDEBUGFAIL("Failed to generate GL program descriptor");
1450 } 1450 }
1451 } 1451 }
1452 1452
1453 void GrGpuGL::disableScissor() { 1453 void GrGLGpu::disableScissor() {
1454 if (kNo_TriState != fHWScissorSettings.fEnabled) { 1454 if (kNo_TriState != fHWScissorSettings.fEnabled) {
1455 GL_CALL(Disable(GR_GL_SCISSOR_TEST)); 1455 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
1456 fHWScissorSettings.fEnabled = kNo_TriState; 1456 fHWScissorSettings.fEnabled = kNo_TriState;
1457 return; 1457 return;
1458 } 1458 }
1459 } 1459 }
1460 1460
1461 void GrGpuGL::onClear(GrRenderTarget* target, const SkIRect* rect, GrColor color, 1461 void GrGLGpu::onClear(GrRenderTarget* target, const SkIRect* rect, GrColor color,
1462 bool canIgnoreRect) { 1462 bool canIgnoreRect) {
1463 // parent class should never let us get here with no RT 1463 // parent class should never let us get here with no RT
1464 SkASSERT(target); 1464 SkASSERT(target);
1465 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); 1465 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
1466 1466
1467 if (canIgnoreRect && this->glCaps().fullClearIsFree()) { 1467 if (canIgnoreRect && this->glCaps().fullClearIsFree()) {
1468 rect = NULL; 1468 rect = NULL;
1469 } 1469 }
1470 1470
1471 SkIRect clippedRect; 1471 SkIRect clippedRect;
(...skipping 23 matching lines...)
1495 r = GrColorUnpackR(color) * scaleRGB; 1495 r = GrColorUnpackR(color) * scaleRGB;
1496 g = GrColorUnpackG(color) * scaleRGB; 1496 g = GrColorUnpackG(color) * scaleRGB;
1497 b = GrColorUnpackB(color) * scaleRGB; 1497 b = GrColorUnpackB(color) * scaleRGB;
1498 1498
1499 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE)); 1499 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
1500 fHWWriteToColor = kYes_TriState; 1500 fHWWriteToColor = kYes_TriState;
1501 GL_CALL(ClearColor(r, g, b, a)); 1501 GL_CALL(ClearColor(r, g, b, a));
1502 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT)); 1502 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
1503 } 1503 }
1504 1504
1505 void GrGpuGL::discard(GrRenderTarget* renderTarget) { 1505 void GrGLGpu::discard(GrRenderTarget* renderTarget) {
1506 SkASSERT(renderTarget); 1506 SkASSERT(renderTarget);
1507 if (!this->caps()->discardRenderTargetSupport()) { 1507 if (!this->caps()->discardRenderTargetSupport()) {
1508 return; 1508 return;
1509 } 1509 }
1510 1510
1511 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget); 1511 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
1512 if (renderTarget->getUniqueID() != fHWBoundRenderTargetUniqueID) { 1512 if (renderTarget->getUniqueID() != fHWBoundRenderTargetUniqueID) {
1513 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID; 1513 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
1514 fGPUStats.incRenderTargetBinds(); 1514 fGPUStats.incRenderTargetBinds();
1515 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, glRT->renderFBOID())); 1515 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, glRT->renderFBOID()));
(...skipping 29 matching lines...)
1545 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments), 1545 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
1546 attachments)); 1546 attachments));
1547 } 1547 }
1548 break; 1548 break;
1549 } 1549 }
1550 } 1550 }
1551 renderTarget->flagAsResolved(); 1551 renderTarget->flagAsResolved();
1552 } 1552 }
1553 1553
1554 1554
1555 void GrGpuGL::clearStencil(GrRenderTarget* target) { 1555 void GrGLGpu::clearStencil(GrRenderTarget* target) {
1556 if (NULL == target) { 1556 if (NULL == target) {
1557 return; 1557 return;
1558 } 1558 }
1559 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); 1559 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
1560 this->flushRenderTarget(glRT, &SkIRect::EmptyIRect()); 1560 this->flushRenderTarget(glRT, &SkIRect::EmptyIRect());
1561 1561
1562 this->disableScissor(); 1562 this->disableScissor();
1563 1563
1564 GL_CALL(StencilMask(0xffffffff)); 1564 GL_CALL(StencilMask(0xffffffff));
1565 GL_CALL(ClearStencil(0)); 1565 GL_CALL(ClearStencil(0));
1566 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT)); 1566 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
1567 fHWStencilSettings.invalidate(); 1567 fHWStencilSettings.invalidate();
1568 } 1568 }
1569 1569
1570 void GrGpuGL::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) { 1570 void GrGLGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
1571 SkASSERT(target); 1571 SkASSERT(target);
1572 1572
1573 // this should only be called internally when we know we have a 1573 // this should only be called internally when we know we have a
1574 // stencil buffer. 1574 // stencil buffer.
1575 SkASSERT(target->getStencilBuffer()); 1575 SkASSERT(target->getStencilBuffer());
1576 GrGLint stencilBitCount = target->getStencilBuffer()->bits(); 1576 GrGLint stencilBitCount = target->getStencilBuffer()->bits();
1577 #if 0 1577 #if 0
1578 SkASSERT(stencilBitCount > 0); 1578 SkASSERT(stencilBitCount > 0);
1579 GrGLint clipStencilMask = (1 << (stencilBitCount - 1)); 1579 GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
1580 #else 1580 #else
(...skipping 17 matching lines...)
1598 scissorState.fEnabled = true; 1598 scissorState.fEnabled = true;
1599 scissorState.fRect = rect; 1599 scissorState.fRect = rect;
1600 this->flushScissor(scissorState, glRT->getViewport(), glRT->origin()); 1600 this->flushScissor(scissorState, glRT->getViewport(), glRT->origin());
1601 1601
1602 GL_CALL(StencilMask((uint32_t) clipStencilMask)); 1602 GL_CALL(StencilMask((uint32_t) clipStencilMask));
1603 GL_CALL(ClearStencil(value)); 1603 GL_CALL(ClearStencil(value));
1604 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT)); 1604 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
1605 fHWStencilSettings.invalidate(); 1605 fHWStencilSettings.invalidate();
1606 } 1606 }
1607 1607
1608 bool GrGpuGL::readPixelsWillPayForYFlip(GrRenderTarget* renderTarget, 1608 bool GrGLGpu::readPixelsWillPayForYFlip(GrRenderTarget* renderTarget,
1609 int left, int top, 1609 int left, int top,
1610 int width, int height, 1610 int width, int height,
1611 GrPixelConfig config, 1611 GrPixelConfig config,
1612 size_t rowBytes) const { 1612 size_t rowBytes) const {
1613 // If this rendertarget is already TopLeft, we don't need to flip. 1613 // If this rendertarget is already TopLeft, we don't need to flip.
1614 if (kTopLeft_GrSurfaceOrigin == renderTarget->origin()) { 1614 if (kTopLeft_GrSurfaceOrigin == renderTarget->origin()) {
1615 return false; 1615 return false;
1616 } 1616 }
1617 1617
1618 // if GL can do the flip then we'll never pay for it. 1618 // if GL can do the flip then we'll never pay for it.
(...skipping 10 matching lines...)
1629 // Note the rowBytes might be tight to the passed in data, but if data 1629 // Note the rowBytes might be tight to the passed in data, but if data
1630 // gets clipped in x to the target the rowBytes will no longer be tight. 1630 // gets clipped in x to the target the rowBytes will no longer be tight.
1631 if (left >= 0 && (left + width) < renderTarget->width()) { 1631 if (left >= 0 && (left + width) < renderTarget->width()) {
1632 return 0 == rowBytes || 1632 return 0 == rowBytes ||
1633 GrBytesPerPixel(config) * width == rowBytes; 1633 GrBytesPerPixel(config) * width == rowBytes;
1634 } else { 1634 } else {
1635 return false; 1635 return false;
1636 } 1636 }
1637 } 1637 }
1638 1638
1639 bool GrGpuGL::onReadPixels(GrRenderTarget* target, 1639 bool GrGLGpu::onReadPixels(GrRenderTarget* target,
1640 int left, int top, 1640 int left, int top,
1641 int width, int height, 1641 int width, int height,
1642 GrPixelConfig config, 1642 GrPixelConfig config,
1643 void* buffer, 1643 void* buffer,
1644 size_t rowBytes) { 1644 size_t rowBytes) {
1645 // We cannot read pixels into a compressed buffer 1645 // We cannot read pixels into a compressed buffer
1646 if (GrPixelConfigIsCompressed(config)) { 1646 if (GrPixelConfigIsCompressed(config)) {
1647 return false; 1647 return false;
1648 } 1648 }
1649 1649
(...skipping 107 matching lines...)
1757 if (!flipY) { 1757 if (!flipY) {
1758 dst += rowBytes; 1758 dst += rowBytes;
1759 } else { 1759 } else {
1760 dst -= rowBytes; 1760 dst -= rowBytes;
1761 } 1761 }
1762 } 1762 }
1763 } 1763 }
1764 return true; 1764 return true;
1765 } 1765 }
1766 1766
1767 void GrGpuGL::flushRenderTarget(GrGLRenderTarget* target, const SkIRect* bound) { 1767 void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, const SkIRect* bound) {
1768 1768
1769 SkASSERT(target); 1769 SkASSERT(target);
1770 1770
1771 uint32_t rtID = target->getUniqueID(); 1771 uint32_t rtID = target->getUniqueID();
1772 if (fHWBoundRenderTargetUniqueID != rtID) { 1772 if (fHWBoundRenderTargetUniqueID != rtID) {
1773 fGPUStats.incRenderTargetBinds(); 1773 fGPUStats.incRenderTargetBinds();
1774 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, target->renderFBOID())); 1774 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, target->renderFBOID()));
1775 #ifdef SK_DEBUG 1775 #ifdef SK_DEBUG
1776 // don't do this check in Chromium -- this is causing 1776 // don't do this check in Chromium -- this is causing
1777 // lots of repeated command buffer flushes when the compositor is 1777 // lots of repeated command buffer flushes when the compositor is
1778 // rendering with Ganesh, which is really slow; even too slow for 1778 // rendering with Ganesh, which is really slow; even too slow for
1779 // Debug mode. 1779 // Debug mode.
1780 if (!this->glContext().isChromium()) { 1780 if (!this->glContext().isChromium()) {
1781 GrGLenum status; 1781 GrGLenum status;
1782 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); 1782 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1783 if (status != GR_GL_FRAMEBUFFER_COMPLETE) { 1783 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1784 SkDebugf("GrGpuGL::flushRenderTarget glCheckFramebufferStatus %x\n", status); 1784 SkDebugf("GrGLGpu::flushRenderTarget glCheckFramebufferStatus %x\n", status);
1785 } 1785 }
1786 } 1786 }
1787 #endif 1787 #endif
1788 fHWBoundRenderTargetUniqueID = rtID; 1788 fHWBoundRenderTargetUniqueID = rtID;
1789 const GrGLIRect& vp = target->getViewport(); 1789 const GrGLIRect& vp = target->getViewport();
1790 if (fHWViewport != vp) { 1790 if (fHWViewport != vp) {
1791 vp.pushToGLViewport(this->glInterface()); 1791 vp.pushToGLViewport(this->glInterface());
1792 fHWViewport = vp; 1792 fHWViewport = vp;
1793 } 1793 }
1794 } 1794 }
(...skipping 31 matching lines...)
1826 GetWindowThreadProcessId(hwnd, &wndProcID); 1826 GetWindowThreadProcessId(hwnd, &wndProcID);
1827 if(wndProcID == procID) { 1827 if(wndProcID == procID) {
1828 SwapBuffers(GetDC(hwnd)); 1828 SwapBuffers(GetDC(hwnd));
1829 } 1829 }
1830 hwnd = GetNextWindow(hwnd, GW_HWNDNEXT); 1830 hwnd = GetNextWindow(hwnd, GW_HWNDNEXT);
1831 } 1831 }
1832 } 1832 }
1833 #endif 1833 #endif
1834 #endif 1834 #endif
1835 1835
1836 void GrGpuGL::onDraw(const GrOptDrawState& ds, const GrDrawTarget::DrawInfo& info) { 1836 void GrGLGpu::onDraw(const GrOptDrawState& ds, const GrDrawTarget::DrawInfo& info) {
1837 size_t indexOffsetInBytes; 1837 size_t indexOffsetInBytes;
1838 this->setupGeometry(ds, info, &indexOffsetInBytes); 1838 this->setupGeometry(ds, info, &indexOffsetInBytes);
1839 1839
1840 SkASSERT((size_t)info.primitiveType() < SK_ARRAY_COUNT(gPrimitiveType2GLMode)); 1840 SkASSERT((size_t)info.primitiveType() < SK_ARRAY_COUNT(gPrimitiveType2GLMode));
1841 1841
1842 if (info.isIndexed()) { 1842 if (info.isIndexed()) {
1843 GrGLvoid* indices = 1843 GrGLvoid* indices =
1844 reinterpret_cast<GrGLvoid*>(indexOffsetInBytes + sizeof(uint16_t) * info.startIndex()); 1844 reinterpret_cast<GrGLvoid*>(indexOffsetInBytes + sizeof(uint16_t) * info.startIndex());
1845 // info.startVertex() was accounted for by setupGeometry. 1845 // info.startVertex() was accounted for by setupGeometry.
1846 GL_CALL(DrawElements(gPrimitiveType2GLMode[info.primitiveType()], 1846 GL_CALL(DrawElements(gPrimitiveType2GLMode[info.primitiveType()],
(...skipping 13 matching lines...)
1860 int set_a_break_pt_here = 9; 1860 int set_a_break_pt_here = 9;
1861 aglSwapBuffers(aglGetCurrentContext()); 1861 aglSwapBuffers(aglGetCurrentContext());
1862 #elif defined(SK_BUILD_FOR_WIN32) 1862 #elif defined(SK_BUILD_FOR_WIN32)
1863 SwapBuf(); 1863 SwapBuf();
1864 int set_a_break_pt_here = 9; 1864 int set_a_break_pt_here = 9;
1865 SwapBuf(); 1865 SwapBuf();
1866 #endif 1866 #endif
1867 #endif 1867 #endif
1868 } 1868 }
1869 1869
1870 void GrGpuGL::onResolveRenderTarget(GrRenderTarget* target) { 1870 void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target) {
1871 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target); 1871 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target);
1872 if (rt->needsResolve()) { 1872 if (rt->needsResolve()) {
1873 // Some extensions automatically resolve the texture when it is read. 1873 // Some extensions automatically resolve the texture when it is read.
1874 if (this->glCaps().usesMSAARenderBuffers()) { 1874 if (this->glCaps().usesMSAARenderBuffers()) {
1875 SkASSERT(rt->textureFBOID() != rt->renderFBOID()); 1875 SkASSERT(rt->textureFBOID() != rt->renderFBOID());
1876 fGPUStats.incRenderTargetBinds(); 1876 fGPUStats.incRenderTargetBinds();
1877 fGPUStats.incRenderTargetBinds(); 1877 fGPUStats.incRenderTargetBinds();
1878 GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID())); 1878 GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID()));
1879 GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID())); 1879 GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID()));
1880 // make sure we go through flushRenderTarget() since we've modified 1880 // make sure we go through flushRenderTarget() since we've modified
(...skipping 73 matching lines...)
1954 GR_GL_CALL(gl, StencilMask(writeMask)); 1954 GR_GL_CALL(gl, StencilMask(writeMask));
1955 GR_GL_CALL(gl, StencilOp(glFailOp, glPassOp, glPassOp)); 1955 GR_GL_CALL(gl, StencilOp(glFailOp, glPassOp, glPassOp));
1956 } else { 1956 } else {
1957 GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask)); 1957 GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
1958 GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask)); 1958 GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
1959 GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, glPassOp, glPassOp)); 1959 GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, glPassOp, glPassOp));
1960 } 1960 }
1961 } 1961 }
1962 } 1962 }
1963 1963
1964 void GrGpuGL::flushStencil(const GrStencilSettings& stencilSettings, DrawType type) { 1964 void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings, DrawType type) {
1965 // TODO figure out why we need to flush stencil settings on path draws at all 1965 // TODO figure out why we need to flush stencil settings on path draws at all
1966 if (kStencilPath_DrawType != type && fHWStencilSettings != stencilSettings) { 1966 if (kStencilPath_DrawType != type && fHWStencilSettings != stencilSettings) {
1967 if (stencilSettings.isDisabled()) { 1967 if (stencilSettings.isDisabled()) {
1968 if (kNo_TriState != fHWStencilTestEnabled) { 1968 if (kNo_TriState != fHWStencilTestEnabled) {
1969 GL_CALL(Disable(GR_GL_STENCIL_TEST)); 1969 GL_CALL(Disable(GR_GL_STENCIL_TEST));
1970 fHWStencilTestEnabled = kNo_TriState; 1970 fHWStencilTestEnabled = kNo_TriState;
1971 } 1971 }
1972 } else { 1972 } else {
1973 if (kYes_TriState != fHWStencilTestEnabled) { 1973 if (kYes_TriState != fHWStencilTestEnabled) {
1974 GL_CALL(Enable(GR_GL_STENCIL_TEST)); 1974 GL_CALL(Enable(GR_GL_STENCIL_TEST));
(...skipping 14 matching lines...)
1989 set_gl_stencil(this->glInterface(), 1989 set_gl_stencil(this->glInterface(),
1990 stencilSettings, 1990 stencilSettings,
1991 GR_GL_FRONT_AND_BACK, 1991 GR_GL_FRONT_AND_BACK,
1992 GrStencilSettings::kFront_Face); 1992 GrStencilSettings::kFront_Face);
1993 } 1993 }
1994 } 1994 }
1995 fHWStencilSettings = stencilSettings; 1995 fHWStencilSettings = stencilSettings;
1996 } 1996 }
1997 } 1997 }
1998 1998
1999 void GrGpuGL::flushAAState(const GrOptDrawState& optState) { 1999 void GrGLGpu::flushAAState(const GrOptDrawState& optState) {
2000 // At least some ATI linux drivers will render GL_LINES incorrectly when MSAA state is enabled but 2000 // At least some ATI linux drivers will render GL_LINES incorrectly when MSAA state is enabled but
2001 // the target is not multisampled. Single pixel wide lines are rendered thicker than 1 pixel wide. 2001 // the target is not multisampled. Single pixel wide lines are rendered thicker than 1 pixel wide.
2002 #if 0 2002 #if 0
2003 // Replace RT_HAS_MSAA with this definition once this driver bug is no longer a relevant concern 2003 // Replace RT_HAS_MSAA with this definition once this driver bug is no longer a relevant concern
2004 #define RT_HAS_MSAA rt->isMultisampled() 2004 #define RT_HAS_MSAA rt->isMultisampled()
2005 #else 2005 #else
2006 #define RT_HAS_MSAA (rt->isMultisampled() || kDrawLines_DrawType == optState.drawType()) 2006 #define RT_HAS_MSAA (rt->isMultisampled() || kDrawLines_DrawType == optState.drawType())
2007 #endif 2007 #endif
2008 2008
2009 const GrRenderTarget* rt = optState.getRenderTarget(); 2009 const GrRenderTarget* rt = optState.getRenderTarget();
2010 if (kGL_GrGLStandard == this->glStandard()) { 2010 if (kGL_GrGLStandard == this->glStandard()) {
2011 if (RT_HAS_MSAA) { 2011 if (RT_HAS_MSAA) {
2012 bool enableMSAA = optState.isHWAntialiasState(); 2012 bool enableMSAA = optState.isHWAntialiasState();
2013 if (enableMSAA) { 2013 if (enableMSAA) {
2014 if (kYes_TriState != fMSAAEnabled) { 2014 if (kYes_TriState != fMSAAEnabled) {
2015 GL_CALL(Enable(GR_GL_MULTISAMPLE)); 2015 GL_CALL(Enable(GR_GL_MULTISAMPLE));
2016 fMSAAEnabled = kYes_TriState; 2016 fMSAAEnabled = kYes_TriState;
2017 } 2017 }
2018 } else { 2018 } else {
2019 if (kNo_TriState != fMSAAEnabled) { 2019 if (kNo_TriState != fMSAAEnabled) {
2020 GL_CALL(Disable(GR_GL_MULTISAMPLE)); 2020 GL_CALL(Disable(GR_GL_MULTISAMPLE));
2021 fMSAAEnabled = kNo_TriState; 2021 fMSAAEnabled = kNo_TriState;
2022 } 2022 }
2023 } 2023 }
2024 } 2024 }
2025 } 2025 }
2026 } 2026 }
2027 2027
2028 void GrGpuGL::flushBlend(const GrOptDrawState& optState) { 2028 void GrGLGpu::flushBlend(const GrOptDrawState& optState) {
2029 // Any optimization to disable blending should have already been applied and 2029 // Any optimization to disable blending should have already been applied and
2030 // tweaked the coeffs to (1, 0). 2030 // tweaked the coeffs to (1, 0).
2031 2031
2032 GrXferProcessor::BlendInfo blendInfo; 2032 GrXferProcessor::BlendInfo blendInfo;
2033 optState.getXferProcessor()->getBlendInfo(&blendInfo); 2033 optState.getXferProcessor()->getBlendInfo(&blendInfo);
2034 GrBlendCoeff srcCoeff = blendInfo.fSrcBlend; 2034 GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
2035 GrBlendCoeff dstCoeff = blendInfo.fDstBlend; 2035 GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
2036 bool blendOff = kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff; 2036 bool blendOff = kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff;
2037 if (blendOff) { 2037 if (blendOff) {
2038 if (kNo_TriState != fHWBlendState.fEnabled) { 2038 if (kNo_TriState != fHWBlendState.fEnabled) {
(...skipping 32 matching lines...)
2071 GR_GL_REPEAT, 2071 GR_GL_REPEAT,
2072 GR_GL_MIRRORED_REPEAT 2072 GR_GL_MIRRORED_REPEAT
2073 }; 2073 };
2074 GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes)); 2074 GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes));
2075 GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode); 2075 GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode);
2076 GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode); 2076 GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode);
2077 GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode); 2077 GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode);
2078 return gWrapModes[tm]; 2078 return gWrapModes[tm];
2079 } 2079 }
2080 2080
2081 void GrGpuGL::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTexture* texture) { 2081 void GrGLGpu::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTexture* texture) {
2082 SkASSERT(texture); 2082 SkASSERT(texture);
2083 2083
2084 // If we created a rt/tex and rendered to it without using a texture and now we're texturing 2084 // If we created a rt/tex and rendered to it without using a texture and now we're texturing
2085 // from the rt it will still be the last bound texture, but it needs resolvi ng. So keep this 2085 // from the rt it will still be the last bound texture, but it needs resolvi ng. So keep this
2086 // out of the "last != next" check. 2086 // out of the "last != next" check.
2087 GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTa rget()); 2087 GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTa rget());
2088 if (texRT) { 2088 if (texRT) {
2089 this->onResolveRenderTarget(texRT); 2089 this->onResolveRenderTarget(texRT);
2090 } 2090 }
2091 2091
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after
2171 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_A, swi zzle[3])); 2171 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_A, swi zzle[3]));
2172 } else { 2172 } else {
2173 GR_STATIC_ASSERT(sizeof(newTexParams.fSwizzleRGBA[0]) == sizeof(GrGL int)); 2173 GR_STATIC_ASSERT(sizeof(newTexParams.fSwizzleRGBA[0]) == sizeof(GrGL int));
2174 const GrGLint* swizzle = reinterpret_cast<const GrGLint*>(newTexPara ms.fSwizzleRGBA); 2174 const GrGLint* swizzle = reinterpret_cast<const GrGLint*>(newTexPara ms.fSwizzleRGBA);
2175 GL_CALL(TexParameteriv(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_RGBA, swizzle)); 2175 GL_CALL(TexParameteriv(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_RGBA, swizzle));
2176 } 2176 }
2177 } 2177 }
2178 texture->setCachedTexParams(newTexParams, this->getResetTimestamp()); 2178 texture->setCachedTexParams(newTexParams, this->getResetTimestamp());
2179 } 2179 }
2180 2180
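bindTexture applies the same redundancy-avoidance idea at the per-texture level: it builds the desired sampler parameters, compares them with what was last programmed into the texture object, issues TexParameteri calls only for fields that differ, and then records the new values with a reset timestamp. A rough sketch of that compare-and-set pattern with made-up field names (not the real GrGLTexture::TexParams):

    #include <GL/gl.h>

    struct TexParams {
        GLenum fMinFilter = GL_NEAREST;
        GLenum fWrapS     = GL_REPEAT;
    };

    // Re-issue only the parameters that differ from what the texture object already holds.
    static void flush_tex_params(GLenum target, const TexParams& wanted, TexParams* cached,
                                 bool cacheIsValid) {
        if (!cacheIsValid || wanted.fMinFilter != cached->fMinFilter) {
            glTexParameteri(target, GL_TEXTURE_MIN_FILTER, wanted.fMinFilter);
        }
        if (!cacheIsValid || wanted.fWrapS != cached->fWrapS) {
            glTexParameteri(target, GL_TEXTURE_WRAP_S, wanted.fWrapS);
        }
        *cached = wanted;  // remember what the texture object now holds
    }
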
void GrGLGpu::flushMiscFixedFunctionState(const GrOptDrawState& optState) {
    if (optState.isDitherState()) {
        if (kYes_TriState != fHWDitherEnabled) {
            GL_CALL(Enable(GR_GL_DITHER));
            fHWDitherEnabled = kYes_TriState;
        }
    } else {
        if (kNo_TriState != fHWDitherEnabled) {
            GL_CALL(Disable(GR_GL_DITHER));
            fHWDitherEnabled = kNo_TriState;
        }
(...skipping 25 matching lines...)
            case GrDrawState::kBoth_DrawFace:
                GL_CALL(Disable(GR_GL_CULL_FACE));
                break;
            default:
                SkFAIL("Unknown draw face.");
        }
        fHWDrawFace = optState.getDrawFace();
    }
}

bool GrGLGpu::configToGLFormats(GrPixelConfig config,
                                bool getSizedInternalFormat,
                                GrGLenum* internalFormat,
                                GrGLenum* externalFormat,
                                GrGLenum* externalType) {
    GrGLenum dontCare;
    if (NULL == internalFormat) {
        internalFormat = &dontCare;
    }
    if (NULL == externalFormat) {
        externalFormat = &dontCare;
(...skipping 128 matching lines...)
                *externalType = GR_GL_HALF_FLOAT;
            }
            break;

        default:
            return false;
    }
    return true;
}

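configToGLFormats is one large switch that turns a GrPixelConfig into the (internal format, external format, external type) triple that texture upload calls need, optionally selecting the sized internal format. Most of the switch is elided in the view above; as a hedged illustration of the shape of one such mapping, here is a standalone sketch for a generic RGBA8/Alpha8 pair using desktop GL constants (the PixelConfig enum and values are stand-ins, not copied from the elided Skia code):

    #include <GL/gl.h>

    enum PixelConfig { kRGBA_8888_Config, kAlpha_8_Config };

    // Returns false for configs the GL backend cannot express.
    static bool config_to_gl_formats(PixelConfig config, bool sized,
                                     GLenum* internalFormat, GLenum* externalFormat,
                                     GLenum* externalType) {
        switch (config) {
            case kRGBA_8888_Config:
                *internalFormat = sized ? GL_RGBA8 : GL_RGBA;
                *externalFormat = GL_RGBA;
                *externalType   = GL_UNSIGNED_BYTE;
                return true;
            case kAlpha_8_Config:
                *internalFormat = sized ? GL_ALPHA8 : GL_ALPHA;  // legacy alpha format, for illustration only
                *externalFormat = GL_ALPHA;
                *externalType   = GL_UNSIGNED_BYTE;
                return true;
            default:
                return false;
        }
    }
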
void GrGLGpu::setTextureUnit(int unit) {
    SkASSERT(unit >= 0 && unit < fHWBoundTextureUniqueIDs.count());
    if (unit != fHWActiveTextureUnitIdx) {
        GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
        fHWActiveTextureUnitIdx = unit;
    }
}

void GrGLGpu::setScratchTextureUnit() {
    // Bind the last texture unit since it is the least likely to be used by GrGLProgram.
    int lastUnitIdx = fHWBoundTextureUniqueIDs.count() - 1;
    if (lastUnitIdx != fHWActiveTextureUnitIdx) {
        GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx));
        fHWActiveTextureUnitIdx = lastUnitIdx;
    }
    // Clear out this field so that if a program does use this unit it will rebind the correct
    // texture.
    fHWBoundTextureUniqueIDs[lastUnitIdx] = SK_InvalidUniqueID;
}

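setScratchTextureUnit exists because helper operations (temporary binds for copies, for example) would otherwise clobber the unit-to-texture bookkeeping that program setup relies on; it deliberately uses the last unit and marks its cached binding invalid so the next real bind is not skipped as redundant. A small sketch of that active-unit cache plus invalidation, with hypothetical member names and GLEW assumed for glActiveTexture:

    #include <GL/glew.h>
    #include <vector>

    struct TextureUnitCache {
        std::vector<unsigned> fBoundTextureIDs;  // cached binding per unit; 0 == unknown/none
        int fActiveUnit = -1;

        explicit TextureUnitCache(int numUnits) : fBoundTextureIDs(numUnits, 0) {}

        void setUnit(int unit) {
            if (unit != fActiveUnit) {           // skip redundant glActiveTexture calls
                glActiveTexture(GL_TEXTURE0 + unit);
                fActiveUnit = unit;
            }
        }

        // Use the last unit for scratch work and forget its cached binding so a later
        // "real" bind to that unit is not considered redundant.
        void setScratchUnit() {
            int last = static_cast<int>(fBoundTextureIDs.size()) - 1;
            setUnit(last);
            fBoundTextureIDs[last] = 0;
        }
    };
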
namespace {
// Determines whether glBlitFramebuffer could be used between src and dst.
inline bool can_blit_framebuffer(const GrSurface* dst,
                                 const GrSurface* src,
                                 const GrGLGpu* gpu,
                                 bool* wouldNeedTempFBO = NULL) {
    if (gpu->glCaps().isConfigRenderable(dst->config(), dst->desc().fSampleCnt > 0) &&
        gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) &&
        gpu->glCaps().usesMSAARenderBuffers()) {
        // ES3 doesn't allow framebuffer blits when the src has MSAA and the configs don't match
        // or the rects are not the same (not just the same size but have the same edges).
        if (GrGLCaps::kES_3_0_MSFBOType == gpu->glCaps().msFBOType() &&
            (src->desc().fSampleCnt > 0 || src->config() != dst->config())) {
            return false;
        }
        if (wouldNeedTempFBO) {
            *wouldNeedTempFBO = NULL == dst->asRenderTarget() || NULL == src->asRenderTarget();
        }
        return true;
    } else {
        return false;
    }
}

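can_blit_framebuffer only answers whether a blit is legal for the two surfaces; the blit itself, once the source and destination are attached to the read and draw framebuffers, is a single call. A hedged sketch of a 1:1 color copy using the core GL 3.0 / ES 3.0 entry point, with framebuffer setup omitted and GLEW assumed as the loader:

    #include <GL/glew.h>

    // Assumes srcFBO and dstFBO are complete framebuffers of the same size.
    static void blit_color_1to1(GLuint srcFBO, GLuint dstFBO, GLint width, GLint height) {
        glBindFramebuffer(GL_READ_FRAMEBUFFER, srcFBO);
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, dstFBO);
        // Identical src and dst rects; GL_NEAREST keeps this a straight pixel copy.
        glBlitFramebuffer(0, 0, width, height,
                          0, 0, width, height,
                          GL_COLOR_BUFFER_BIT, GL_NEAREST);
    }
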
inline bool can_copy_texsubimage(const GrSurface* dst,
                                 const GrSurface* src,
                                 const GrGLGpu* gpu,
                                 bool* wouldNeedTempFBO = NULL) {
    // Table 3.9 of the ES2 spec indicates the supported formats with CopyTexSubImage
    // and BGRA isn't in the spec. There doesn't appear to be any extension that adds it. Perhaps
    // many drivers would allow it to work, but ANGLE does not.
    if (kGLES_GrGLStandard == gpu->glStandard() && gpu->glCaps().bgraIsInternalFormat() &&
        (kBGRA_8888_GrPixelConfig == dst->config() || kBGRA_8888_GrPixelConfig == src->config())) {
        return false;
    }
    const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
    // If dst is multisampled (and uses an extension where there is a separate MSAA renderbuffer)
(...skipping 17 matching lines...)
        return true;
    } else {
        return false;
    }
}

}

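When can_copy_texsubimage says yes, the copy path binds the source as the read framebuffer and pulls pixels straight into the destination texture; the core call is glCopyTexSubImage2D, which reads from the currently bound read framebuffer. A minimal hedged sketch, with texture and framebuffer setup assumed to have happened elsewhere:

    #include <GL/gl.h>

    // Copies a width x height block from the bound read framebuffer at (srcX, srcY)
    // into level 0 of dstTex at (dstX, dstY).
    static void copy_into_texture(GLuint dstTex, GLint dstX, GLint dstY,
                                  GLint srcX, GLint srcY, GLsizei width, GLsizei height) {
        glBindTexture(GL_TEXTURE_2D, dstTex);
        glCopyTexSubImage2D(GL_TEXTURE_2D, 0, dstX, dstY, srcX, srcY, width, height);
    }
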
// If a temporary FBO was created, its non-zero ID is returned. The viewport that the copy rect is
// relative to is output.
GrGLuint GrGLGpu::bindSurfaceAsFBO(GrSurface* surface, GrGLenum fboTarget, GrGLIRect* viewport) {
    GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
    GrGLuint tempFBOID;
    if (NULL == rt) {
        SkASSERT(surface->asTexture());
        GrGLuint texID = static_cast<GrGLTexture*>(surface->asTexture())->textureID();
        GR_GL_CALL(this->glInterface(), GenFramebuffers(1, &tempFBOID));
        fGPUStats.incRenderTargetBinds();
        GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, tempFBOID));
        GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
                                                             GR_GL_COLOR_ATTACHMENT0,
                                                             GR_GL_TEXTURE_2D,
                                                             texID,
                                                             0));
        viewport->fLeft = 0;
        viewport->fBottom = 0;
        viewport->fWidth = surface->width();
        viewport->fHeight = surface->height();
    } else {
        tempFBOID = 0;
        fGPUStats.incRenderTargetBinds();
        GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, rt->renderFBOID()));
        *viewport = rt->getViewport();
    }
    return tempFBOID;
}

bool GrGLGpu::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) {
    // Here we look for opportunities to use CopyTexSubImage, or fbo blit. If neither is possible,
    // we return false to fall back to creating a render target dst for render-to-texture. This
    // code prefers CopyTexSubImage to fbo blit and avoids triggering temporary fbo creation. It
    // isn't clear that avoiding temporary fbo creation is actually optimal.

    // Check for format issues with glCopyTexSubImage2D
    if (kGLES_GrGLStandard == this->glStandard() && this->glCaps().bgraIsInternalFormat() &&
        kBGRA_8888_GrPixelConfig == src->config()) {
        // glCopyTexSubImage2D doesn't work with this config. If the bgra can be used with fbo blit
        // then we set up for that, otherwise fail.
(...skipping 22 matching lines...)
        return false;
    }

    // We'll do a CopyTexSubImage. Make the dst a plain old texture.
    desc->fConfig = src->config();
    desc->fOrigin = src->origin();
    desc->fFlags = kNone_GrSurfaceFlags;
    return true;
}

bool GrGLGpu::copySurface(GrSurface* dst,
                          GrSurface* src,
                          const SkIRect& srcRect,
                          const SkIPoint& dstPoint) {
    bool copied = false;
    if (can_copy_texsubimage(dst, src, this)) {
        GrGLuint srcFBO;
        GrGLIRect srcVP;
        srcFBO = this->bindSurfaceAsFBO(src, GR_GL_FRAMEBUFFER, &srcVP);
        GrGLTexture* dstTex = static_cast<GrGLTexture*>(dst->asTexture());
        SkASSERT(dstTex);
(...skipping 82 matching lines...)
            }
            if (srcFBO) {
                GL_CALL(DeleteFramebuffers(1, &srcFBO));
            }
            copied = true;
        }
    }
    return copied;
}

bool GrGLGpu::canCopySurface(const GrSurface* dst,
                             const GrSurface* src,
                             const SkIRect& srcRect,
                             const SkIPoint& dstPoint) {
    // This mirrors the logic in onCopySurface. We prefer that our base class make the copy if we
    // would need to create a temp fbo. TODO: verify the assumption that temp fbos are expensive;
    // it may not be true at all.
    bool wouldNeedTempFBO = false;
    if (can_copy_texsubimage(dst, src, this, &wouldNeedTempFBO) && !wouldNeedTempFBO) {
        return true;
    }
    if (can_blit_framebuffer(dst, src, this, &wouldNeedTempFBO) && !wouldNeedTempFBO) {
        if (dst == src) {
            SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                                srcRect.width(), srcRect.height());
            if (!SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) {
                return true;
            }
        } else {
            return true;
        }
    }
    return false;
}

void GrGLGpu::didAddGpuTraceMarker() {
    if (this->caps()->gpuTracingSupport()) {
        const GrTraceMarkerSet& markerArray = this->getActiveTraceMarkers();
        SkString markerString = markerArray.toStringLast();
        GL_CALL(PushGroupMarker(0, markerString.c_str()));
    }
}

void GrGLGpu::didRemoveGpuTraceMarker() {
    if (this->caps()->gpuTracingSupport()) {
        GL_CALL(PopGroupMarker());
    }
}

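These trace-marker hooks map Skia's marker stack onto the GL debug-marker stack, so a GPU frame capture groups commands under the active marker names. With the GL_EXT_debug_marker extension, the underlying calls look roughly like the sketch below; the entry points must be obtained through your GL loader, and draw_with_marker is a hypothetical helper, not Skia code.

    // glPushGroupMarkerEXT / glPopGroupMarkerEXT come from GL_EXT_debug_marker;
    // obtain them via your GL loader before calling this.
    static void draw_with_marker(const char* name) {
        glPushGroupMarkerEXT(0, name);   // length 0 => name is treated as a null-terminated string
        // ... issue the GL draw calls that belong to this group here ...
        glPopGroupMarkerEXT();
    }
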
///////////////////////////////////////////////////////////////////////////////

GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBuffersToDraw(
                                GrGLGpu* gpu,
                                const GrGLVertexBuffer* vbuffer,
                                const GrGLIndexBuffer* ibuffer) {
    SkASSERT(vbuffer);
    GrGLAttribArrayState* attribState;

    // We use a vertex array if we're on a core profile and the verts are in a VBO.
    if (gpu->glCaps().isCoreProfile() && !vbuffer->isCPUBacked()) {
        if (NULL == fVBOVertexArray || fVBOVertexArray->wasDestroyed()) {
            SkSafeUnref(fVBOVertexArray);
            GrGLuint arrayID;
            GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
            int attrCount = gpu->glCaps().maxVertexAttributes();
            fVBOVertexArray = SkNEW_ARGS(GrGLVertexArray, (gpu, arrayID, attrCount));
        }
        attribState = fVBOVertexArray->bindWithIndexBuffer(ibuffer);
    } else {
        if (ibuffer) {
            this->setIndexBufferIDOnDefaultVertexArray(gpu, ibuffer->bufferID());
        } else {
            this->setVertexArrayID(gpu, 0);
        }
        int attrCount = gpu->glCaps().maxVertexAttributes();
        if (fDefaultVertexArrayAttribState.count() != attrCount) {
            fDefaultVertexArrayAttribState.resize(attrCount);
        }
        attribState = &fDefaultVertexArrayAttribState;
    }
    return attribState;
}
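On a core profile a vertex array object is mandatory, so the code above lazily creates one VAO, reuses it across draws, and re-attaches whichever index buffer the draw needs; on the other path it falls back to the default vertex array and only tracks the element-array binding. A hedged sketch of the lazy-VAO half of that in raw GL, with GLEW assumed for the GL 3.x entry points and bind_cached_vao a hypothetical helper:

    #include <GL/glew.h>

    // Lazily create a single VAO and bind it with the given index buffer attached.
    static GLuint bind_cached_vao(GLuint* cachedVAO, GLuint indexBufferID) {
        if (*cachedVAO == 0) {
            glGenVertexArrays(1, cachedVAO);   // created once, reused for every subsequent draw
        }
        glBindVertexArray(*cachedVAO);
        // The element-array binding is part of VAO state, so attach this draw's index buffer.
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferID);
        return *cachedVAO;
    }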