Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright 2011 Google Inc. | 2 * Copyright 2011 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 | 8 |
| 9 #include "GrGLGpu.h" | 9 #include "GrGLGpu.h" |
| 10 #include "GrGLGLSL.h" | 10 #include "GrGLGLSL.h" |
| 11 #include "GrGLStencilAttachment.h" | 11 #include "GrGLStencilAttachment.h" |
| 12 #include "GrGLTextureRenderTarget.h" | 12 #include "GrGLTextureRenderTarget.h" |
| 13 #include "GrGpuResourcePriv.h" | 13 #include "GrGpuResourcePriv.h" |
| 14 #include "GrPipeline.h" | 14 #include "GrPipeline.h" |
| 15 #include "GrRenderTargetPriv.h" | 15 #include "GrRenderTargetPriv.h" |
| 16 #include "GrSurfacePriv.h" | 16 #include "GrSurfacePriv.h" |
| 17 #include "GrTexturePriv.h" | 17 #include "GrTexturePriv.h" |
| 18 #include "GrTypes.h" | 18 #include "GrTypes.h" |
| 19 #include "GrVertices.h" | 19 #include "GrVertices.h" |
| 20 #include "builders/GrGLShaderStringBuilder.h" | 20 #include "builders/GrGLShaderStringBuilder.h" |
| 21 #include "glsl/GrGLSL.h" | 21 #include "glsl/GrGLSL.h" |
| 22 #include "glsl/GrGLSLCaps.h" | 22 #include "glsl/GrGLSLCaps.h" |
| 23 #include "SkStrokeRec.h" | 23 #include "SkStrokeRec.h" |
| 24 #include "SkTemplates.h" | 24 #include "SkTemplates.h" |
| 25 #include "SkTypes.h" | |
| 25 | 26 |
| 26 #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X) | 27 #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X) |
| 27 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X) | 28 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X) |
| 28 | 29 |
| 29 #define SKIP_CACHE_CHECK true | 30 #define SKIP_CACHE_CHECK true |
| 30 | 31 |
| 31 #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR | 32 #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR |
| 32 #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface) | 33 #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface) |
| 33 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call) | 34 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call) |
| 34 #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface) | 35 #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface) |
| 35 #else | 36 #else |
| 36 #define CLEAR_ERROR_BEFORE_ALLOC(iface) | 37 #define CLEAR_ERROR_BEFORE_ALLOC(iface) |
| 37 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call) | 38 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call) |
| 38 #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR | 39 #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR |
| 39 #endif | 40 #endif |
| 40 | 41 |
| 42 #if defined(GOOGLE3) | |
| 43 // Stack frame size is limited in GOOGLE3. | |
| 44 typedef SkAutoSMalloc<64 * 128> SkAutoSMallocTexels; | |
| 45 #else | |
| 46 typedef SkAutoSMalloc<128 * 128> SkAutoSMallocTexels; | |
| 47 #endif | |
| 41 | 48 |
| 42 /////////////////////////////////////////////////////////////////////////////// | 49 /////////////////////////////////////////////////////////////////////////////// |
| 43 | 50 |
| 44 | 51 |
| 45 static const GrGLenum gXfermodeEquation2Blend[] = { | 52 static const GrGLenum gXfermodeEquation2Blend[] = { |
| 46 // Basic OpenGL blend equations. | 53 // Basic OpenGL blend equations. |
| 47 GR_GL_FUNC_ADD, | 54 GR_GL_FUNC_ADD, |
| 48 GR_GL_FUNC_SUBTRACT, | 55 GR_GL_FUNC_SUBTRACT, |
| 49 GR_GL_FUNC_REVERSE_SUBTRACT, | 56 GR_GL_FUNC_REVERSE_SUBTRACT, |
| 50 | 57 |
| (...skipping 407 matching lines...) | |
| 458 return nullptr; | 465 return nullptr; |
| 459 } | 466 } |
| 460 | 467 |
| 461 switch (ownership) { | 468 switch (ownership) { |
| 462 case kAdopt_GrWrapOwnership: | 469 case kAdopt_GrWrapOwnership: |
| 463 idDesc.fLifeCycle = GrGpuResource::kAdopted_LifeCycle; | 470 idDesc.fLifeCycle = GrGpuResource::kAdopted_LifeCycle; |
| 464 break; | 471 break; |
| 465 case kBorrow_GrWrapOwnership: | 472 case kBorrow_GrWrapOwnership: |
| 466 idDesc.fLifeCycle = GrGpuResource::kBorrowed_LifeCycle; | 473 idDesc.fLifeCycle = GrGpuResource::kBorrowed_LifeCycle; |
| 467 break; | 474 break; |
| 468 } | 475 } |
| 469 | 476 |
| 470 surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags; | 477 surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags; |
| 471 surfDesc.fWidth = desc.fWidth; | 478 surfDesc.fWidth = desc.fWidth; |
| 472 surfDesc.fHeight = desc.fHeight; | 479 surfDesc.fHeight = desc.fHeight; |
| 473 surfDesc.fConfig = desc.fConfig; | 480 surfDesc.fConfig = desc.fConfig; |
| 474 surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount() ); | 481 surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount() ); |
| 475 // FIXME: this should be calling resolve_origin(), but Chrome code is currently | 482 // FIXME: this should be calling resolve_origin(), but Chrome code is currently |
| 476 // assuming the old behaviour, which is that backend textures are always | 483 // assuming the old behaviour, which is that backend textures are always |
| 477 // BottomLeft, even for non-RT's. Once Chrome is fixed, change this to: | 484 // BottomLeft, even for non-RT's. Once Chrome is fixed, change this to: |
| 478 // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget); | 485 // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget); |
| (...skipping 27 matching lines...) | |
| 506 idDesc.fRTFBOID = static_cast<GrGLuint>(wrapDesc.fRenderTargetHandle); | 513 idDesc.fRTFBOID = static_cast<GrGLuint>(wrapDesc.fRenderTargetHandle); |
| 507 idDesc.fMSColorRenderbufferID = 0; | 514 idDesc.fMSColorRenderbufferID = 0; |
| 508 idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID; | 515 idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID; |
| 509 switch (ownership) { | 516 switch (ownership) { |
| 510 case kAdopt_GrWrapOwnership: | 517 case kAdopt_GrWrapOwnership: |
| 511 idDesc.fLifeCycle = GrGpuResource::kAdopted_LifeCycle; | 518 idDesc.fLifeCycle = GrGpuResource::kAdopted_LifeCycle; |
| 512 break; | 519 break; |
| 513 case kBorrow_GrWrapOwnership: | 520 case kBorrow_GrWrapOwnership: |
| 514 idDesc.fLifeCycle = GrGpuResource::kBorrowed_LifeCycle; | 521 idDesc.fLifeCycle = GrGpuResource::kBorrowed_LifeCycle; |
| 515 break; | 522 break; |
| 516 } | 523 } |
| 517 idDesc.fSampleConfig = GrRenderTarget::kUnified_SampleConfig; | 524 idDesc.fSampleConfig = GrRenderTarget::kUnified_SampleConfig; |
| 518 | 525 |
| 519 GrSurfaceDesc desc; | 526 GrSurfaceDesc desc; |
| 520 desc.fConfig = wrapDesc.fConfig; | 527 desc.fConfig = wrapDesc.fConfig; |
| 521 desc.fFlags = kCheckAllocation_GrSurfaceFlag | kRenderTarget_GrSurfaceFlag; | 528 desc.fFlags = kCheckAllocation_GrSurfaceFlag | kRenderTarget_GrSurfaceFlag; |
| 522 desc.fWidth = wrapDesc.fWidth; | 529 desc.fWidth = wrapDesc.fWidth; |
| 523 desc.fHeight = wrapDesc.fHeight; | 530 desc.fHeight = wrapDesc.fHeight; |
| 524 desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount() ); | 531 desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount() ); |
| 525 desc.fOrigin = resolve_origin(wrapDesc.fOrigin, true); | 532 desc.fOrigin = resolve_origin(wrapDesc.fOrigin, true); |
| 526 | 533 |
| 527 return GrGLRenderTarget::CreateWrapped(this, desc, idDesc, wrapDesc.fStencil Bits); | 534 return GrGLRenderTarget::CreateWrapped(this, desc, idDesc, wrapDesc.fStencil Bits); |
| 528 } | 535 } |
| 529 | 536 |
| 530 //////////////////////////////////////////////////////////////////////////////// | 537 //////////////////////////////////////////////////////////////////////////////// |
| 531 bool GrGLGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height, | 538 bool GrGLGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height, |
| 532 size_t rowBytes, GrPixelConfig srcConfig, | 539 GrPixelConfig srcConfig, |
| 533 DrawPreference* drawPreference, | 540 DrawPreference* drawPreference, |
| 534 WritePixelTempDrawInfo* tempDrawInfo) { | 541 WritePixelTempDrawInfo* tempDrawInfo) { |
| 535 if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) { | 542 if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) { |
| 536 return false; | 543 return false; |
| 537 } | 544 } |
| 538 | 545 |
| 539 // This subclass only allows writes to textures. If the dst is not a texture we have to draw | 546 // This subclass only allows writes to textures. If the dst is not a texture we have to draw |
| 540 // into it. We could use glDrawPixels on GLs that have it, but we don't today. | 547 // into it. We could use glDrawPixels on GLs that have it, but we don't today. |
| 541 if (!dstSurface->asTexture()) { | 548 if (!dstSurface->asTexture()) { |
| 542 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference); | 549 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference); |
| (...skipping 64 matching lines...) | |
| 607 // Write or transfer of pixels is only implemented for TEXTURE_2D textures | 614 // Write or transfer of pixels is only implemented for TEXTURE_2D textures |
| 608 if (GR_GL_TEXTURE_2D != glTex->target()) { | 615 if (GR_GL_TEXTURE_2D != glTex->target()) { |
| 609 return false; | 616 return false; |
| 610 } | 617 } |
| 611 | 618 |
| 612 return true; | 619 return true; |
| 613 } | 620 } |
| 614 | 621 |
| 615 bool GrGLGpu::onWritePixels(GrSurface* surface, | 622 bool GrGLGpu::onWritePixels(GrSurface* surface, |
| 616 int left, int top, int width, int height, | 623 int left, int top, int width, int height, |
| 617 GrPixelConfig config, const void* buffer, | 624 GrPixelConfig config, |
| 618 size_t rowBytes) { | 625 const SkTArray<SkMipMapLevel>& texels) { |
| 619 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture()); | 626 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture()); |
| 620 | 627 |
| 621 if (!check_write_and_transfer_input(glTex, surface, config)) { | 628 if (!check_write_and_transfer_input(glTex, surface, config)) { |
| 622 return false; | 629 return false; |
| 623 } | 630 } |
| 624 | 631 |
| 625 this->setScratchTextureUnit(); | 632 this->setScratchTextureUnit(); |
| 626 GL_CALL(BindTexture(glTex->target(), glTex->textureID())); | 633 GL_CALL(BindTexture(glTex->target(), glTex->textureID())); |
| 627 | 634 |
| 628 bool success = false; | 635 bool success = false; |
| 629 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) { | 636 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) { |
| 630 // We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels() | 637 // We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels() |
| 631 SkASSERT(config == glTex->desc().fConfig); | 638 SkASSERT(config == glTex->desc().fConfig); |
| 632 success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), buffer, | 639 success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), texels, |
| 633 kWrite_UploadType, left, top, width, height); | 640 kWrite_UploadType, left, top, width, height); |
| 634 } else { | 641 } else { |
| 635 success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType, | 642 success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType, |
| 636 left, top, width, height, config, buffer, rowBytes); | 643 left, top, width, height, config, texels); |
| 637 } | 644 } |
| 638 | 645 |
| 639 if (success) { | 646 return success; |
| 640 glTex->texturePriv().dirtyMipMaps(true); | |
| 641 return true; | |
| 642 } | |
| 643 | |
| 644 return false; | |
| 645 } | 647 } |
| 646 | 648 |
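Note on the signature change above: onWritePixels() now receives an SkTArray<SkMipMapLevel> in place of the old raw pointer plus rowBytes, so even a single-level write has to be packaged as a one-entry array. A minimal caller-side sketch, assuming the four-argument SkMipMapLevel constructor (texels, rowBytes, width, height) that this CL uses in onTransferPixels() just below; the srcPixels/srcRowBytes names are illustrative, not part of this CL:

    // Wrap one base level so it can be passed where a (buffer, rowBytes) pair used to go.
    // Level 0 is the only entry; a full chain would simply push more levels in order.
    SkMipMapLevel baseLevel(srcPixels, srcRowBytes, width, height);
    SkSTArray<1, SkMipMapLevel> texels;
    texels.push_back(baseLevel);
    // 'texels' is what the new onWritePixels()/uploadTexData() parameters expect.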
| 647 bool GrGLGpu::onTransferPixels(GrSurface* surface, | 649 bool GrGLGpu::onTransferPixels(GrSurface* surface, |
| 648 int left, int top, int width, int height, | 650 int left, int top, int width, int height, |
| 649 GrPixelConfig config, GrTransferBuffer* buffer, | 651 GrPixelConfig config, GrTransferBuffer* buffer, |
| 650 size_t offset, size_t rowBytes) { | 652 size_t offset, size_t rowBytes) { |
| 651 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture()); | 653 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture()); |
| 652 | 654 |
| 653 if (!check_write_and_transfer_input(glTex, surface, config)) { | 655 if (!check_write_and_transfer_input(glTex, surface, config)) { |
| 654 return false; | 656 return false; |
| 655 } | 657 } |
| 656 | 658 |
| 657 // For the moment, can't transfer compressed data | 659 // For the moment, can't transfer compressed data |
| 658 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) { | 660 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) { |
| 659 return false; | 661 return false; |
| 660 } | 662 } |
| 661 | 663 |
| 662 this->setScratchTextureUnit(); | 664 this->setScratchTextureUnit(); |
| 663 GL_CALL(BindTexture(glTex->target(), glTex->textureID())); | 665 GL_CALL(BindTexture(glTex->target(), glTex->textureID())); |
| 664 | 666 |
| 665 SkASSERT(!buffer->isMapped()); | 667 SkASSERT(!buffer->isMapped()); |
| 666 GrGLTransferBuffer* glBuffer = reinterpret_cast<GrGLTransferBuffer*>(buffer) ; | 668 GrGLTransferBuffer* glBuffer = reinterpret_cast<GrGLTransferBuffer*>(buffer) ; |
| 667 // bind the transfer buffer | 669 // bind the transfer buffer |
| 668 SkASSERT(GR_GL_PIXEL_UNPACK_BUFFER == glBuffer->bufferType() || | 670 SkASSERT(GR_GL_PIXEL_UNPACK_BUFFER == glBuffer->bufferType() || |
| 669 GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == glBuffer->bufferType ()); | 671 GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == glBuffer->bufferType ()); |
| 670 GL_CALL(BindBuffer(glBuffer->bufferType(), glBuffer->bufferID())); | 672 GL_CALL(BindBuffer(glBuffer->bufferType(), glBuffer->bufferID())); |
| 671 | 673 |
| 672 bool success = false; | 674 bool success = false; |
| 675 SkMipMapLevel mipLevel(buffer, rowBytes, width, height); | |
| 676 SkSTArray<1, SkMipMapLevel> texels; | |
| 677 texels.push_back(mipLevel); | |
| 673 success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType, | 678 success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType, |
| 674 left, top, width, height, config, buffer, rowBytes); | 679 left, top, width, height, config, texels); |
| 675 | 680 |
| 676 if (success) { | 681 if (success) { |
| 677 glTex->texturePriv().dirtyMipMaps(true); | 682 glTex->texturePriv().dirtyMipMaps(true); |
| 678 return true; | 683 return true; |
| 679 } | 684 } |
| 680 | 685 |
| 681 return false; | 686 return false; |
| 682 } | 687 } |
| 683 | 688 |
| 684 // For GL_[UN]PACK_ALIGNMENT. | 689 // For GL_[UN]PACK_ALIGNMENT. |
| (...skipping 19 matching lines...) | |
| 704 | 709 |
| 705 static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc, | 710 static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc, |
| 706 const GrGLInterface* interface) { | 711 const GrGLInterface* interface) { |
| 707 if (SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag)) { | 712 if (SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag)) { |
| 708 return GR_GL_GET_ERROR(interface); | 713 return GR_GL_GET_ERROR(interface); |
| 709 } else { | 714 } else { |
| 710 return CHECK_ALLOC_ERROR(interface); | 715 return CHECK_ALLOC_ERROR(interface); |
| 711 } | 716 } |
| 712 } | 717 } |
| 713 | 718 |
| 719 /** | |
| 720 * Creates storage space for the texture and fills it with texels. | |
| 721 * | |
| 722 * @param desc The surface descriptor for the texture being created. | |
| 723 * @param interface The GL interface in use. | |
| 724 * @param useTexStorage The result of a call to can_use_tex_storage(). | |
| 725 * @param internalFormat The data format used for the internal storage of the texture. | |
| 726 * @param externalFormat The data format used for the external storage of the texture. | |
| 727 * @param externalType The type of the data used for the external storage of the texture. | |
| 728 * @param texels The texel data of the texture being created. | |
| 729 * @param succeeded Set to true if allocating and populating the texture completed | |
| 730 * without error. | |
| 731 */ | |
| 732 static void allocate_and_populate_uncompressed_texture(const GrSurfaceDesc& desc, | |
| 733 const GrGLInterface& interface, | |
| 734 GrGLenum target, | |
| 735 GrGLenum internalFormat, | |
| 736 GrGLenum externalFormat, | |
| 737 GrGLenum externalType, | |
| 738 const SkTArray<SkMipMapLevel>& texels, | |
| 739 bool* succeeded) { | |
| 740 CLEAR_ERROR_BEFORE_ALLOC(&interface); | |
| 741 *succeeded = true; | |
| 742 for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) { | |
| 743 const void* currentMipData = texels[currentMipLevel].fTexelsOrOffset; | |
| 744 // Even if currentMipData is nullptr, continue to call TexImage2D. | |
| 745 // This will allocate texture memory which we can later populate. | |
| 746 GL_ALLOC_CALL(&interface, | |
| 747 TexImage2D(target, | |
| 748 currentMipLevel, | |
| 749 internalFormat, | |
| 750 texels[currentMipLevel].fWidth, | |
| 751 texels[currentMipLevel].fHeight, | |
| 752 0, // border | |
| 753 externalFormat, externalType, | |
| 754 currentMipData)); | |
| 755 GrGLenum error = check_alloc_error(desc, &interface); | |
| 756 if (error != GR_GL_NO_ERROR) { | |
| 757 *succeeded = false; | |
| 758 break; | |
| 759 } | |
| 760 } | |
| 761 } | |
| 762 | |
| 763 /** | |
| 764 * Creates storage space for the texture and fills it with texels. | |
| 765 * | |
| 766 * @param desc The surface descriptor for the texture being created. | |
| 767 * @param interface The GL interface in use. | |
| 768 * @param useTexStorage The result of a call to can_use_tex_storage(). | |
| 769 * @param internalFormat The data format used for the internal storage of the texture. | |
| 770 * @param texels The texel data of the texture being created. | |
| 771 */ | |
| 772 static bool allocate_and_populate_compressed_texture(const GrSurfaceDesc& desc, | |
| 773 const GrGLInterface& interface, | |
| 774 GrGLenum target, GrGLenum internalFormat, | |
| 775 const SkTArray<SkMipMapLevel>& texels) { | |
| 776 CLEAR_ERROR_BEFORE_ALLOC(&interface); | |
| 777 for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) { | |
| 778 int width = texels[currentMipLevel].fWidth; | |
| 779 int height = texels[currentMipLevel].fHeight; | |
| 780 | |
| 781 // Make sure that the width and height that we pass to OpenGL | |
| 782 // is a multiple of the block size. | |
| 783 size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height ); | |
| 784 | |
| 785 GL_ALLOC_CALL(&interface, | |
| 786 CompressedTexImage2D(target, | |
| 787 currentMipLevel, | |
| 788 internalFormat, | |
| 789 width, | |
| 790 height, | |
| 791 0, // border | |
| 792 SkToInt(dataSize), | |
| 793 texels[currentMipLevel].fTexelsOrOffset)); | |
| 794 | |
| 795 GrGLenum error = check_alloc_error(desc, &interface); | |
| 796 if (error != GR_GL_NO_ERROR) { | |
| 797 return false; | |
| 798 } | |
| 799 } | |
| 800 | |
| 801 return true; | |
| 802 } | |
| 803 | |
| 804 /** | |
| 805 * After a texture is created, any state which was altered during its creation | |
| 806 * needs to be restored. | |
| 807 * | |
| 808 * @param interface The GL interface to use. | |
| 809 * @param caps The capabilities of the GL device. | |
| 810 * @param restoreGLRowLength Should the row length unpacking be restored? | |
| 811 * @param glFlipY Did GL flip the texture vertically? | |
| 812 */ | |
| 813 static void restore_pixelstore_state(const GrGLInterface& interface, const GrGLCaps& caps, | |
| 814 bool restoreGLRowLength, bool glFlipY) { | |
| 815 if (restoreGLRowLength) { | |
| 816 SkASSERT(caps.unpackRowLengthSupport()); | |
| 817 GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); | |
| 818 } | |
| 819 if (glFlipY) { | |
| 820 GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE)); | |
| 821 } | |
| 822 } | |
| 823 | |
| 714 bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc, | 824 bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc, |
| 715 GrGLenum target, | 825 GrGLenum target, |
| 716 UploadType uploadType, | 826 UploadType uploadType, |
| 717 int left, int top, int width, int height, | 827 int left, int top, int width, int height, |
| 718 GrPixelConfig dataConfig, | 828 GrPixelConfig dataConfig, |
| 719 const void* dataOrOffset, | 829 const SkTArray<SkMipMapLevel>& texels) { |
| 720 size_t rowBytes) { | |
| 721 SkASSERT(dataOrOffset || kNewTexture_UploadType == uploadType || | |
| 722 kTransfer_UploadType == uploadType); | |
| 723 | |
| 724 // If we're uploading compressed data then we should be using uploadCompress edTexData | 830 // If we're uploading compressed data then we should be using uploadCompress edTexData |
| 725 SkASSERT(!GrPixelConfigIsCompressed(dataConfig)); | 831 SkASSERT(!GrPixelConfigIsCompressed(dataConfig)); |
| 726 | 832 |
| 727 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig)); | 833 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig)); |
| 728 | 834 |
| 835 // texels is const. | |
| 836 // But we may need to flip the texture vertically to prepare it. | |
| 837 // Rather than flip in place and alter the incoming data, | |
| 838 // we allocate a new buffer to flip into. | |
| 839 // This means we need to make a non-const shallow copy of texels. | |
| 840 SkTArray<SkMipMapLevel> texelsShallowCopy(texels); | |
| 841 | |
| 842 for (int currentMipLevel = texelsShallowCopy.count() - 1; currentMipLevel >= 0; | |
| 843 currentMipLevel--) { | |
| 844 SkASSERT(texelsShallowCopy[currentMipLevel].fTexelsOrOffset || | |
| 845 kNewTexture_UploadType == uploadType || kTransfer_UploadType == uploadType); | |
| 846 } | |
| 847 | |
| 848 | |
| 849 const GrGLInterface* interface = this->glInterface(); | |
| 850 const GrGLCaps& caps = this->glCaps(); | |
| 851 | |
| 729 size_t bpp = GrBytesPerPixel(dataConfig); | 852 size_t bpp = GrBytesPerPixel(dataConfig); |
| 730 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, & left, &top, | 853 for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); c urrentMipLevel++) { |
| 731 &width, &height, &dataOrOffset, & rowBytes)) { | 854 if (texelsShallowCopy[currentMipLevel].fTexelsOrOffset == nullptr) { |
| 732 return false; | 855 continue; |
| 856 } | |
| 857 | |
| 858 if (texelsShallowCopy[currentMipLevel].fHeight > SK_MaxS32 || | |
| 859 texelsShallowCopy[currentMipLevel].fWidth > SK_MaxS32) { | |
| 860 return false; | |
| 861 } | |
| 862 int currentMipHeight = texelsShallowCopy[currentMipLevel].fHeight; | |
| 863 int currentMipWidth = texelsShallowCopy[currentMipLevel].fWidth; | |
| 864 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bp p, &left, &top, | |
| 865 ¤tMipWidth, | |
| 866 ¤tMipHeight, | |
| 867 &texelsShallowCopy[currentMipLeve l].fTexelsOrOffset, | |
| 868 &texelsShallowCopy[currentMipLeve l].fRowBytes)) { | |
| 869 return false; | |
| 870 } | |
| 871 if (currentMipWidth < 0 || currentMipHeight < 0) { | |
| 872 return false; | |
| 873 } | |
| 874 texelsShallowCopy[currentMipLevel].fWidth = currentMipWidth; | |
| 875 texelsShallowCopy[currentMipLevel].fHeight = currentMipHeight; | |
| 733 } | 876 } |
| 734 size_t trimRowBytes = width * bpp; | |
| 735 | |
| 736 // in case we need a temporary, trimmed copy of the src pixels | |
| 737 #if defined(GOOGLE3) | |
| 738 // Stack frame size is limited in GOOGLE3. | |
| 739 SkAutoSMalloc<64 * 128> tempStorage; | |
| 740 #else | |
| 741 SkAutoSMalloc<128 * 128> tempStorage; | |
| 742 #endif | |
| 743 | 877 |
| 744 // Internal format comes from the texture desc. | 878 // Internal format comes from the texture desc. |
| 745 GrGLenum internalFormat; | 879 GrGLenum internalFormat; |
| 746 // External format and type come from the upload data. | 880 // External format and type come from the upload data. |
| 747 GrGLenum externalFormat; | 881 GrGLenum externalFormat; |
| 748 GrGLenum externalType; | 882 GrGLenum externalType; |
| 749 if (!this->glCaps().getTexImageFormats(desc.fConfig, dataConfig, &internalFo rmat, | 883 if (!this->glCaps().getTexImageFormats(desc.fConfig, dataConfig, &internalFo rmat, |
| 750 &externalFormat, &externalType)) { | 884 &externalFormat, &externalType)) { |
| 751 return false; | 885 return false; |
| 752 } | 886 } |
| 753 /* | 887 /* |
| 754 * Check whether to allocate a temporary buffer for flipping y or | 888 * Check whether to allocate a temporary buffer for flipping y or |
| 755 * because our srcData has extra bytes past each row. If so, we need | 889 * because our srcData has extra bytes past each row. If so, we need |
| 756 * to trim those off here, since GL ES may not let us specify | 890 * to trim those off here, since GL ES may not let us specify |
| 757 * GL_UNPACK_ROW_LENGTH. | 891 * GL_UNPACK_ROW_LENGTH. |
| 758 */ | 892 */ |
| 759 bool restoreGLRowLength = false; | 893 bool restoreGLRowLength = false; |
| 760 bool swFlipY = false; | 894 bool swFlipY = false; |
| 761 bool glFlipY = false; | 895 bool glFlipY = false; |
| 762 if (dataOrOffset) { | 896 |
| 763 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { | 897 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { |
| 764 if (this->glCaps().unpackFlipYSupport()) { | 898 if (caps.unpackFlipYSupport()) { |
| 765 glFlipY = true; | 899 glFlipY = true; |
| 766 } else { | 900 } else { |
| 767 swFlipY = true; | 901 swFlipY = true; |
| 768 } | |
| 769 } | 902 } |
| 770 if (this->glCaps().unpackRowLengthSupport() && !swFlipY) { | 903 } |
| 904 | |
| 905 // in case we need a temporary, trimmed copy of the src pixels | |
| 906 SkAutoSMallocTexels tempStorage; | |
| 907 | |
| 908 // find the combined size of all the mip levels and the relative offset of | |
| 909 // each into the collective buffer | |
| 910 size_t combined_buffer_size = 0; | |
| 911 SkTArray<size_t> individual_mip_offsets(texelsShallowCopy.count()); | |
| 912 for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); c urrentMipLevel++) { | |
| 913 const size_t trimmedSize = texels[currentMipLevel].fWidth * bpp * | |
| 914 texelsShallowCopy[currentMipLevel].fHeight; | |
| 915 individual_mip_offsets.push_back(combined_buffer_size); | |
| 916 combined_buffer_size += trimmedSize; | |
| 917 } | |
| 918 char* buffer = (char*)tempStorage.reset(combined_buffer_size); | |
| 919 | |
| 920 for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); c urrentMipLevel++) { | |
| 921 if (texelsShallowCopy[currentMipLevel].fTexelsOrOffset == nullptr) { | |
| 922 continue; | |
| 923 } | |
| 924 | |
| 925 const size_t trimRowBytes = texelsShallowCopy[currentMipLevel].fWidth * bpp; | |
| 926 | |
| 927 /* | |
| 928 * check whether to allocate a temporary buffer for flipping y or | |
| 929 * because our srcData has extra bytes past each row. If so, we need | |
| 930 * to trim those off here, since GL ES may not let us specify | |
| 931 * GL_UNPACK_ROW_LENGTH. | |
| 932 */ | |
| 933 restoreGLRowLength = false; | |
| 934 | |
| 935 const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes; | |
| 936 if (caps.unpackRowLengthSupport() && !swFlipY) { | |
| 771 // can't use this for flipping, only non-neg values allowed. :( | 937 // can't use this for flipping, only non-neg values allowed. :( |
| 772 if (rowBytes != trimRowBytes) { | 938 if (rowBytes != trimRowBytes) { |
| 773 GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp); | 939 GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp); |
| 774 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength)); | 940 GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLe ngth)); |
| 775 restoreGLRowLength = true; | 941 restoreGLRowLength = true; |
| 776 } | 942 } |
| 777 } else if (kTransfer_UploadType != uploadType) { | 943 } else if (kTransfer_UploadType != uploadType) { |
| 778 if (trimRowBytes != rowBytes || swFlipY) { | 944 if (trimRowBytes != rowBytes || swFlipY) { |
| 945 const int height = texelsShallowCopy[currentMipLevel].fHeight; | |
| 779 // copy data into our new storage, skipping the trailing bytes | 946 // copy data into our new storage, skipping the trailing bytes |
| 780 size_t trimSize = height * trimRowBytes; | 947 const char* src = (const char*)texelsShallowCopy[currentMipLevel ].fTexelsOrOffset; |
| 781 const char* src = (const char*)dataOrOffset; | 948 if (swFlipY && height >= 1) { |
| 782 if (swFlipY) { | |
| 783 src += (height - 1) * rowBytes; | 949 src += (height - 1) * rowBytes; |
| 784 } | 950 } |
| 785 char* dst = (char*)tempStorage.reset(trimSize); | 951 char* dst = buffer + individual_mip_offsets[currentMipLevel]; |
| 786 for (int y = 0; y < height; y++) { | 952 for (int y = 0; y < height; y++) { |
| 787 memcpy(dst, src, trimRowBytes); | 953 memcpy(dst, src, trimRowBytes); |
| 788 if (swFlipY) { | 954 if (swFlipY) { |
| 789 src -= rowBytes; | 955 src -= rowBytes; |
| 790 } else { | 956 } else { |
| 791 src += rowBytes; | 957 src += rowBytes; |
| 792 } | 958 } |
| 793 dst += trimRowBytes; | 959 dst += trimRowBytes; |
| 794 } | 960 } |
| 795 // now point data to our copied version | 961 // now point data to our copied version |
| 796 dataOrOffset = tempStorage.get(); | 962 texelsShallowCopy[currentMipLevel] = |
| 963 SkMipMapLevel(buffer + individual_mip_offsets[currentMipLeve l], | |
| 964 trimRowBytes, | |
| 965 texelsShallowCopy[currentMipLevel].fWidth, | |
| 966 texelsShallowCopy[currentMipLevel].fHeight); | |
| 797 } | 967 } |
| 798 } else { | 968 } else { |
| 799 return false; | 969 return false; |
| 800 } | 970 } |
| 801 if (glFlipY) { | 971 if (glFlipY) { |
| 802 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE)); | 972 GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE)); |
| 803 } | 973 } |
| 804 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(dataConfig) )); | 974 GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ALIGNMENT, |
| 975 config_alignment(desc.fConfig))); | |
| 805 } | 976 } |
| 977 | |
| 806 bool succeeded = true; | 978 bool succeeded = true; |
| 807 if (kNewTexture_UploadType == uploadType) { | 979 if (kNewTexture_UploadType == uploadType && |
| 808 if (dataOrOffset && | 980 0 == left && 0 == top && |
| 809 !(0 == left && 0 == top && desc.fWidth == width && desc.fHeight == h eight)) { | 981 desc.fWidth == width && desc.fHeight == height) { |
| 810 succeeded = false; | 982 allocate_and_populate_uncompressed_texture(desc, *interface, target, int ernalFormat, |
| 811 } else { | 983 externalFormat, externalType, texelsShallowCopy, |
| 812 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); | 984 &succeeded); |
| 813 GL_ALLOC_CALL(this->glInterface(), TexImage2D(target, 0, internalFor mat, desc.fWidth, | |
| 814 desc.fHeight, 0, exter nalFormat, | |
| 815 externalType, dataOrOf fset)); | |
| 816 GrGLenum error = check_alloc_error(desc, this->glInterface()); | |
| 817 if (error != GR_GL_NO_ERROR) { | |
| 818 succeeded = false; | |
| 819 } | |
| 820 } | |
| 821 } else { | 985 } else { |
| 822 if (swFlipY || glFlipY) { | 986 if (swFlipY || glFlipY) { |
| 823 top = desc.fHeight - (top + height); | 987 top = desc.fHeight - (top + height); |
| 824 } | 988 } |
| 825 GL_CALL(TexSubImage2D(target, | 989 for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count( ); |
| 826 0, // level | 990 currentMipLevel++) { |
| 827 left, top, | 991 if (texelsShallowCopy[currentMipLevel].fTexelsOrOffset == nullptr) { |
| 828 width, height, | 992 continue; |
| 829 externalFormat, externalType, dataOrOffset)); | 993 } |
| 994 | |
| 995 GL_CALL(TexSubImage2D(target, | |
| 996 currentMipLevel, | |
| 997 left, top, | |
| 998 texelsShallowCopy[currentMipLevel].fWidth, | |
| 999 texelsShallowCopy[currentMipLevel].fHeight, | |
| 1000 externalFormat, externalType, | |
| 1001 texelsShallowCopy[currentMipLevel].fTexelsOrOf fset)); | |
| 1002 } | |
| 830 } | 1003 } |
| 831 | 1004 |
| 832 if (restoreGLRowLength) { | 1005 restore_pixelstore_state(*interface, caps, restoreGLRowLength, glFlipY); |
| 833 SkASSERT(this->glCaps().unpackRowLengthSupport()); | 1006 |
| 834 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); | |
| 835 } | |
| 836 if (glFlipY) { | |
| 837 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE)); | |
| 838 } | |
| 839 return succeeded; | 1007 return succeeded; |
| 840 } | 1008 } |
| 841 | 1009 |
| 842 // TODO: This function is using a lot of wonky semantics like, if width == -1 | 1010 // TODO: This function is using a lot of wonky semantics like, if width == -1 |
| 843 // then set width = desc.fWidth ... blah. A better way to do it might be to | 1011 // then set width = desc.fWidth ... blah. A better way to do it might be to |
| 844 // create a CompressedTexData struct that takes a desc/ptr and figures out | 1012 // create a CompressedTexData struct that takes a desc/ptr and figures out |
| 845 // the proper upload semantics. Then users can construct this function how they | 1013 // the proper upload semantics. Then users can construct this function how they |
| 846 // see fit if they want to go against the "standard" way to do it. | 1014 // see fit if they want to go against the "standard" way to do it. |
| 847 bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc, | 1015 bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc, |
| 848 GrGLenum target, | 1016 GrGLenum target, |
| 849 const void* data, | 1017 const SkTArray<SkMipMapLevel>& texels, |
| 850 UploadType uploadType, | 1018 UploadType uploadType, |
| 851 int left, int top, int width, int height) { | 1019 int left, int top, int width, int height) { |
| 852 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig)); | 1020 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig)); |
| 853 SkASSERT(kTransfer_UploadType != uploadType && | 1021 SkASSERT(kTransfer_UploadType != uploadType && |
| 854 (data || kNewTexture_UploadType != uploadType)); | 1022 (texels[0].fTexelsOrOffset || kNewTexture_UploadType != uploadType) ); |
| 855 | 1023 |
| 856 // No support for software flip y, yet... | 1024 // No support for software flip y, yet... |
| 857 SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin); | 1025 SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin); |
| 858 | 1026 |
| 1027 const GrGLInterface* interface = this->glInterface(); | |
| 1028 const GrGLCaps& caps = this->glCaps(); | |
| 1029 | |
| 859 if (-1 == width) { | 1030 if (-1 == width) { |
| 860 width = desc.fWidth; | 1031 width = desc.fWidth; |
| 861 } | 1032 } |
| 862 #ifdef SK_DEBUG | 1033 #ifdef SK_DEBUG |
| 863 else { | 1034 else { |
| 864 SkASSERT(width <= desc.fWidth); | 1035 SkASSERT(width <= desc.fWidth); |
| 865 } | 1036 } |
| 866 #endif | 1037 #endif |
| 867 | 1038 |
| 868 if (-1 == height) { | 1039 if (-1 == height) { |
| 869 height = desc.fHeight; | 1040 height = desc.fHeight; |
| 870 } | 1041 } |
| 871 #ifdef SK_DEBUG | 1042 #ifdef SK_DEBUG |
| 872 else { | 1043 else { |
| 873 SkASSERT(height <= desc.fHeight); | 1044 SkASSERT(height <= desc.fHeight); |
| 874 } | 1045 } |
| 875 #endif | 1046 #endif |
| 876 | 1047 |
| 877 // Make sure that the width and height that we pass to OpenGL | |
| 878 // is a multiple of the block size. | |
| 879 size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height); | |
| 880 | |
| 881 // We only need the internal format for compressed 2D textures. | 1048 // We only need the internal format for compressed 2D textures. |
| 882 GrGLenum internalFormat; | 1049 GrGLenum internalFormat; |
| 883 if (!this->glCaps().getCompressedTexImageFormats(desc.fConfig, &internalForm at)) { | 1050 if (!caps.getCompressedTexImageFormats(desc.fConfig, &internalFormat)) { |
| 884 return false; | 1051 return false; |
| 885 } | 1052 } |
| 886 | 1053 |
| 887 if (kNewTexture_UploadType == uploadType) { | 1054 if (kNewTexture_UploadType == uploadType) { |
| 888 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); | 1055 return allocate_and_populate_compressed_texture(desc, *interface, target , internalFormat, |
| 889 GL_ALLOC_CALL(this->glInterface(), | 1056 texels); |
| 890 CompressedTexImage2D(target, | |
| 891 0, // level | |
| 892 internalFormat, | |
| 893 width, height, | |
| 894 0, // border | |
| 895 SkToInt(dataSize), | |
| 896 data)); | |
| 897 GrGLenum error = check_alloc_error(desc, this->glInterface()); | |
| 898 if (error != GR_GL_NO_ERROR) { | |
| 899 return false; | |
| 900 } | |
| 901 } else { | 1057 } else { |
| 902 // Paletted textures can't be updated. | 1058 // Paletted textures can't be updated. |
| 903 if (GR_GL_PALETTE8_RGBA8 == internalFormat) { | 1059 if (GR_GL_PALETTE8_RGBA8 == internalFormat) { |
| 904 return false; | 1060 return false; |
| 905 } | 1061 } |
| 906 GL_CALL(CompressedTexSubImage2D(target, | 1062 for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentM ipLevel++) { |
| 907 0, // level | 1063 if (texels[currentMipLevel].fTexelsOrOffset == nullptr) { |
| 908 left, top, | 1064 continue; |
| 909 width, height, | 1065 } |
| 910 internalFormat, | 1066 |
| 911 SkToInt(dataSize), | 1067 // Make sure that the width and height that we pass to OpenGL |
| 912 data)); | 1068 // is a multiple of the block size. |
| 1069 size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, | |
| 1070 texels[currentMipLevel] .fWidth, | |
| 1071 texels[currentMipLevel] .fHeight); | |
| 1072 GL_CALL(CompressedTexSubImage2D(target, | |
| 1073 currentMipLevel, | |
| 1074 left, top, | |
| 1075 texels[currentMipLevel].fWidth, | |
| 1076 texels[currentMipLevel].fHeight, | |
| 1077 internalFormat, | |
| 1078 dataSize, | |
| 1079 texels[currentMipLevel].fTexelsOrOff set)); | |
| 1080 } | |
| 913 } | 1081 } |
| 914 | 1082 |
| 915 return true; | 1083 return true; |
| 916 } | 1084 } |
| 917 | 1085 |
| 918 static bool renderbuffer_storage_msaa(const GrGLContext& ctx, | 1086 static bool renderbuffer_storage_msaa(const GrGLContext& ctx, |
| 919 int sampleCount, | 1087 int sampleCount, |
| 920 GrGLenum format, | 1088 GrGLenum format, |
| 921 int width, int height) { | 1089 int width, int height) { |
| 922 CLEAR_ERROR_BEFORE_ALLOC(ctx.interface()); | 1090 CLEAR_ERROR_BEFORE_ALLOC(ctx.interface()); |
| (...skipping 143 matching lines...) | |
| 1066 // SkDEBUGFAIL("null texture"); | 1234 // SkDEBUGFAIL("null texture"); |
| 1067 return nullptr; | 1235 return nullptr; |
| 1068 } | 1236 } |
| 1069 | 1237 |
| 1070 #if 0 && defined(SK_DEBUG) | 1238 #if 0 && defined(SK_DEBUG) |
| 1071 static size_t as_size_t(int x) { | 1239 static size_t as_size_t(int x) { |
| 1072 return x; | 1240 return x; |
| 1073 } | 1241 } |
| 1074 #endif | 1242 #endif |
| 1075 | 1243 |
| 1244 static GrGLTexture::IDDesc generate_gl_texture(const GrGLInterface* interface, | |
| 1245 GrGpuResource::LifeCycle lifeCycl e) { | |
| 1246 GrGLTexture::IDDesc idDesc; | |
| 1247 idDesc.fInfo.fID = 0; | |
| 1248 GR_GL_CALL(interface, GenTextures(1, &idDesc.fInfo.fID)); | |
| 1249 idDesc.fLifeCycle = lifeCycle; | |
| 1250 // We only support GL_TEXTURE_2D at the moment. | |
| 1251 idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D; | |
| 1252 return idDesc; | |
| 1253 } | |
| 1254 | |
| 1255 static GrGLTexture::TexParams set_initial_texture_params(const GrGLInterface* in terface, | |
| 1256 GrGLTexture::IDDesc idD esc) { | |
| 1257 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some | |
| 1258 // drivers have a bug where an FBO won't be complete if it includes a | |
| 1259 // texture that is not mipmap complete (considering the filter in use). | |
| 1260 GrGLTexture::TexParams initialTexParams; | |
| 1261 // we only set a subset here so invalidate first | |
| 1262 initialTexParams.invalidate(); | |
| 1263 initialTexParams.fMinFilter = GR_GL_NEAREST; | |
| 1264 initialTexParams.fMagFilter = GR_GL_NEAREST; | |
| 1265 initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE; | |
| 1266 initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE; | |
| 1267 GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget, | |
| 1268 GR_GL_TEXTURE_MAG_FILTER, | |
| 1269 initialTexParams.fMagFilter)); | |
| 1270 GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget, | |
| 1271 GR_GL_TEXTURE_MIN_FILTER, | |
| 1272 initialTexParams.fMinFilter)); | |
| 1273 GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget, | |
| 1274 GR_GL_TEXTURE_WRAP_S, | |
| 1275 initialTexParams.fWrapS)); | |
| 1276 GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget, | |
| 1277 GR_GL_TEXTURE_WRAP_T, | |
| 1278 initialTexParams.fWrapT)); | |
| 1279 return initialTexParams; | |
| 1280 } | |
| 1281 | |
| 1076 GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc, | 1282 GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc, |
| 1077 GrGpuResource::LifeCycle lifeCycle, | 1283 GrGpuResource::LifeCycle lifeCycle, |
| 1078 const void* srcData, size_t rowBytes) { | 1284 const SkTArray<SkMipMapLevel>& texels) { |
| 1079 // We fail if the MSAA was requested and is not available. | 1285 // We fail if the MSAA was requested and is not available. |
| 1080 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleC nt) { | 1286 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleC nt) { |
| 1081 //SkDebugf("MSAA RT requested but not supported on this platform."); | 1287 //SkDebugf("MSAA RT requested but not supported on this platform."); |
| 1082 return return_null_texture(); | 1288 return return_null_texture(); |
| 1083 } | 1289 } |
| 1084 | 1290 |
| 1085 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag); | 1291 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag); |
| 1086 | 1292 |
| 1087 GrGLTexture::IDDesc idDesc; | 1293 GrGLTexture::IDDesc idDesc = generate_gl_texture(this->glInterface(), lifeCy cle); |
| 1088 idDesc.fInfo.fID = 0; | |
| 1089 GL_CALL(GenTextures(1, &idDesc.fInfo.fID)); | |
| 1090 idDesc.fLifeCycle = lifeCycle; | |
| 1091 // We only support GL_TEXTURE_2D at the moment. | |
| 1092 idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D; | |
| 1093 | |
| 1094 if (!idDesc.fInfo.fID) { | 1294 if (!idDesc.fInfo.fID) { |
| 1095 return return_null_texture(); | 1295 return return_null_texture(); |
| 1096 } | 1296 } |
| 1097 | 1297 |
| 1098 this->setScratchTextureUnit(); | 1298 this->setScratchTextureUnit(); |
| 1099 GL_CALL(BindTexture(idDesc.fInfo.fTarget, idDesc.fInfo.fID)); | 1299 GL_CALL(BindTexture(idDesc.fInfo.fTarget, idDesc.fInfo.fID)); |
| 1100 | 1300 |
| 1101 if (renderTarget && this->glCaps().textureUsageSupport()) { | 1301 if (renderTarget && this->glCaps().textureUsageSupport()) { |
| 1102 // provides a hint about how this texture will be used | 1302 // provides a hint about how this texture will be used |
| 1103 GL_CALL(TexParameteri(idDesc.fInfo.fTarget, | 1303 GL_CALL(TexParameteri(idDesc.fInfo.fTarget, |
| 1104 GR_GL_TEXTURE_USAGE, | 1304 GR_GL_TEXTURE_USAGE, |
| 1105 GR_GL_FRAMEBUFFER_ATTACHMENT)); | 1305 GR_GL_FRAMEBUFFER_ATTACHMENT)); |
| 1106 } | 1306 } |
| 1107 | 1307 |
| 1108 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some | 1308 GrGLTexture::TexParams initialTexParams = set_initial_texture_params(this->g lInterface(), |
| 1109 // drivers have a bug where an FBO won't be complete if it includes a | 1309 idDesc) ; |
| 1110 // texture that is not mipmap complete (considering the filter in use). | 1310 |
| 1111 GrGLTexture::TexParams initialTexParams; | |
| 1112 // we only set a subset here so invalidate first | |
| 1113 initialTexParams.invalidate(); | |
| 1114 initialTexParams.fMinFilter = GR_GL_NEAREST; | |
| 1115 initialTexParams.fMagFilter = GR_GL_NEAREST; | |
| 1116 initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE; | |
| 1117 initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE; | |
| 1118 GL_CALL(TexParameteri(idDesc.fInfo.fTarget, | |
| 1119 GR_GL_TEXTURE_MAG_FILTER, | |
| 1120 initialTexParams.fMagFilter)); | |
| 1121 GL_CALL(TexParameteri(idDesc.fInfo.fTarget, | |
| 1122 GR_GL_TEXTURE_MIN_FILTER, | |
| 1123 initialTexParams.fMinFilter)); | |
| 1124 GL_CALL(TexParameteri(idDesc.fInfo.fTarget, | |
| 1125 GR_GL_TEXTURE_WRAP_S, | |
| 1126 initialTexParams.fWrapS)); | |
| 1127 GL_CALL(TexParameteri(idDesc.fInfo.fTarget, | |
| 1128 GR_GL_TEXTURE_WRAP_T, | |
| 1129 initialTexParams.fWrapT)); | |
| 1130 if (!this->uploadTexData(desc, idDesc.fInfo.fTarget, kNewTexture_UploadType, 0, 0, | 1311 if (!this->uploadTexData(desc, idDesc.fInfo.fTarget, kNewTexture_UploadType, 0, 0, |
| 1131 desc.fWidth, desc.fHeight, | 1312 desc.fWidth, desc.fHeight, |
| 1132 desc.fConfig, srcData, rowBytes)) { | 1313 desc.fConfig, texels)) { |
| 1133 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID)); | 1314 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID)); |
| 1134 return return_null_texture(); | 1315 return return_null_texture(); |
| 1135 } | 1316 } |
| 1136 | 1317 |
| 1137 GrGLTexture* tex; | 1318 GrGLTexture* tex; |
| 1138 if (renderTarget) { | 1319 if (renderTarget) { |
| 1139 // unbind the texture from the texture unit before binding it to the fra me buffer | 1320 // unbind the texture from the texture unit before binding it to the fra me buffer |
| 1140 GL_CALL(BindTexture(idDesc.fInfo.fTarget, 0)); | 1321 GL_CALL(BindTexture(idDesc.fInfo.fTarget, 0)); |
| 1141 GrGLRenderTarget::IDDesc rtIDDesc; | 1322 GrGLRenderTarget::IDDesc rtIDDesc; |
| 1142 | 1323 |
| 1143 if (!this->createRenderTargetObjects(desc, lifeCycle, idDesc.fInfo, &rtI DDesc)) { | 1324 if (!this->createRenderTargetObjects(desc, lifeCycle, idDesc.fInfo, &rtI DDesc)) { |
| 1144 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID)); | 1325 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID)); |
| 1145 return return_null_texture(); | 1326 return return_null_texture(); |
| 1146 } | 1327 } |
| 1147 tex = new GrGLTextureRenderTarget(this, desc, idDesc, rtIDDesc); | 1328 tex = new GrGLTextureRenderTarget(this, desc, idDesc, rtIDDesc); |
| 1148 } else { | 1329 } else { |
| 1149 tex = new GrGLTexture(this, desc, idDesc); | 1330 bool wasMipMapDataProvided = false; |
| 1331 if (texels.count() > 1) { | |
| 1332 wasMipMapDataProvided = true; | |
| 1333 } | |
| 1334 tex = new GrGLTexture(this, desc, idDesc, wasMipMapDataProvided); | |
| 1150 } | 1335 } |
| 1151 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp()); | 1336 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp()); |
| 1152 #ifdef TRACE_TEXTURE_CREATION | 1337 #ifdef TRACE_TEXTURE_CREATION |
| 1153 SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n", | 1338 SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n", |
| 1154 glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig); | 1339 glTexDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig); |
| 1155 #endif | 1340 #endif |
| 1156 return tex; | 1341 return tex; |
| 1157 } | 1342 } |
| 1158 | 1343 |
| 1159 GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc, | 1344 GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc, |
| 1160 GrGpuResource::LifeCycle lifeCycle , | 1345 GrGpuResource::LifeCycle lifeCycle , |
| 1161 const void* srcData) { | 1346 const SkTArray<SkMipMapLevel>& tex els) { |
| 1162 // Make sure that we're not flipping Y. | 1347 // Make sure that we're not flipping Y. |
| 1163 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { | 1348 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { |
| 1164 return return_null_texture(); | 1349 return return_null_texture(); |
| 1165 } | 1350 } |
| 1166 | 1351 |
| 1167 GrGLTexture::IDDesc idDesc; | 1352 GrGLTexture::IDDesc idDesc = generate_gl_texture(this->glInterface(), lifeCy cle); |
| 1168 idDesc.fInfo.fID = 0; | |
| 1169 GL_CALL(GenTextures(1, &idDesc.fInfo.fID)); | |
| 1170 idDesc.fLifeCycle = lifeCycle; | |
| 1171 // We only support GL_TEXTURE_2D at the moment. | |
| 1172 idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D; | |
| 1173 | |
| 1174 if (!idDesc.fInfo.fID) { | 1353 if (!idDesc.fInfo.fID) { |
| 1175 return return_null_texture(); | 1354 return return_null_texture(); |
| 1176 } | 1355 } |
| 1177 | 1356 |
| 1178 this->setScratchTextureUnit(); | 1357 this->setScratchTextureUnit(); |
| 1179 GL_CALL(BindTexture(idDesc.fInfo.fTarget, idDesc.fInfo.fID)); | 1358 GL_CALL(BindTexture(idDesc.fInfo.fTarget, idDesc.fInfo.fID)); |
| 1180 | 1359 |
| 1181 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some | 1360 GrGLTexture::TexParams initialTexParams = set_initial_texture_params(this->g lInterface(), |
| 1182 // drivers have a bug where an FBO won't be complete if it includes a | 1361 idDesc) ; |
| 1183 // texture that is not mipmap complete (considering the filter in use). | |
| 1184 GrGLTexture::TexParams initialTexParams; | |
| 1185 // we only set a subset here so invalidate first | |
| 1186 initialTexParams.invalidate(); | |
| 1187 initialTexParams.fMinFilter = GR_GL_NEAREST; | |
| 1188 initialTexParams.fMagFilter = GR_GL_NEAREST; | |
| 1189 initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE; | |
| 1190 initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE; | |
| 1191 GL_CALL(TexParameteri(idDesc.fInfo.fTarget, | |
| 1192 GR_GL_TEXTURE_MAG_FILTER, | |
| 1193 initialTexParams.fMagFilter)); | |
| 1194 GL_CALL(TexParameteri(idDesc.fInfo.fTarget, | |
| 1195 GR_GL_TEXTURE_MIN_FILTER, | |
| 1196 initialTexParams.fMinFilter)); | |
| 1197 GL_CALL(TexParameteri(idDesc.fInfo.fTarget, | |
| 1198 GR_GL_TEXTURE_WRAP_S, | |
| 1199 initialTexParams.fWrapS)); | |
| 1200 GL_CALL(TexParameteri(idDesc.fInfo.fTarget, | |
| 1201 GR_GL_TEXTURE_WRAP_T, | |
| 1202 initialTexParams.fWrapT)); | |
| 1203 | 1362 |
| 1204 if (!this->uploadCompressedTexData(desc, idDesc.fInfo.fTarget, srcData)) { | 1363 if (!this->uploadCompressedTexData(desc, idDesc.fInfo.fTarget, texels)) { |
| 1205 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID)); | 1364 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID)); |
| 1206 return return_null_texture(); | 1365 return return_null_texture(); |
| 1207 } | 1366 } |
| 1208 | 1367 |
| 1209 GrGLTexture* tex; | 1368 GrGLTexture* tex; |
| 1210 tex = new GrGLTexture(this, desc, idDesc); | 1369 tex = new GrGLTexture(this, desc, idDesc); |
| 1211 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp()); | 1370 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp()); |
| 1212 #ifdef TRACE_TEXTURE_CREATION | 1371 #ifdef TRACE_TEXTURE_CREATION |
| 1213 SkDebugf("--- new compressed texture [%d] size=(%d %d) config=%d\n", | 1372 SkDebugf("--- new compressed texture [%d] size=(%d %d) config=%d\n", |
| 1214 glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig); | 1373 glTexDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig); |
| 1215 #endif | 1374 #endif |
| 1216 return tex; | 1375 return tex; |
| 1217 } | 1376 } |
| 1218 | 1377 |
| 1219 namespace { | 1378 namespace { |
| 1220 | 1379 |
| 1221 const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount; | 1380 const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount; |
| 1222 | 1381 |
| 1223 void inline get_stencil_rb_sizes(const GrGLInterface* gl, | 1382 void inline get_stencil_rb_sizes(const GrGLInterface* gl, |
| 1224 GrGLStencilAttachment::Format* format) { | 1383 GrGLStencilAttachment::Format* format) { |
| (...skipping 1329 matching lines...) | |
| 2554 | 2713 |
| 2555 if (GrTextureParams::kMipMap_FilterMode == filterMode) { | 2714 if (GrTextureParams::kMipMap_FilterMode == filterMode) { |
| 2556 if (!this->caps()->mipMapSupport() || GrPixelConfigIsCompressed(texture- >config())) { | 2715 if (!this->caps()->mipMapSupport() || GrPixelConfigIsCompressed(texture- >config())) { |
| 2557 filterMode = GrTextureParams::kBilerp_FilterMode; | 2716 filterMode = GrTextureParams::kBilerp_FilterMode; |
| 2558 } | 2717 } |
| 2559 } | 2718 } |
| 2560 | 2719 |
| 2561 newTexParams.fMinFilter = glMinFilterModes[filterMode]; | 2720 newTexParams.fMinFilter = glMinFilterModes[filterMode]; |
| 2562 newTexParams.fMagFilter = glMagFilterModes[filterMode]; | 2721 newTexParams.fMagFilter = glMagFilterModes[filterMode]; |
| 2563 | 2722 |
| 2564 if (GrTextureParams::kMipMap_FilterMode == filterMode && | 2723 if (GrTextureParams::kMipMap_FilterMode == filterMode) { |
| 2565 texture->texturePriv().mipMapsAreDirty()) { | 2724 if (texture->texturePriv().mipMapsAreDirty()) { |
| 2566 GL_CALL(GenerateMipmap(target)); | 2725 GL_CALL(GenerateMipmap(target)); |
|
bsalomon 2016/01/14 13:57:23: Should we set the mip count here, if not already set?
Chris Blume 2016/01/20 06:15:08: Done.
| |
| 2567 texture->texturePriv().dirtyMipMaps(false); | 2726 texture->texturePriv().dirtyMipMaps(false); |
| 2727 } | |
| 2728 if (texture->texturePriv().hasMipMaps()) { | |
| 2729 if (texture->texturePriv().isMaxMipMapLevelSpecified()) { | |
| 2730 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_LOD, 0)); | |
| 2731 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL, 0)); | |
| 2732 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LOD, | |
| 2733 static_cast<float>(texture->texturePriv().maxMipMapLevel()))); | |
| 2734 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL, | |
| 2735 texture->texturePriv().maxMipMapLevel())); | |
| 2736 } | |
| 2737 } | |
| 2568 } | 2738 } |
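Regarding the reviewer's question about the mip count above: the value fed to GR_GL_TEXTURE_MAX_LEVEL is the index of the smallest level, which for a full chain on a width x height base is floor(log2(max(width, height))). A sketch of that computation, not code from this CL:

    // Top mip level index for a full chain on a w x h base texture.
    // A complete chain has floor(log2(max(w, h))) + 1 levels, so the last index
    // (the value GL_TEXTURE_MAX_LEVEL clamps sampling to) is floor(log2(max(w, h))).
    static int max_mip_map_level(int w, int h) {
        int largest = w > h ? w : h;
        int level = 0;
        while (largest > 1) {
            largest >>= 1;
            ++level;
        }
        return level;
    }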
| 2569 | 2739 |
| 2570 newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX()); | 2740 newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX()); |
| 2571 newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY()); | 2741 newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY()); |
| 2572 get_tex_param_swizzle(texture->config(), this->glCaps(), newTexParams.fSwizz leRGBA); | 2742 get_tex_param_swizzle(texture->config(), this->glCaps(), newTexParams.fSwizz leRGBA); |
| 2573 if (setAll || newTexParams.fMagFilter != oldTexParams.fMagFilter) { | 2743 if (setAll || newTexParams.fMagFilter != oldTexParams.fMagFilter) { |
| 2574 this->setTextureUnit(unitIdx); | 2744 this->setTextureUnit(unitIdx); |
| 2575 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, newTexParams.fMa gFilter)); | 2745 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, newTexParams.fMa gFilter)); |
| 2576 } | 2746 } |
| 2577 if (setAll || newTexParams.fMinFilter != oldTexParams.fMinFilter) { | 2747 if (setAll || newTexParams.fMinFilter != oldTexParams.fMinFilter) { |
| (...skipping 185 matching lines...) | |
| 2763 viewport->fWidth = surface->width(); | 2933 viewport->fWidth = surface->width(); |
| 2764 viewport->fHeight = surface->height(); | 2934 viewport->fHeight = surface->height(); |
| 2765 } else { | 2935 } else { |
| 2766 fStats.incRenderTargetBinds(); | 2936 fStats.incRenderTargetBinds(); |
| 2767 GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, rt->renderFBO ID())); | 2937 GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, rt->renderFBO ID())); |
| 2768 *viewport = rt->getViewport(); | 2938 *viewport = rt->getViewport(); |
| 2769 } | 2939 } |
| 2770 } | 2940 } |
| 2771 | 2941 |
| 2772 void GrGLGpu::unbindTextureFBOForCopy(GrGLenum fboTarget, GrSurface* surface) { | 2942 void GrGLGpu::unbindTextureFBOForCopy(GrGLenum fboTarget, GrSurface* surface) { |
| 2773 // bindSurfaceFBOForCopy temporarily binds textures that are not render targ ets to | 2943 // bindSurfaceFBOForCopy temporarily binds textures that are not render targ ets to |
| 2774 if (!surface->asRenderTarget()) { | 2944 if (!surface->asRenderTarget()) { |
| 2775 SkASSERT(surface->asTexture()); | 2945 SkASSERT(surface->asTexture()); |
| 2776 GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture()) ->target(); | 2946 GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture()) ->target(); |
| 2777 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget, | 2947 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget, |
| 2778 GR_GL_COLOR_ATTACHM ENT0, | 2948 GR_GL_COLOR_ATTACHM ENT0, |
| 2779 textureTarget, | 2949 textureTarget, |
| 2780 0, | 2950 0, |
| 2781 0)); | 2951 0)); |
| 2782 } | 2952 } |
| 2783 } | 2953 } |
| (...skipping 63 matching lines...) | |
| 2847 // None of our copy methods can handle a swizzle. TODO: Make copySurfaceAsDr aw handle the | 3017 // None of our copy methods can handle a swizzle. TODO: Make copySurfaceAsDr aw handle the |
| 2848 // swizzle. | 3018 // swizzle. |
| 2849 if (this->glCaps().glslCaps()->configOutputSwizzle(src->config()) != | 3019 if (this->glCaps().glslCaps()->configOutputSwizzle(src->config()) != |
| 2850 this->glCaps().glslCaps()->configOutputSwizzle(dst->config())) { | 3020 this->glCaps().glslCaps()->configOutputSwizzle(dst->config())) { |
| 2851 return false; | 3021 return false; |
| 2852 } | 3022 } |
| 2853 if (src->asTexture() && dst->asRenderTarget()) { | 3023 if (src->asTexture() && dst->asRenderTarget()) { |
| 2854 this->copySurfaceAsDraw(dst, src, srcRect, dstPoint); | 3024 this->copySurfaceAsDraw(dst, src, srcRect, dstPoint); |
| 2855 return true; | 3025 return true; |
| 2856 } | 3026 } |
| 2857 | 3027 |
| 2858 if (can_copy_texsubimage(dst, src, this)) { | 3028 if (can_copy_texsubimage(dst, src, this)) { |
| 2859 this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstPoint); | 3029 this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstPoint); |
| 2860 return true; | 3030 return true; |
| 2861 } | 3031 } |
| 2862 | 3032 |
| 2863 if (can_blit_framebuffer(dst, src, this)) { | 3033 if (can_blit_framebuffer(dst, src, this)) { |
| 2864 return this->copySurfaceAsBlitFramebuffer(dst, src, srcRect, dstPoint); | 3034 return this->copySurfaceAsBlitFramebuffer(dst, src, srcRect, dstPoint); |
| 2865 } | 3035 } |
| 2866 | 3036 |
| 2867 return false; | 3037 return false; |
| (...skipping 22 matching lines...) | |
| 2890 | 3060 |
| 2891 SkString vshaderTxt(version); | 3061 SkString vshaderTxt(version); |
| 2892 aVertex.appendDecl(this->glCaps().glslCaps(), &vshaderTxt); | 3062 aVertex.appendDecl(this->glCaps().glslCaps(), &vshaderTxt); |
| 2893 vshaderTxt.append(";"); | 3063 vshaderTxt.append(";"); |
| 2894 uTexCoordXform.appendDecl(this->glCaps().glslCaps(), &vshaderTxt); | 3064 uTexCoordXform.appendDecl(this->glCaps().glslCaps(), &vshaderTxt); |
| 2895 vshaderTxt.append(";"); | 3065 vshaderTxt.append(";"); |
| 2896 uPosXform.appendDecl(this->glCaps().glslCaps(), &vshaderTxt); | 3066 uPosXform.appendDecl(this->glCaps().glslCaps(), &vshaderTxt); |
| 2897 vshaderTxt.append(";"); | 3067 vshaderTxt.append(";"); |
| 2898 vTexCoord.appendDecl(this->glCaps().glslCaps(), &vshaderTxt); | 3068 vTexCoord.appendDecl(this->glCaps().glslCaps(), &vshaderTxt); |
| 2899 vshaderTxt.append(";"); | 3069 vshaderTxt.append(";"); |
| 2900 | 3070 |
| 2901 vshaderTxt.append( | 3071 vshaderTxt.append( |
| 2902 "// Copy Program VS\n" | 3072 "// Copy Program VS\n" |
| 2903 "void main() {" | 3073 "void main() {" |
| 2904 " v_texCoord = a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.z w;" | 3074 " v_texCoord = a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.z w;" |
| 2905 " gl_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;" | 3075 " gl_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;" |
| 2906 " gl_Position.zw = vec2(0, 1);" | 3076 " gl_Position.zw = vec2(0, 1);" |
| 2907 "}" | 3077 "}" |
| 2908 ); | 3078 ); |
| 2909 | 3079 |
| 2910 SkString fshaderTxt(version); | 3080 SkString fshaderTxt(version); |
| (...skipping 18 matching lines...) Expand all Loading... | |
| 2929 fsOutName = "gl_FragColor"; | 3099 fsOutName = "gl_FragColor"; |
| 2930 } | 3100 } |
| 2931 fshaderTxt.appendf( | 3101 fshaderTxt.appendf( |
| 2932 "// Copy Program FS\n" | 3102 "// Copy Program FS\n" |
| 2933 "void main() {" | 3103 "void main() {" |
| 2934 " %s = %s(u_texture, v_texCoord);" | 3104 " %s = %s(u_texture, v_texCoord);" |
| 2935 "}", | 3105 "}", |
| 2936 fsOutName, | 3106 fsOutName, |
| 2937 GrGLSLTexture2DFunctionName(kVec2f_GrSLType, this->glslGeneration()) | 3107 GrGLSLTexture2DFunctionName(kVec2f_GrSLType, this->glslGeneration()) |
| 2938 ); | 3108 ); |
| 2939 | 3109 |
| 2940 GL_CALL_RET(fCopyPrograms[i].fProgram, CreateProgram()); | 3110 GL_CALL_RET(fCopyPrograms[i].fProgram, CreateProgram()); |
| 2941 const char* str; | 3111 const char* str; |
| 2942 GrGLint length; | 3112 GrGLint length; |
| 2943 | 3113 |
| 2944 str = vshaderTxt.c_str(); | 3114 str = vshaderTxt.c_str(); |
| 2945 length = SkToInt(vshaderTxt.size()); | 3115 length = SkToInt(vshaderTxt.size()); |
| 2946 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms [i].fProgram, | 3116 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms [i].fProgram, |
| 2947 GR_GL_VERTEX_SHADER, &str, &length, 1, | 3117 GR_GL_VERTEX_SHADER, &str, &length, 1, |
| 2948 &fStats); | 3118 &fStats); |
| 2949 | 3119 |
| (...skipping 507 matching lines...) | |
| 3457 this->setVertexArrayID(gpu, 0); | 3627 this->setVertexArrayID(gpu, 0); |
| 3458 } | 3628 } |
| 3459 int attrCount = gpu->glCaps().maxVertexAttributes(); | 3629 int attrCount = gpu->glCaps().maxVertexAttributes(); |
| 3460 if (fDefaultVertexArrayAttribState.count() != attrCount) { | 3630 if (fDefaultVertexArrayAttribState.count() != attrCount) { |
| 3461 fDefaultVertexArrayAttribState.resize(attrCount); | 3631 fDefaultVertexArrayAttribState.resize(attrCount); |
| 3462 } | 3632 } |
| 3463 attribState = &fDefaultVertexArrayAttribState; | 3633 attribState = &fDefaultVertexArrayAttribState; |
| 3464 } | 3634 } |
| 3465 return attribState; | 3635 return attribState; |
| 3466 } | 3636 } |
| OLD | NEW |
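Note on the overall API change in this CL: texture creation, writes, and transfers now describe their source data as an SkTArray<SkMipMapLevel> rather than a single pointer plus rowBytes, and uploadTexData()/uploadCompressedTexData() walk that array, issuing one TexImage2D/TexSubImage2D call (or the compressed equivalents) per level index. A sketch of how a caller might describe a full mip chain, assuming the four-argument SkMipMapLevel constructor used above; the helper name and the level-pixel container are illustrative, not part of this CL:

    // Build the texels array for a full mip chain: each entry carries its own
    // pointer, row pitch, and dimensions, and level i is half the size of level i-1
    // (clamped to 1), which is the layout the per-level upload loops above expect.
    static SkTArray<SkMipMapLevel> describe_mip_chain(const SkTArray<const void*>& levelPixels,
                                                      int baseWidth, int baseHeight, size_t bpp) {
        SkTArray<SkMipMapLevel> texels;
        int w = baseWidth;
        int h = baseHeight;
        for (int i = 0; i < levelPixels.count(); ++i) {
            texels.push_back(SkMipMapLevel(levelPixels[i], w * bpp, w, h)); // tightly packed rows
            w = w > 1 ? w / 2 : 1;
            h = h > 1 ? h / 2 : 1;
        }
        return texels;
    }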