Chromium Code Reviews

Side by Side Diff: src/gpu/gl/GrGLGpu.cpp

Issue 1249543003: Creating functions for uploading a mipmapped texture. (Closed) Base URL: https://chromium.googlesource.com/skia.git@master
Patch Set: Fixing iOS. Created 4 years, 11 months ago
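The core of this patch set is that the upload entry points (onWritePixels, onTransferPixels, uploadTexData, uploadCompressedTexData, onCreateTexture, onCreateCompressedTexture) now take an SkTArray<SkMipMapLevel> instead of a bare pointer plus rowBytes, so a whole mip chain can be handed over in one call. For orientation only, here is a minimal caller-side sketch (not part of the CL) of how per-level data might be packaged for the new signatures; the SkMipMapLevel constructor shape (pixels/offset, row bytes, width, height) is inferred from its use in onTransferPixels below, while the helper name build_mip_levels and the assumption that SkTArray.h and the SkMipMapLevel definition are already visible are mine.

// Hypothetical helper, not part of this CL. Assumes the constructor
// SkMipMapLevel(texelsOrOffset, rowBytes, width, height) seen in
// onTransferPixels, and that SkTArray/SkMipMapLevel are in scope.
#include "SkTArray.h"

static SkTArray<SkMipMapLevel> build_mip_levels(const void* basePixels,
                                                size_t baseRowBytes,
                                                int baseWidth,
                                                int baseHeight,
                                                int levelCount) {
    SkTArray<SkMipMapLevel> texels(levelCount);
    int w = baseWidth;
    int h = baseHeight;
    for (int level = 0; level < levelCount; ++level) {
        // Only the base level carries data in this sketch; deeper levels stay
        // nullptr so the backend can allocate them now and populate them later
        // (the new-texture path below calls TexImage2D even for null data).
        const void* pixels = (0 == level) ? basePixels : nullptr;
        size_t rowBytes = (0 == level) ? baseRowBytes : 0;
        texels.push_back(SkMipMapLevel(pixels, rowBytes, w, h));
        w = (w > 1) ? w / 2 : 1;
        h = (h > 1) ? h / 2 : 1;
    }
    return texels;
}

Levels whose data pointer is null are skipped by the per-level TexSubImage2D loops in the diff, which is what lets a caller provide only the base level and have the remaining levels generated or filled in later.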
1 /* 1 /*
2 * Copyright 2011 Google Inc. 2 * Copyright 2011 Google Inc.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license that can be 4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file. 5 * found in the LICENSE file.
6 */ 6 */
7 7
8 8
9 #include "GrGLGpu.h" 9 #include "GrGLGpu.h"
10 #include "GrGLGLSL.h" 10 #include "GrGLGLSL.h"
11 #include "GrGLStencilAttachment.h" 11 #include "GrGLStencilAttachment.h"
12 #include "GrGLTextureRenderTarget.h" 12 #include "GrGLTextureRenderTarget.h"
13 #include "GrGpuResourcePriv.h" 13 #include "GrGpuResourcePriv.h"
14 #include "GrPipeline.h" 14 #include "GrPipeline.h"
15 #include "GrRenderTargetPriv.h" 15 #include "GrRenderTargetPriv.h"
16 #include "GrSurfacePriv.h" 16 #include "GrSurfacePriv.h"
17 #include "GrTexturePriv.h" 17 #include "GrTexturePriv.h"
18 #include "GrTypes.h" 18 #include "GrTypes.h"
19 #include "GrVertices.h" 19 #include "GrVertices.h"
20 #include "builders/GrGLShaderStringBuilder.h" 20 #include "builders/GrGLShaderStringBuilder.h"
21 #include "glsl/GrGLSL.h" 21 #include "glsl/GrGLSL.h"
22 #include "glsl/GrGLSLCaps.h" 22 #include "glsl/GrGLSLCaps.h"
23 #include "SkStrokeRec.h" 23 #include "SkStrokeRec.h"
24 #include "SkTemplates.h" 24 #include "SkTemplates.h"
25 #include "SkTypes.h"
25 26
26 #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X) 27 #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
27 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X) 28 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
28 29
29 #define SKIP_CACHE_CHECK true 30 #define SKIP_CACHE_CHECK true
30 31
31 #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR 32 #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
32 #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface) 33 #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface)
33 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call) 34 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call)
34 #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface) 35 #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface)
35 #else 36 #else
36 #define CLEAR_ERROR_BEFORE_ALLOC(iface) 37 #define CLEAR_ERROR_BEFORE_ALLOC(iface)
37 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call) 38 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call)
38 #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR 39 #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR
39 #endif 40 #endif
40 41
42 #if defined(GOOGLE3)
43 // Stack frame size is limited in GOOGLE3.
44 typedef SkAutoSMalloc<64 * 128> SkAutoSMallocTexels;
45 #else
46 typedef SkAutoSMalloc<128 * 128> SkAutoSMallocTexels;
47 #endif
41 48
42 /////////////////////////////////////////////////////////////////////////////// 49 ///////////////////////////////////////////////////////////////////////////////
43 50
44 51
45 static const GrGLenum gXfermodeEquation2Blend[] = { 52 static const GrGLenum gXfermodeEquation2Blend[] = {
46 // Basic OpenGL blend equations. 53 // Basic OpenGL blend equations.
47 GR_GL_FUNC_ADD, 54 GR_GL_FUNC_ADD,
48 GR_GL_FUNC_SUBTRACT, 55 GR_GL_FUNC_SUBTRACT,
49 GR_GL_FUNC_REVERSE_SUBTRACT, 56 GR_GL_FUNC_REVERSE_SUBTRACT,
50 57
(...skipping 407 matching lines...)
458 return nullptr; 465 return nullptr;
459 } 466 }
460 467
461 switch (ownership) { 468 switch (ownership) {
462 case kAdopt_GrWrapOwnership: 469 case kAdopt_GrWrapOwnership:
463 idDesc.fLifeCycle = GrGpuResource::kAdopted_LifeCycle; 470 idDesc.fLifeCycle = GrGpuResource::kAdopted_LifeCycle;
464 break; 471 break;
465 case kBorrow_GrWrapOwnership: 472 case kBorrow_GrWrapOwnership:
466 idDesc.fLifeCycle = GrGpuResource::kBorrowed_LifeCycle; 473 idDesc.fLifeCycle = GrGpuResource::kBorrowed_LifeCycle;
467 break; 474 break;
468 } 475 }
469 476
470 surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags; 477 surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags;
471 surfDesc.fWidth = desc.fWidth; 478 surfDesc.fWidth = desc.fWidth;
472 surfDesc.fHeight = desc.fHeight; 479 surfDesc.fHeight = desc.fHeight;
473 surfDesc.fConfig = desc.fConfig; 480 surfDesc.fConfig = desc.fConfig;
474 surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount()); 481 surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
475 // FIXME: this should be calling resolve_origin(), but Chrome code is currently 482 // FIXME: this should be calling resolve_origin(), but Chrome code is currently
476 // assuming the old behaviour, which is that backend textures are always 483 // assuming the old behaviour, which is that backend textures are always
477 // BottomLeft, even for non-RT's. Once Chrome is fixed, change this to: 484 // BottomLeft, even for non-RT's. Once Chrome is fixed, change this to:
478 // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget); 485 // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
(...skipping 27 matching lines...)
506 idDesc.fRTFBOID = static_cast<GrGLuint>(wrapDesc.fRenderTargetHandle); 513 idDesc.fRTFBOID = static_cast<GrGLuint>(wrapDesc.fRenderTargetHandle);
507 idDesc.fMSColorRenderbufferID = 0; 514 idDesc.fMSColorRenderbufferID = 0;
508 idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID; 515 idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
509 switch (ownership) { 516 switch (ownership) {
510 case kAdopt_GrWrapOwnership: 517 case kAdopt_GrWrapOwnership:
511 idDesc.fLifeCycle = GrGpuResource::kAdopted_LifeCycle; 518 idDesc.fLifeCycle = GrGpuResource::kAdopted_LifeCycle;
512 break; 519 break;
513 case kBorrow_GrWrapOwnership: 520 case kBorrow_GrWrapOwnership:
514 idDesc.fLifeCycle = GrGpuResource::kBorrowed_LifeCycle; 521 idDesc.fLifeCycle = GrGpuResource::kBorrowed_LifeCycle;
515 break; 522 break;
516 } 523 }
517 idDesc.fSampleConfig = GrRenderTarget::kUnified_SampleConfig; 524 idDesc.fSampleConfig = GrRenderTarget::kUnified_SampleConfig;
518 525
519 GrSurfaceDesc desc; 526 GrSurfaceDesc desc;
520 desc.fConfig = wrapDesc.fConfig; 527 desc.fConfig = wrapDesc.fConfig;
521 desc.fFlags = kCheckAllocation_GrSurfaceFlag | kRenderTarget_GrSurfaceFlag; 528 desc.fFlags = kCheckAllocation_GrSurfaceFlag | kRenderTarget_GrSurfaceFlag;
522 desc.fWidth = wrapDesc.fWidth; 529 desc.fWidth = wrapDesc.fWidth;
523 desc.fHeight = wrapDesc.fHeight; 530 desc.fHeight = wrapDesc.fHeight;
524 desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount()); 531 desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());
525 desc.fOrigin = resolve_origin(wrapDesc.fOrigin, true); 532 desc.fOrigin = resolve_origin(wrapDesc.fOrigin, true);
526 533
527 return GrGLRenderTarget::CreateWrapped(this, desc, idDesc, wrapDesc.fStencilBits); 534 return GrGLRenderTarget::CreateWrapped(this, desc, idDesc, wrapDesc.fStencilBits);
528 } 535 }
529 536
530 //////////////////////////////////////////////////////////////////////////////// 537 ////////////////////////////////////////////////////////////////////////////////
531 bool GrGLGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height, 538 bool GrGLGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
532 size_t rowBytes, GrPixelConfig srcConfig, 539 GrPixelConfig srcConfig,
533 DrawPreference* drawPreference, 540 DrawPreference* drawPreference,
534 WritePixelTempDrawInfo* tempDrawInfo) { 541 WritePixelTempDrawInfo* tempDrawInfo) {
535 if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) { 542 if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
536 return false; 543 return false;
537 } 544 }
538 545
539 // This subclass only allows writes to textures. If the dst is not a texture we have to draw 546 // This subclass only allows writes to textures. If the dst is not a texture we have to draw
540 // into it. We could use glDrawPixels on GLs that have it, but we don't today. 547 // into it. We could use glDrawPixels on GLs that have it, but we don't today.
541 if (!dstSurface->asTexture()) { 548 if (!dstSurface->asTexture()) {
542 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference); 549 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
(...skipping 64 matching lines...)
607 // Write or transfer of pixels is only implemented for TEXTURE_2D textures 614 // Write or transfer of pixels is only implemented for TEXTURE_2D textures
608 if (GR_GL_TEXTURE_2D != glTex->target()) { 615 if (GR_GL_TEXTURE_2D != glTex->target()) {
609 return false; 616 return false;
610 } 617 }
611 618
612 return true; 619 return true;
613 } 620 }
614 621
615 bool GrGLGpu::onWritePixels(GrSurface* surface, 622 bool GrGLGpu::onWritePixels(GrSurface* surface,
616 int left, int top, int width, int height, 623 int left, int top, int width, int height,
617 GrPixelConfig config, const void* buffer, 624 GrPixelConfig config,
618 size_t rowBytes) { 625 const SkTArray<SkMipMapLevel>& texels) {
619 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture()); 626 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
620 627
621 if (!check_write_and_transfer_input(glTex, surface, config)) { 628 if (!check_write_and_transfer_input(glTex, surface, config)) {
622 return false; 629 return false;
623 } 630 }
624 631
625 this->setScratchTextureUnit(); 632 this->setScratchTextureUnit();
626 GL_CALL(BindTexture(glTex->target(), glTex->textureID())); 633 GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
627 634
628 bool success = false; 635 bool success = false;
629 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) { 636 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
630 // We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels() 637 // We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels()
631 SkASSERT(config == glTex->desc().fConfig); 638 SkASSERT(config == glTex->desc().fConfig);
632 success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), buffer, 639 success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), texels, false,
633 kWrite_UploadType, left, top, width, height); 640 kWrite_UploadType, left, top, width, height);
634 } else { 641 } else {
635 success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType, 642 success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType,
636 left, top, width, height, config, buffer, rowBytes); 643 left, top, width, height, config, texels);
637 } 644 }
638 645
639 if (success) { 646 return success;
640 glTex->texturePriv().dirtyMipMaps(true);
641 return true;
642 }
643
644 return false;
645 } 647 }
646 648
647 bool GrGLGpu::onTransferPixels(GrSurface* surface, 649 bool GrGLGpu::onTransferPixels(GrSurface* surface,
648 int left, int top, int width, int height, 650 int left, int top, int width, int height,
649 GrPixelConfig config, GrTransferBuffer* buffer, 651 GrPixelConfig config, GrTransferBuffer* buffer,
650 size_t offset, size_t rowBytes) { 652 size_t offset, size_t rowBytes) {
651 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture()); 653 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
652 654
653 if (!check_write_and_transfer_input(glTex, surface, config)) { 655 if (!check_write_and_transfer_input(glTex, surface, config)) {
654 return false; 656 return false;
655 } 657 }
656 658
657 // For the moment, can't transfer compressed data 659 // For the moment, can't transfer compressed data
658 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) { 660 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
659 return false; 661 return false;
660 } 662 }
661 663
662 this->setScratchTextureUnit(); 664 this->setScratchTextureUnit();
663 GL_CALL(BindTexture(glTex->target(), glTex->textureID())); 665 GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
664 666
665 SkASSERT(!buffer->isMapped()); 667 SkASSERT(!buffer->isMapped());
666 GrGLTransferBuffer* glBuffer = reinterpret_cast<GrGLTransferBuffer*>(buffer); 668 GrGLTransferBuffer* glBuffer = reinterpret_cast<GrGLTransferBuffer*>(buffer);
667 // bind the transfer buffer 669 // bind the transfer buffer
668 SkASSERT(GR_GL_PIXEL_UNPACK_BUFFER == glBuffer->bufferType() || 670 SkASSERT(GR_GL_PIXEL_UNPACK_BUFFER == glBuffer->bufferType() ||
669 GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == glBuffer->bufferType()); 671 GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == glBuffer->bufferType());
670 GL_CALL(BindBuffer(glBuffer->bufferType(), glBuffer->bufferID())); 672 GL_CALL(BindBuffer(glBuffer->bufferType(), glBuffer->bufferID()));
671 673
672 bool success = false; 674 bool success = false;
675 SkMipMapLevel mipLevel(buffer, rowBytes, width, height);
676 const int mipLevelCount = 1;
677 SkTArray<SkMipMapLevel> texels(mipLevelCount);
678 texels.push_back(mipLevel);
673 success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType, 679 success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType,
674 left, top, width, height, config, buffer, rowBytes); 680 left, top, width, height, config, texels);
675 681
676 if (success) { 682 if (success) {
677 glTex->texturePriv().dirtyMipMaps(true); 683 glTex->texturePriv().dirtyMipMaps(true);
678 return true; 684 return true;
679 } 685 }
680 686
681 return false; 687 return false;
682 } 688 }
683 689
684 // For GL_[UN]PACK_ALIGNMENT. 690 // For GL_[UN]PACK_ALIGNMENT.
(...skipping 19 matching lines...)
704 710
705 static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc, 711 static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc,
706 const GrGLInterface* interface) { 712 const GrGLInterface* interface) {
707 if (SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag)) { 713 if (SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag)) {
708 return GR_GL_GET_ERROR(interface); 714 return GR_GL_GET_ERROR(interface);
709 } else { 715 } else {
710 return CHECK_ALLOC_ERROR(interface); 716 return CHECK_ALLOC_ERROR(interface);
711 } 717 }
712 } 718 }
713 719
720 /**
721 * Determines if TexStorage can be used when creating a texture.
722 *
723 * @param caps The capabilities of the GL device.
724 * @param standard The GL standard in use.
725 * @param desc The surface descriptor for the texture being created.
726 */
727 static bool can_use_tex_storage(const GrGLCaps& caps, const GrGLStandard& standard,
728 const GrSurfaceDesc& desc) {
729 bool useTexStorage = caps.texStorageSupport();
730 if (useTexStorage && kGL_GrGLStandard == standard) {
731 // 565 is not a sized internal format on desktop GL. So on desktop with
732 // 565 we always use an unsized internal format to let the system pick
733 // the best sized format to convert the 565 data to. Since TexStorage
734 // only allows sized internal formats we will instead use TexImage2D.
735 useTexStorage = desc.fConfig != kRGB_565_GrPixelConfig;
736 }
737
738 return useTexStorage;
739 }
740
741 /**
742 * Creates storage space for the texture and fills it with texels.
743 *
744 * @param desc The surface descriptor for the texture being created.
745 * @param interface The GL interface in use.
746 * @param useTexStorage The result of a call to can_use_tex_storage().
747 * @param internalFormat The data format used for the internal storage of the texture.
748 * @param externalFormat The data format used for the external storage of the texture.
749 * @param externalType The type of the data used for the external storage of the texture.
750 * @param texels The texel data of the texture being created.
751 * @param succeeded Set to true if allocating and populating the texture completed
752 * without error.
753 */
754 static void allocate_and_populate_uncompressed_texture(const GrSurfaceDesc& desc,
755 const GrGLInterface& interface,
756 GrGLenum target,
757 bool useTexStorage,
758 GrGLenum internalFormat,
759 GrGLenum externalFormat,
760 GrGLenum externalType,
761 const SkTArray<SkMipMapLevel>& texels,
762 bool* succeeded) {
763 CLEAR_ERROR_BEFORE_ALLOC(&interface);
764 if (useTexStorage) {
765 // We never resize or change formats of textures.
766 GL_ALLOC_CALL(&interface,
767 TexStorage2D(target,
768 texels.count(),
769 internalFormat,
770 desc.fWidth, desc.fHeight));
771
772 GrGLenum error = check_alloc_error(desc, &interface);
773 if (error != GR_GL_NO_ERROR) {
774 *succeeded = false;
775 } else {
776 for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
777 const void* currentMipData = texels[currentMipLevel].fTexelsOrOffset;
778 if (currentMipData == nullptr) {
779 continue;
780 }
781
782 GR_GL_CALL(&interface,
783 TexSubImage2D(target,
784 currentMipLevel,
785 0, // left
786 0, // top
787 texels[currentMipLevel].fWidth,
788 texels[currentMipLevel].fHeight,
789 externalFormat, externalType,
790 currentMipData));
791 }
792 *succeeded = true;
793 }
794 } else {
795 *succeeded = true;
796 for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
797 const void* currentMipData = texels[currentMipLevel].fTexelsOrOffset;
798 // Even if currentMipData is nullptr, continue to call TexImage2D.
799 // This will allocate texture memory which we can later populate.
800 GL_ALLOC_CALL(&interface,
801 TexImage2D(target,
802 currentMipLevel,
803 internalFormat,
804 texels[currentMipLevel].fWidth,
805 texels[currentMipLevel].fHeight,
806 0, // border
807 externalFormat, externalType,
808 currentMipData));
809 GrGLenum error = check_alloc_error(desc, &interface);
810 if (error != GR_GL_NO_ERROR) {
811 *succeeded = false;
812 break;
813 }
814 }
815
816 if (*succeeded == true) {
817 GR_GL_CALL(&interface,
818 TexParameteri(target, GR_GL_TEXTURE_MIN_LOD, 0));
819 GR_GL_CALL(&interface,
820 TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL, 0));
821 if (texels.count() > 1) {
822 // If we have mipmaps, we can go ahead and limit the max mipmap level.
823 // Otherwise, mipmaps may be generated later and we do not want to
824 // limit.
825 GR_GL_CALL(&interface,
826 TexParameteri(target, GR_GL_TEXTURE_MAX_LOD, static_cast<float>(texels.count() - 1)));
827 GR_GL_CALL(&interface,
828 TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL, texels.count() - 1));
829 }
830
831 }
832 }
833 }
834
835 /**
836 * Creates storage space for the texture and fills it with texels.
837 *
838 * @param desc The surface descriptor for the texture being created.
839 * @param interface The GL interface in use.
840 * @param useTexStorage The result of a call to can_use_tex_storage().
841 * @param internalFormat The data format used for the internal storage of the texture.
842 * @param texels The texel data of the texture being created.
843 */
844 static bool allocate_and_populate_compressed_texture(const GrSurfaceDesc& desc,
845 const GrGLInterface& interface,
846 GrGLenum target,
847 bool useTexStorage, GrGLenum internalFormat,
848 const SkTArray<SkMipMapLevel>& texels) {
849 CLEAR_ERROR_BEFORE_ALLOC(&interface);
850 if (useTexStorage) {
851 // We never resize or change formats of textures.
852 GL_ALLOC_CALL(&interface,
853 TexStorage2D(target,
854 texels.count(),
855 internalFormat,
856 desc.fWidth, desc.fHeight));
857 GrGLenum error = check_alloc_error(desc, &interface);
858 if (error != GR_GL_NO_ERROR) {
859 return false;
860 } else {
861 for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
862 const void* currentMipData = texels[currentMipLevel].fTexelsOrOffset;
863 if (currentMipData == nullptr) {
864 continue;
865 }
866
867 int width = texels[currentMipLevel].fWidth;
868 int height = texels[currentMipLevel].fHeight;
869
870 // Make sure that the width and height that we pass to OpenGL
871 // is a multiple of the block size.
872 size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height);
873 GR_GL_CALL(&interface, CompressedTexSubImage2D(target,
874 currentMipLevel,
875 0, // left
876 0, // top
877 width,
878 height,
879 internalFormat, SkToInt(dataSize),
880 currentMipData));
881 }
882 }
883 } else {
884 for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
885
886 int width = texels[currentMipLevel].fWidth;
887 int height = texels[currentMipLevel].fHeight;
888
889 // Make sure that the width and height that we pass to OpenGL
890 // is a multiple of the block size.
891 size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height);
892
893 GL_ALLOC_CALL(&interface,
894 CompressedTexImage2D(target,
895 currentMipLevel,
896 internalFormat,
897 width,
898 height,
899 0, // border
900 SkToInt(dataSize),
901 texels[currentMipLevel].fTexelsOrOffset));
902
903 GrGLenum error = check_alloc_error(desc, &interface);
904 if (error != GR_GL_NO_ERROR) {
905 return false;
906 }
907 }
908
909 GR_GL_CALL(&interface,
910 TexParameteri(target, GR_GL_TEXTURE_MIN_LOD, 0));
911 GR_GL_CALL(&interface,
912 TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL, 0));
913 if (texels.count() > 1) {
914 // If we have mipmaps, we can go ahead and limit the max mipmap level.
915 // Otherwise, mipmaps may be generated later and we do not want to
916 // limit.
917 GR_GL_CALL(&interface,
918 TexParameteri(target, GR_GL_TEXTURE_MAX_LOD, static_cast<float>(texels.count() - 1)));
919
920 GR_GL_CALL(&interface,
921 TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL, texels.count() - 1));
922 }
923 }
924 return true;
925 }
926
927 /**
928 * After a texture is created, any state which was altered during its creation
929 * needs to be restored.
930 *
931 * @param interface The GL interface to use.
932 * @param caps The capabilities of the GL device.
933 * @param restoreGLRowLength Should the row length unpacking be restored?
934 * @param glFlipY Did GL flip the texture vertically?
935 */
936 static void restore_pixelstore_state(const GrGLInterface& interface, const GrGLCaps& caps,
937 bool restoreGLRowLength, bool glFlipY) {
938 if (restoreGLRowLength) {
939 SkASSERT(caps.unpackRowLengthSupport());
940 GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
941 }
942 if (glFlipY) {
943 GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
944 }
945 }
946
714 bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc, 947 bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
715 GrGLenum target, 948 GrGLenum target,
716 UploadType uploadType, 949 UploadType uploadType,
717 int left, int top, int width, int height, 950 int left, int top, int width, int height,
718 GrPixelConfig dataConfig, 951 GrPixelConfig dataConfig,
719 const void* dataOrOffset, 952 const SkTArray<SkMipMapLevel>& texels) {
720 size_t rowBytes) {
721 SkASSERT(dataOrOffset || kNewTexture_UploadType == uploadType ||
722 kTransfer_UploadType == uploadType);
723
724 // If we're uploading compressed data then we should be using uploadCompressedTexData 953 // If we're uploading compressed data then we should be using uploadCompressedTexData
725 SkASSERT(!GrPixelConfigIsCompressed(dataConfig)); 954 SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
726 955
727 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig)); 956 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
728 957
958 // texels is const.
959 // But we may need to flip the texture vertically to prepare it.
960 // Rather than flip in place and alter the incoming data,
961 // we allocate a new buffer to flip into.
962 // This means we need to make a non-const shallow copy of texels.
963 SkTArray<SkMipMapLevel> texelsShallowCopy(texels);
964
965 for (int currentMipLevel = texelsShallowCopy.count() - 1; currentMipLevel >= 0;
966 currentMipLevel--) {
967 SkASSERT(texelsShallowCopy[currentMipLevel].fTexelsOrOffset
968 || kNewTexture_UploadType == uploadType || kTransfer_UploadType == uploadType);
969 }
970
971
972 const GrGLInterface* interface = this->glInterface();
973 const GrGLCaps& caps = this->glCaps();
974 GrGLStandard standard = this->glStandard();
975
729 size_t bpp = GrBytesPerPixel(dataConfig); 976 size_t bpp = GrBytesPerPixel(dataConfig);
730 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top, 977 for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
731 &width, &height, &dataOrOffset, &rowBytes)) { 978 if (texelsShallowCopy[currentMipLevel].fTexelsOrOffset == nullptr) {
732 return false; 979 continue;
733 } 980 }
734 size_t trimRowBytes = width * bpp; 981
735 982 if (texelsShallowCopy[currentMipLevel].fHeight > SK_MaxS32
736 // in case we need a temporary, trimmed copy of the src pixels 983 || texelsShallowCopy[currentMipLevel].fWidth > SK_MaxS32) {
737 #if defined(GOOGLE3) 984 return false;
738 // Stack frame size is limited in GOOGLE3. 985 }
739 SkAutoSMalloc<64 * 128> tempStorage; 986 int currentMipHeight = texelsShallowCopy[currentMipLevel].fHeight;
740 #else 987 int currentMipWidth = texelsShallowCopy[currentMipLevel].fWidth;
741 SkAutoSMalloc<128 * 128> tempStorage; 988 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
742 #endif 989 &currentMipWidth,
990 &currentMipHeight,
991 &texelsShallowCopy[currentMipLevel].fTexelsOrOffset,
992 &texelsShallowCopy[currentMipLevel].fRowBytes)) {
993 return false;
994 }
995 if (currentMipWidth < 0 || currentMipHeight < 0) {
996 return false;
997 }
998 texelsShallowCopy[currentMipLevel].fWidth = currentMipWidth;
999 texelsShallowCopy[currentMipLevel].fHeight = currentMipHeight;
1000 }
1001
1002 bool useTexStorage = can_use_tex_storage(caps, standard, desc);
1003 // We can only use TexStorage if we know we will not later change the storage requirements.
1004 // This means if we may later want to generate mipmaps, we cannot use TexStorage.
1005 // Right now, we cannot know if we will later generate mipmaps or not.
1006 // The only time we can use TexStorage is when we already have the mipmaps.
1007 useTexStorage &= texelsShallowCopy.count() > 1;
743 1008
744 // Internal format comes from the texture desc. 1009 // Internal format comes from the texture desc.
745 GrGLenum internalFormat = 1010 GrGLenum internalFormat = caps.configGLFormats(desc.fConfig).fInternalFormatTexImage;
746 this->glCaps().configGLFormats(desc.fConfig).fInternalFormatTexImage;
747 1011
748 // External format and type come from the upload data. 1012 // External format and type come from the upload data.
749 GrGLenum externalFormat = 1013 GrGLenum externalFormat = caps.configGLFormats(dataConfig).fExternalFormatForTexImage;
750 this->glCaps().configGLFormats(dataConfig).fExternalFormatForTexImage; 1014 GrGLenum externalType = caps.configGLFormats(dataConfig).fExternalType;
751 GrGLenum externalType = this->glCaps().configGLFormats(dataConfig).fExternalType; 1015
752
753 /*
754 * Check whether to allocate a temporary buffer for flipping y or
755 * because our srcData has extra bytes past each row. If so, we need
756 * to trim those off here, since GL ES may not let us specify
757 * GL_UNPACK_ROW_LENGTH.
758 */
759 bool restoreGLRowLength = false;
760 bool swFlipY = false; 1016 bool swFlipY = false;
761 bool glFlipY = false; 1017 bool glFlipY = false;
762 if (dataOrOffset) { 1018
763 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { 1019 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
764 if (this->glCaps().unpackFlipYSupport()) { 1020 if (caps.unpackFlipYSupport()) {
765 glFlipY = true; 1021 glFlipY = true;
766 } else { 1022 } else {
767 swFlipY = true; 1023 swFlipY = true;
768 } 1024 }
769 } 1025 }
770 if (this->glCaps().unpackRowLengthSupport() && !swFlipY) { 1026
1027 bool restoreGLRowLength = false;
1028
1029 // in case we need a temporary, trimmed copy of the src pixels
1030 SkAutoSMallocTexels tempStorage;
1031
1032 // find the combined size of all the mip levels and the relative offset of
1033 // each into the collective buffer
1034 size_t combined_buffer_size = 0;
1035 SkTArray<size_t> individual_mip_offsets(texelsShallowCopy.count());
1036 for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
1037 const size_t trimmedSize = texels[currentMipLevel].fWidth * bpp *
1038 texelsShallowCopy[currentMipLevel].fHeight;
1039 individual_mip_offsets.push_back(combined_buffer_size);
1040 combined_buffer_size += trimmedSize;
1041 }
1042 char* buffer = (char*)tempStorage.reset(combined_buffer_size);
1043
1044 for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
1045 if (texelsShallowCopy[currentMipLevel].fTexelsOrOffset == nullptr) {
1046 continue;
1047 }
1048
1049 const size_t trimRowBytes = texelsShallowCopy[currentMipLevel].fWidth * bpp;
1050
1051 /*
1052 * check whether to allocate a temporary buffer for flipping y or
1053 * because our srcData has extra bytes past each row. If so, we need
1054 * to trim those off here, since GL ES may not let us specify
1055 * GL_UNPACK_ROW_LENGTH.
1056 */
1057 restoreGLRowLength = false;
1058
1059 const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
1060 if (caps.unpackRowLengthSupport() && !swFlipY) {
771 // can't use this for flipping, only non-neg values allowed. :( 1061 // can't use this for flipping, only non-neg values allowed. :(
772 if (rowBytes != trimRowBytes) { 1062 if (rowBytes != trimRowBytes) {
773 GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp); 1063 GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
774 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength)); 1064 GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
775 restoreGLRowLength = true; 1065 restoreGLRowLength = true;
776 } 1066 }
777 } else if (kTransfer_UploadType != uploadType) { 1067 } else if (kTransfer_UploadType != uploadType) {
778 if (trimRowBytes != rowBytes || swFlipY) { 1068 if (trimRowBytes != rowBytes || swFlipY) {
1069 const int height = texelsShallowCopy[currentMipLevel].fHeight;
779 // copy data into our new storage, skipping the trailing bytes 1070 // copy data into our new storage, skipping the trailing bytes
780 size_t trimSize = height * trimRowBytes; 1071 const char* src = (const char*)texelsShallowCopy[currentMipLevel].fTexelsOrOffset;
781 const char* src = (const char*)dataOrOffset; 1072 if (swFlipY && height >= 1) {
782 if (swFlipY) {
783 src += (height - 1) * rowBytes; 1073 src += (height - 1) * rowBytes;
784 } 1074 }
785 char* dst = (char*)tempStorage.reset(trimSize); 1075 char* dst = buffer + individual_mip_offsets[currentMipLevel];
786 for (int y = 0; y < height; y++) { 1076 for (int y = 0; y < height; y++) {
787 memcpy(dst, src, trimRowBytes); 1077 memcpy(dst, src, trimRowBytes);
788 if (swFlipY) { 1078 if (swFlipY) {
789 src -= rowBytes; 1079 src -= rowBytes;
790 } else { 1080 } else {
791 src += rowBytes; 1081 src += rowBytes;
792 } 1082 }
793 dst += trimRowBytes; 1083 dst += trimRowBytes;
794 } 1084 }
795 // now point data to our copied version 1085 // now point data to our copied version
796 dataOrOffset = tempStorage.get(); 1086 texelsShallowCopy[currentMipLevel] =
1087 SkMipMapLevel(buffer + individual_mip_offsets[currentMipLevel],
1088 trimRowBytes,
1089 texelsShallowCopy[currentMipLevel].fWidth,
1090 texelsShallowCopy[currentMipLevel].fHeight);
797 } 1091 }
798 } else { 1092 } else {
799 return false; 1093 return false;
800 } 1094 }
801 if (glFlipY) { 1095 if (glFlipY) {
802 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE)); 1096 GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
803 } 1097 }
804 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(dataConfig))); 1098 GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ALIGNMENT,
1099 config_alignment(desc.fConfig)));
805 } 1100 }
1101
806 bool succeeded = true; 1102 bool succeeded = true;
807 if (kNewTexture_UploadType == uploadType) { 1103 if (kNewTexture_UploadType == uploadType &&
808 if (dataOrOffset && 1104 0 == left && 0 == top &&
809 !(0 == left && 0 == top && desc.fWidth == width && desc.fHeight == height)) { 1105 desc.fWidth == width && desc.fHeight == height) {
810 succeeded = false; 1106 allocate_and_populate_uncompressed_texture(desc, *interface, target, useTexStorage,
811 } else { 1107 internalFormat, externalFormat, externalType,
812 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); 1108 texelsShallowCopy, &succeeded);
813 GL_ALLOC_CALL(this->glInterface(), TexImage2D(target, 0, internalFormat, desc.fWidth,
814 desc.fHeight, 0, externalFormat,
815 externalType, dataOrOffset));
816 GrGLenum error = check_alloc_error(desc, this->glInterface());
817 if (error != GR_GL_NO_ERROR) {
818 succeeded = false;
819 }
820 }
821 } else { 1109 } else {
822 if (swFlipY || glFlipY) { 1110 if (swFlipY || glFlipY) {
823 top = desc.fHeight - (top + height); 1111 top = desc.fHeight - (top + height);
824 } 1112 }
825 GL_CALL(TexSubImage2D(target, 1113 for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count();
826 0, // level 1114 currentMipLevel++) {
827 left, top, 1115 if (texelsShallowCopy[currentMipLevel].fTexelsOrOffset == nullptr) {
828 width, height, 1116 continue;
829 externalFormat, externalType, dataOrOffset)); 1117 }
1118
1119 GL_CALL(TexSubImage2D(target,
1120 currentMipLevel,
1121 left, top,
1122 texelsShallowCopy[currentMipLevel].fWidth,
1123 texelsShallowCopy[currentMipLevel].fHeight,
1124 externalFormat, externalType,
1125 texelsShallowCopy[currentMipLevel].fTexelsOrOffset));
1126 }
830 } 1127 }
831 1128
832 if (restoreGLRowLength) { 1129 restore_pixelstore_state(*interface, caps, restoreGLRowLength, glFlipY);
833 SkASSERT(this->glCaps().unpackRowLengthSupport()); 1130
834 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
835 }
836 if (glFlipY) {
837 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
838 }
839 return succeeded; 1131 return succeeded;
840 } 1132 }
841 1133
842 // TODO: This function is using a lot of wonky semantics like, if width == -1 1134 // TODO: This function is using a lot of wonky semantics like, if width == -1
843 // then set width = desc.fWidth ... blah. A better way to do it might be to 1135 // then set width = desc.fWidth ... blah. A better way to do it might be to
844 // create a CompressedTexData struct that takes a desc/ptr and figures out 1136 // create a CompressedTexData struct that takes a desc/ptr and figures out
845 // the proper upload semantics. Then users can construct this function how they 1137 // the proper upload semantics. Then users can construct this function how they
846 // see fit if they want to go against the "standard" way to do it. 1138 // see fit if they want to go against the "standard" way to do it.
847 bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc, 1139 bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
848 GrGLenum target, 1140 GrGLenum target,
849 const void* data, 1141 const SkTArray<SkMipMapLevel>& texels,
1142 bool isNewTexture,
850 UploadType uploadType, 1143 UploadType uploadType,
851 int left, int top, int width, int height) { 1144 int left, int top, int width, int height) {
852 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig)); 1145 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
853 SkASSERT(kTransfer_UploadType != uploadType && 1146 SkASSERT(kTransfer_UploadType != uploadType &&
854 (data || kNewTexture_UploadType != uploadType)); 1147 (texels[0].fTexelsOrOffset || kNewTexture_UploadType != uploadType));
855 1148
856 // No support for software flip y, yet... 1149 // No support for software flip y, yet...
857 SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin); 1150 SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin);
858 1151
1152 const GrGLInterface* interface = this->glInterface();
1153 const GrGLCaps& caps = this->glCaps();
1154 GrGLStandard standard = this->glStandard();
1155
859 if (-1 == width) { 1156 if (-1 == width) {
860 width = desc.fWidth; 1157 width = desc.fWidth;
861 } 1158 }
862 #ifdef SK_DEBUG 1159 #ifdef SK_DEBUG
863 else { 1160 else {
864 SkASSERT(width <= desc.fWidth); 1161 SkASSERT(width <= desc.fWidth);
865 } 1162 }
866 #endif 1163 #endif
867 1164
868 if (-1 == height) { 1165 if (-1 == height) {
869 height = desc.fHeight; 1166 height = desc.fHeight;
870 } 1167 }
871 #ifdef SK_DEBUG 1168 #ifdef SK_DEBUG
872 else { 1169 else {
873 SkASSERT(height <= desc.fHeight); 1170 SkASSERT(height <= desc.fHeight);
874 } 1171 }
875 #endif 1172 #endif
876 1173
877 // Make sure that the width and height that we pass to OpenGL 1174 bool useTexStorage = can_use_tex_storage(caps, standard, desc);
878 // is a multiple of the block size. 1175 // We can only use TexStorage if we know we will not later change the storage requirements.
879 size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height); 1176 // This means if we may later want to generate mipmaps, we cannot use TexStorage.
1177 // Right now, we cannot know if we will later generate mipmaps or not.
1178 // The only time we can use TexStorage is when we already have the mipmaps.
1179 useTexStorage &= texels.count() > 1;
880 1180
881 // We only need the internal format for compressed 2D textures. There is no 1181 // We only need the internal format for compressed 2D textures. There is no
882 // sized vs base internal format distinction for compressed textures. 1182 // sized vs base internal format distinction for compressed textures.
883 GrGLenum internalFormat = this->glCaps().configGLFormats(desc.fConfig).fSizedInternalFormat; 1183 GrGLenum internalFormat = caps.configGLFormats(desc.fConfig).fInternalFormatTexImage;
884 1184
885 if (kNewTexture_UploadType == uploadType) { 1185 if (kNewTexture_UploadType == uploadType) {
886 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); 1186 return allocate_and_populate_compressed_texture(desc, *interface, target, useTexStorage,
887 GL_ALLOC_CALL(this->glInterface(), 1187 internalFormat, texels);
888 CompressedTexImage2D(target,
889 0, // level
890 internalFormat,
891 width, height,
892 0, // border
893 SkToInt(dataSize),
894 data));
895 GrGLenum error = check_alloc_error(desc, this->glInterface());
896 if (error != GR_GL_NO_ERROR) {
897 return false;
898 }
899 } else { 1188 } else {
900 // Paletted textures can't be updated. 1189 // Paletted textures can't be updated.
901 if (GR_GL_PALETTE8_RGBA8 == internalFormat) { 1190 if (GR_GL_PALETTE8_RGBA8 == internalFormat) {
902 return false; 1191 return false;
903 } 1192 }
904 GL_CALL(CompressedTexSubImage2D(target, 1193 for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
905 0, // level 1194 if (texels[currentMipLevel].fTexelsOrOffset == nullptr) {
906 left, top, 1195 continue;
907 width, height, 1196 }
908 internalFormat, 1197
909 SkToInt(dataSize), 1198 // Make sure that the width and height that we pass to OpenGL
910 data)); 1199 // is a multiple of the block size.
1200 size_t dataSize = GrCompressedFormatDataSize(desc.fConfig,
1201 texels[currentMipLevel].fWidth,
1202 texels[currentMipLevel].fHeight);
1203 GL_CALL(CompressedTexSubImage2D(target,
1204 currentMipLevel,
1205 left, top,
1206 texels[currentMipLevel].fWidth,
1207 texels[currentMipLevel].fHeight,
1208 internalFormat,
1209 dataSize,
1210 texels[currentMipLevel].fTexelsOrOffset));
1211 }
911 } 1212 }
912 1213
913 return true; 1214 return true;
914 } 1215 }
915 1216
916 static bool renderbuffer_storage_msaa(const GrGLContext& ctx, 1217 static bool renderbuffer_storage_msaa(const GrGLContext& ctx,
917 int sampleCount, 1218 int sampleCount,
918 GrGLenum format, 1219 GrGLenum format,
919 int width, int height) { 1220 int width, int height) {
920 CLEAR_ERROR_BEFORE_ALLOC(ctx.interface()); 1221 CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
(...skipping 146 matching lines...)
1067 // SkDEBUGFAIL("null texture"); 1368 // SkDEBUGFAIL("null texture");
1068 return nullptr; 1369 return nullptr;
1069 } 1370 }
1070 1371
1071 #if 0 && defined(SK_DEBUG) 1372 #if 0 && defined(SK_DEBUG)
1072 static size_t as_size_t(int x) { 1373 static size_t as_size_t(int x) {
1073 return x; 1374 return x;
1074 } 1375 }
1075 #endif 1376 #endif
1076 1377
1378 static GrGLTexture::IDDesc generate_gl_texture(const GrGLInterface* interface,
1379 GrGpuResource::LifeCycle lifeCycle) {
1380 GrGLTexture::IDDesc idDesc;
1381 idDesc.fInfo.fID = 0;
1382 GR_GL_CALL(interface, GenTextures(1, &idDesc.fInfo.fID));
1383 idDesc.fLifeCycle = lifeCycle;
1384 // We only support GL_TEXTURE_2D at the moment.
1385 idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
1386 return idDesc;
1387 }
1388
1389 static GrGLTexture::TexParams set_initial_texture_params(const GrGLInterface* interface,
1390 GrGLTexture::IDDesc idDesc) {
1391 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
1392 // drivers have a bug where an FBO won't be complete if it includes a
1393 // texture that is not mipmap complete (considering the filter in use).
1394 GrGLTexture::TexParams initialTexParams;
1395 // we only set a subset here so invalidate first
1396 initialTexParams.invalidate();
1397 initialTexParams.fMinFilter = GR_GL_NEAREST;
1398 initialTexParams.fMagFilter = GR_GL_NEAREST;
1399 initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
1400 initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
1401 GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget,
1402 GR_GL_TEXTURE_MAG_FILTER,
1403 initialTexParams.fMagFilter));
1404 GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget,
1405 GR_GL_TEXTURE_MIN_FILTER,
1406 initialTexParams.fMinFilter));
1407 GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget,
1408 GR_GL_TEXTURE_WRAP_S,
1409 initialTexParams.fWrapS));
1410 GR_GL_CALL(interface, TexParameteri(idDesc.fInfo.fTarget,
1411 GR_GL_TEXTURE_WRAP_T,
1412 initialTexParams.fWrapT));
1413 return initialTexParams;
1414 }
1415
1077 GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc, 1416 GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
1078 GrGpuResource::LifeCycle lifeCycle, 1417 GrGpuResource::LifeCycle lifeCycle,
1079 const void* srcData, size_t rowBytes) { 1418 const SkTArray<SkMipMapLevel>& texels) {
1080 // We fail if the MSAA was requested and is not available. 1419 // We fail if the MSAA was requested and is not available.
1081 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) { 1420 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
1082 //SkDebugf("MSAA RT requested but not supported on this platform."); 1421 //SkDebugf("MSAA RT requested but not supported on this platform.");
1083 return return_null_texture(); 1422 return return_null_texture();
1084 } 1423 }
1085 1424
1086 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag); 1425 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
1087 1426
1088 GrGLTexture::IDDesc idDesc; 1427 GrGLTexture::IDDesc idDesc = generate_gl_texture(this->glInterface(), lifeCycle);
1089 idDesc.fInfo.fID = 0;
1090 GL_CALL(GenTextures(1, &idDesc.fInfo.fID));
1091 idDesc.fLifeCycle = lifeCycle;
1092 // We only support GL_TEXTURE_2D at the moment.
1093 idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
1094
1095 if (!idDesc.fInfo.fID) { 1428 if (!idDesc.fInfo.fID) {
1096 return return_null_texture(); 1429 return return_null_texture();
1097 } 1430 }
1098 1431
1099 this->setScratchTextureUnit(); 1432 this->setScratchTextureUnit();
1100 GL_CALL(BindTexture(idDesc.fInfo.fTarget, idDesc.fInfo.fID)); 1433 GL_CALL(BindTexture(idDesc.fInfo.fTarget, idDesc.fInfo.fID));
1101 1434
1102 if (renderTarget && this->glCaps().textureUsageSupport()) { 1435 if (renderTarget && this->glCaps().textureUsageSupport()) {
1103 // provides a hint about how this texture will be used 1436 // provides a hint about how this texture will be used
1104 GL_CALL(TexParameteri(idDesc.fInfo.fTarget, 1437 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1105 GR_GL_TEXTURE_USAGE, 1438 GR_GL_TEXTURE_USAGE,
1106 GR_GL_FRAMEBUFFER_ATTACHMENT)); 1439 GR_GL_FRAMEBUFFER_ATTACHMENT));
1107 } 1440 }
1108 1441
1109 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some 1442 GrGLTexture::TexParams initialTexParams = set_initial_texture_params(this->glInterface(),
1110 // drivers have a bug where an FBO won't be complete if it includes a 1443 idDesc);
1111 // texture that is not mipmap complete (considering the filter in use). 1444
1112 GrGLTexture::TexParams initialTexParams;
1113 // we only set a subset here so invalidate first
1114 initialTexParams.invalidate();
1115 initialTexParams.fMinFilter = GR_GL_NEAREST;
1116 initialTexParams.fMagFilter = GR_GL_NEAREST;
1117 initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
1118 initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
1119 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1120 GR_GL_TEXTURE_MAG_FILTER,
1121 initialTexParams.fMagFilter));
1122 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1123 GR_GL_TEXTURE_MIN_FILTER,
1124 initialTexParams.fMinFilter));
1125 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1126 GR_GL_TEXTURE_WRAP_S,
1127 initialTexParams.fWrapS));
1128 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1129 GR_GL_TEXTURE_WRAP_T,
1130 initialTexParams.fWrapT));
1131 if (!this->uploadTexData(desc, idDesc.fInfo.fTarget, kNewTexture_UploadType, 0, 0, 1445 if (!this->uploadTexData(desc, idDesc.fInfo.fTarget, kNewTexture_UploadType, 0, 0,
1132 desc.fWidth, desc.fHeight, 1446 desc.fWidth, desc.fHeight,
1133 desc.fConfig, srcData, rowBytes)) { 1447 desc.fConfig, texels)) {
1134 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID)); 1448 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
1135 return return_null_texture(); 1449 return return_null_texture();
1136 } 1450 }
1137 1451
1138 GrGLTexture* tex; 1452 GrGLTexture* tex;
1139 if (renderTarget) { 1453 if (renderTarget) {
1140 // unbind the texture from the texture unit before binding it to the frame buffer 1454 // unbind the texture from the texture unit before binding it to the frame buffer
1141 GL_CALL(BindTexture(idDesc.fInfo.fTarget, 0)); 1455 GL_CALL(BindTexture(idDesc.fInfo.fTarget, 0));
1142 GrGLRenderTarget::IDDesc rtIDDesc; 1456 GrGLRenderTarget::IDDesc rtIDDesc;
1143 1457
1144 if (!this->createRenderTargetObjects(desc, lifeCycle, idDesc.fInfo, &rtIDDesc)) { 1458 if (!this->createRenderTargetObjects(desc, lifeCycle, idDesc.fInfo, &rtIDDesc)) {
1145 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID)); 1459 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
1146 return return_null_texture(); 1460 return return_null_texture();
1147 } 1461 }
1148 tex = new GrGLTextureRenderTarget(this, desc, idDesc, rtIDDesc); 1462 tex = new GrGLTextureRenderTarget(this, desc, idDesc, rtIDDesc);
1149 } else { 1463 } else {
1150 tex = new GrGLTexture(this, desc, idDesc); 1464 bool wasMipMapDataProvided = false;
1465 if (texels.count() > 1) {
1466 wasMipMapDataProvided = true;
1467 }
1468 tex = new GrGLTexture(this, desc, idDesc, wasMipMapDataProvided);
1151 } 1469 }
1152 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp()); 1470 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
1153 #ifdef TRACE_TEXTURE_CREATION 1471 #ifdef TRACE_TEXTURE_CREATION
1154 SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n", 1472 SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n",
1155 glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig); 1473 glTexDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
1156 #endif 1474 #endif
1157 return tex; 1475 return tex;
1158 } 1476 }
1159 1477
1160 GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc, 1478 GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc,
1161 GrGpuResource::LifeCycle lifeCycle, 1479 GrGpuResource::LifeCycle lifeCycle,
1162 const void* srcData) { 1480 const SkTArray<SkMipMapLevel>& texels) {
1163 // Make sure that we're not flipping Y. 1481 // Make sure that we're not flipping Y.
1164 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { 1482 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
1165 return return_null_texture(); 1483 return return_null_texture();
1166 } 1484 }
1167 1485
1168 GrGLTexture::IDDesc idDesc; 1486 GrGLTexture::IDDesc idDesc = generate_gl_texture(this->glInterface(), lifeCycle);
1169 idDesc.fInfo.fID = 0;
1170 GL_CALL(GenTextures(1, &idDesc.fInfo.fID));
1171 idDesc.fLifeCycle = lifeCycle;
1172 // We only support GL_TEXTURE_2D at the moment.
1173 idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
1174
1175 if (!idDesc.fInfo.fID) { 1487 if (!idDesc.fInfo.fID) {
1176 return return_null_texture(); 1488 return return_null_texture();
1177 } 1489 }
1178 1490
1179 this->setScratchTextureUnit(); 1491 this->setScratchTextureUnit();
1180 GL_CALL(BindTexture(idDesc.fInfo.fTarget, idDesc.fInfo.fID)); 1492 GL_CALL(BindTexture(idDesc.fInfo.fTarget, idDesc.fInfo.fID));
1181 1493
1182 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some 1494 GrGLTexture::TexParams initialTexParams = set_initial_texture_params(this->glInterface(),
1183 // drivers have a bug where an FBO won't be complete if it includes a 1495 idDesc);
1184 // texture that is not mipmap complete (considering the filter in use).
1185 GrGLTexture::TexParams initialTexParams;
1186 // we only set a subset here so invalidate first
1187 initialTexParams.invalidate();
1188 initialTexParams.fMinFilter = GR_GL_NEAREST;
1189 initialTexParams.fMagFilter = GR_GL_NEAREST;
1190 initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
1191 initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
1192 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1193 GR_GL_TEXTURE_MAG_FILTER,
1194 initialTexParams.fMagFilter));
1195 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1196 GR_GL_TEXTURE_MIN_FILTER,
1197 initialTexParams.fMinFilter));
1198 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1199 GR_GL_TEXTURE_WRAP_S,
1200 initialTexParams.fWrapS));
1201 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1202 GR_GL_TEXTURE_WRAP_T,
1203 initialTexParams.fWrapT));
1204 1496
1205 if (!this->uploadCompressedTexData(desc, idDesc.fInfo.fTarget, srcData)) { 1497 if (!this->uploadCompressedTexData(desc, idDesc.fInfo.fTarget, texels)) {
1206 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID)); 1498 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
1207 return return_null_texture(); 1499 return return_null_texture();
1208 } 1500 }
1209 1501
1210 GrGLTexture* tex; 1502 GrGLTexture* tex;
1211 tex = new GrGLTexture(this, desc, idDesc); 1503 tex = new GrGLTexture(this, desc, idDesc);
1212 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp()); 1504 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
1213 #ifdef TRACE_TEXTURE_CREATION 1505 #ifdef TRACE_TEXTURE_CREATION
1214 SkDebugf("--- new compressed texture [%d] size=(%d %d) config=%d\n", 1506 SkDebugf("--- new compressed texture [%d] size=(%d %d) config=%d\n",
1215 glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig); 1507 glTexDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
1216 #endif 1508 #endif
1217 return tex; 1509 return tex;
1218 } 1510 }
1219 1511
1220 namespace { 1512 namespace {
1221 1513
1222 const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount; 1514 const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount;
1223 1515
1224 void inline get_stencil_rb_sizes(const GrGLInterface* gl, 1516 void inline get_stencil_rb_sizes(const GrGLInterface* gl,
1225 GrGLStencilAttachment::Format* format) { 1517 GrGLStencilAttachment::Format* format) {
(...skipping 1526 matching lines...)
2752 viewport->fWidth = surface->width(); 3044 viewport->fWidth = surface->width();
2753 viewport->fHeight = surface->height(); 3045 viewport->fHeight = surface->height();
2754 } else { 3046 } else {
2755 fStats.incRenderTargetBinds(); 3047 fStats.incRenderTargetBinds();
2756 GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, rt->renderFBO ID())); 3048 GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, rt->renderFBO ID()));
2757 *viewport = rt->getViewport(); 3049 *viewport = rt->getViewport();
2758 } 3050 }
2759 } 3051 }
2760 3052
2761 void GrGLGpu::unbindTextureFBOForCopy(GrGLenum fboTarget, GrSurface* surface) { 3053 void GrGLGpu::unbindTextureFBOForCopy(GrGLenum fboTarget, GrSurface* surface) {
2762 // bindSurfaceFBOForCopy temporarily binds textures that are not render targets to 3054 // bindSurfaceFBOForCopy temporarily binds textures that are not render targets to
2763 if (!surface->asRenderTarget()) { 3055 if (!surface->asRenderTarget()) {
2764 SkASSERT(surface->asTexture()); 3056 SkASSERT(surface->asTexture());
2765 GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target(); 3057 GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target();
2766 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget, 3058 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
2767 GR_GL_COLOR_ATTACHMENT0, 3059 GR_GL_COLOR_ATTACHMENT0,
2768 textureTarget, 3060 textureTarget,
2769 0, 3061 0,
2770 0)); 3062 0));
2771 } 3063 }
2772 } 3064 }
(...skipping 57 matching lines...)
2830 } 3122 }
2831 3123
2832 bool GrGLGpu::onCopySurface(GrSurface* dst, 3124 bool GrGLGpu::onCopySurface(GrSurface* dst,
2833 GrSurface* src, 3125 GrSurface* src,
2834 const SkIRect& srcRect, 3126 const SkIRect& srcRect,
2835 const SkIPoint& dstPoint) { 3127 const SkIPoint& dstPoint) {
2836 if (src->asTexture() && dst->asRenderTarget()) { 3128 if (src->asTexture() && dst->asRenderTarget()) {
2837 this->copySurfaceAsDraw(dst, src, srcRect, dstPoint); 3129 this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
2838 return true; 3130 return true;
2839 } 3131 }
2840 3132
2841 if (can_copy_texsubimage(dst, src, this)) { 3133 if (can_copy_texsubimage(dst, src, this)) {
2842 this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstPoint); 3134 this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstPoint);
2843 return true; 3135 return true;
2844 } 3136 }
2845 3137
2846 if (can_blit_framebuffer(dst, src, this)) { 3138 if (can_blit_framebuffer(dst, src, this)) {
2847 return this->copySurfaceAsBlitFramebuffer(dst, src, srcRect, dstPoint); 3139 return this->copySurfaceAsBlitFramebuffer(dst, src, srcRect, dstPoint);
2848 } 3140 }
2849 3141
2850 return false; 3142 return false;
(...skipping 22 matching lines...)
2873 3165
2874 SkString vshaderTxt(version); 3166 SkString vshaderTxt(version);
2875 aVertex.appendDecl(this->glCaps().glslCaps(), &vshaderTxt); 3167 aVertex.appendDecl(this->glCaps().glslCaps(), &vshaderTxt);
2876 vshaderTxt.append(";"); 3168 vshaderTxt.append(";");
2877 uTexCoordXform.appendDecl(this->glCaps().glslCaps(), &vshaderTxt); 3169 uTexCoordXform.appendDecl(this->glCaps().glslCaps(), &vshaderTxt);
2878 vshaderTxt.append(";"); 3170 vshaderTxt.append(";");
2879 uPosXform.appendDecl(this->glCaps().glslCaps(), &vshaderTxt); 3171 uPosXform.appendDecl(this->glCaps().glslCaps(), &vshaderTxt);
2880 vshaderTxt.append(";"); 3172 vshaderTxt.append(";");
2881 vTexCoord.appendDecl(this->glCaps().glslCaps(), &vshaderTxt); 3173 vTexCoord.appendDecl(this->glCaps().glslCaps(), &vshaderTxt);
2882 vshaderTxt.append(";"); 3174 vshaderTxt.append(";");
2883 3175
2884 vshaderTxt.append( 3176 vshaderTxt.append(
2885 "// Copy Program VS\n" 3177 "// Copy Program VS\n"
2886 "void main() {" 3178 "void main() {"
2887 " v_texCoord = a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw;" 3179 " v_texCoord = a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw;"
2888 " gl_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;" 3180 " gl_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;"
2889 " gl_Position.zw = vec2(0, 1);" 3181 " gl_Position.zw = vec2(0, 1);"
2890 "}" 3182 "}"
2891 ); 3183 );
2892 3184
2893 SkString fshaderTxt(version); 3185 SkString fshaderTxt(version);
(...skipping 18 matching lines...)
2912 fsOutName = "gl_FragColor"; 3204 fsOutName = "gl_FragColor";
2913 } 3205 }
2914 fshaderTxt.appendf( 3206 fshaderTxt.appendf(
2915 "// Copy Program FS\n" 3207 "// Copy Program FS\n"
2916 "void main() {" 3208 "void main() {"
2917 " %s = %s(u_texture, v_texCoord);" 3209 " %s = %s(u_texture, v_texCoord);"
2918 "}", 3210 "}",
2919 fsOutName, 3211 fsOutName,
2920 GrGLSLTexture2DFunctionName(kVec2f_GrSLType, this->glslGeneration()) 3212 GrGLSLTexture2DFunctionName(kVec2f_GrSLType, this->glslGeneration())
2921 ); 3213 );
2922 3214
2923 GL_CALL_RET(fCopyPrograms[i].fProgram, CreateProgram()); 3215 GL_CALL_RET(fCopyPrograms[i].fProgram, CreateProgram());
2924 const char* str; 3216 const char* str;
2925 GrGLint length; 3217 GrGLint length;
2926 3218
2927 str = vshaderTxt.c_str(); 3219 str = vshaderTxt.c_str();
2928 length = SkToInt(vshaderTxt.size()); 3220 length = SkToInt(vshaderTxt.size());
2929 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[i].fProgram, 3221 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[i].fProgram,
2930 GR_GL_VERTEX_SHADER, &str, &length, 1, 3222 GR_GL_VERTEX_SHADER, &str, &length, 1,
2931 &fStats); 3223 &fStats);
2932 3224
(...skipping 494 matching lines...)
3427 this->setVertexArrayID(gpu, 0); 3719 this->setVertexArrayID(gpu, 0);
3428 } 3720 }
3429 int attrCount = gpu->glCaps().maxVertexAttributes(); 3721 int attrCount = gpu->glCaps().maxVertexAttributes();
3430 if (fDefaultVertexArrayAttribState.count() != attrCount) { 3722 if (fDefaultVertexArrayAttribState.count() != attrCount) {
3431 fDefaultVertexArrayAttribState.resize(attrCount); 3723 fDefaultVertexArrayAttribState.resize(attrCount);
3432 } 3724 }
3433 attribState = &fDefaultVertexArrayAttribState; 3725 attribState = &fDefaultVertexArrayAttribState;
3434 } 3726 }
3435 return attribState; 3727 return attribState;
3436 } 3728 }