Chromium Code Reviews

Side by Side Diff: src/gpu/gl/GrGLGpu.cpp

Issue 1534123003: More framework support for TransferBuffers (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Address more comments (created 4 years, 11 months ago)
1 /* 1 /*
2 * Copyright 2011 Google Inc. 2 * Copyright 2011 Google Inc.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license that can be 4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file. 5 * found in the LICENSE file.
6 */ 6 */
7 7
8 8
9 #include "GrGLGpu.h" 9 #include "GrGLGpu.h"
10 #include "GrGLGLSL.h" 10 #include "GrGLGLSL.h"
(...skipping 575 matching lines...)
586 } 586 }
587 587
588 if (!this->glCaps().unpackFlipYSupport() && 588 if (!this->glCaps().unpackFlipYSupport() &&
589 kBottomLeft_GrSurfaceOrigin == dstSurface->origin()) { 589 kBottomLeft_GrSurfaceOrigin == dstSurface->origin()) {
590 ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference); 590 ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
591 } 591 }
592 592
593 return true; 593 return true;
594 } 594 }
595 595
596 bool GrGLGpu::onWritePixels(GrSurface* surface, 596 static bool check_write_and_transfer_input(GrGLTexture* glTex, GrSurface* surface,
597 int left, int top, int width, int height, 597 GrPixelConfig config) {
598 GrPixelConfig config, const void* buffer,
599 size_t rowBytes) {
600 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
601 if (!glTex) { 598 if (!glTex) {
602 return false; 599 return false;
603 } 600 }
604 601
605 // OpenGL doesn't do sRGB <-> linear conversions when reading and writing pixels. 602 // OpenGL doesn't do sRGB <-> linear conversions when reading and writing pixels.
606 if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) { 603 if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
607 return false; 604 return false;
608 } 605 }
609 606
610 // Write pixels is only implemented for TEXTURE_2D textures 607 // Write or transfer of pixels is only implemented for TEXTURE_2D textures
611 if (GR_GL_TEXTURE_2D != glTex->target()) { 608 if (GR_GL_TEXTURE_2D != glTex->target()) {
612 return false; 609 return false;
613 } 610 }
614 611
612 return true;
613 }
614
615 bool GrGLGpu::onWritePixels(GrSurface* surface,
616 int left, int top, int width, int height,
617 GrPixelConfig config, const void* buffer,
618 size_t rowBytes) {
619 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
620
621 if (!check_write_and_transfer_input(glTex, surface, config)) {
622 return false;
623 }
624
615 this->setScratchTextureUnit(); 625 this->setScratchTextureUnit();
616 GL_CALL(BindTexture(glTex->target(), glTex->textureID())); 626 GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
617 627
618 bool success = false; 628 bool success = false;
619 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) { 629 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
620 // We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels() 630 // We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels()
621 SkASSERT(config == glTex->desc().fConfig); 631 SkASSERT(config == glTex->desc().fConfig);
622 success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), buffer, false, left, 632 success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), buffer,
623 top, width, height); 633 kWrite_UploadType, left, top, width, height);
624 } else { 634 } else {
625 success = this->uploadTexData(glTex->desc(), glTex->target(), false, left, top, width, 635 success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType,
626 height, config, buffer, rowBytes); 636 left, top, width, height, config, buffer, rowBytes);
627 } 637 }
628 638
629 if (success) { 639 if (success) {
630 glTex->texturePriv().dirtyMipMaps(true); 640 glTex->texturePriv().dirtyMipMaps(true);
631 return true; 641 return true;
632 } 642 }
633 643
634 return false; 644 return false;
635 } 645 }
636 646
647 bool GrGLGpu::onTransferPixels(GrSurface* surface,
648 int left, int top, int width, int height,
649 GrPixelConfig config, GrTransferBuffer* buffer,
650 size_t offset, size_t rowBytes) {
651 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
652
653 if (!check_write_and_transfer_input(glTex, surface, config)) {
654 return false;
655 }
656
657 // For the moment, can't transfer compressed data
658 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
659 return false;
660 }
661
662 this->setScratchTextureUnit();
663 GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
664
665 SkASSERT(!buffer->isMapped());
666 GrGLTransferBuffer* glBuffer = reinterpret_cast<GrGLTransferBuffer*>(buffer);
667 // bind the transfer buffer
668 SkASSERT(GR_GL_PIXEL_UNPACK_BUFFER == glBuffer->bufferType() ||
669 GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == glBuffer->bufferType());
670 GL_CALL(BindBuffer(glBuffer->bufferType(), glBuffer->bufferID()));
671
672 bool success = false;
673 success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType,
674 left, top, width, height, config, buffer, rowBytes);
675
676 if (success) {
677 glTex->texturePriv().dirtyMipMaps(true);
678 return true;
679 }
680
681 return false;
682 }
683
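
The new transfer path above amounts to sourcing a TexSubImage2D upload from a bound pixel-unpack buffer instead of from client memory. A minimal raw-GL sketch of that mechanism, outside Skia, assuming a current GL context and an already-allocated RGBA8 GL_TEXTURE_2D (the function, IDs, and format here are illustrative, not taken from this CL):

#include <GL/glew.h>   // any loader that declares the GL entry points
#include <cstddef>

// Stage the pixels in a pixel-unpack buffer, then let TexSubImage2D read from it.
void transfer_via_pbo(GLuint tex, GLuint pbo, const void* src, size_t sizeInBytes,
                      int left, int top, int width, int height) {
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
    glBufferData(GL_PIXEL_UNPACK_BUFFER, (GLsizeiptr)sizeInBytes, src, GL_STREAM_DRAW);
    glBindTexture(GL_TEXTURE_2D, tex);
    // With a pixel-unpack buffer bound, the last argument is interpreted as a
    // byte offset into that buffer rather than a client-memory pointer.
    glTexSubImage2D(GL_TEXTURE_2D, 0, left, top, width, height,
                    GL_RGBA, GL_UNSIGNED_BYTE, (const void*)0);
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}

In the CL itself the staging copy is the caller's job (the buffer comes from onCreateTransferBuffer and is presumably filled via its map/unmap interface); onTransferPixels only binds the buffer and issues the upload.
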
637 // For GL_[UN]PACK_ALIGNMENT. 684 // For GL_[UN]PACK_ALIGNMENT.
638 static inline GrGLint config_alignment(GrPixelConfig config) { 685 static inline GrGLint config_alignment(GrPixelConfig config) {
639 SkASSERT(!GrPixelConfigIsCompressed(config)); 686 SkASSERT(!GrPixelConfigIsCompressed(config));
640 switch (config) { 687 switch (config) {
641 case kAlpha_8_GrPixelConfig: 688 case kAlpha_8_GrPixelConfig:
642 return 1; 689 return 1;
643 case kRGB_565_GrPixelConfig: 690 case kRGB_565_GrPixelConfig:
644 case kRGBA_4444_GrPixelConfig: 691 case kRGBA_4444_GrPixelConfig:
645 case kAlpha_half_GrPixelConfig: 692 case kAlpha_half_GrPixelConfig:
646 case kRGBA_half_GrPixelConfig: 693 case kRGBA_half_GrPixelConfig:
(...skipping 12 matching lines...)
659 const GrGLInterface* interface) { 706 const GrGLInterface* interface) {
660 if (SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag)) { 707 if (SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag)) {
661 return GR_GL_GET_ERROR(interface); 708 return GR_GL_GET_ERROR(interface);
662 } else { 709 } else {
663 return CHECK_ALLOC_ERROR(interface); 710 return CHECK_ALLOC_ERROR(interface);
664 } 711 }
665 } 712 }
666 713
667 bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc, 714 bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
668 GrGLenum target, 715 GrGLenum target,
669 bool isNewTexture, 716 UploadType uploadType,
670 int left, int top, int width, int height, 717 int left, int top, int width, int height,
671 GrPixelConfig dataConfig, 718 GrPixelConfig dataConfig,
672 const void* data, 719 const void* dataOrOffset,
673 size_t rowBytes) { 720 size_t rowBytes) {
674 SkASSERT(data || isNewTexture); 721 SkASSERT(dataOrOffset || kNewTexture_UploadType == uploadType ||
722 kTransfer_UploadType == uploadType);
675 723
676 // If we're uploading compressed data then we should be using uploadCompressedTexData 724 // If we're uploading compressed data then we should be using uploadCompressedTexData
677 SkASSERT(!GrPixelConfigIsCompressed(dataConfig)); 725 SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
678 726
679 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig)); 727 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
680 728
681 size_t bpp = GrBytesPerPixel(dataConfig); 729 size_t bpp = GrBytesPerPixel(dataConfig);
682 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top, 730 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
683 &width, &height, &data, &rowBytes)) { 731 &width, &height, &dataOrOffset, &rowBytes)) {
684 return false; 732 return false;
685 } 733 }
686 size_t trimRowBytes = width * bpp; 734 size_t trimRowBytes = width * bpp;
687 735
688 // in case we need a temporary, trimmed copy of the src pixels 736 // in case we need a temporary, trimmed copy of the src pixels
689 #if defined(GOOGLE3) 737 #if defined(GOOGLE3)
690 // Stack frame size is limited in GOOGLE3. 738 // Stack frame size is limited in GOOGLE3.
691 SkAutoSMalloc<64 * 128> tempStorage; 739 SkAutoSMalloc<64 * 128> tempStorage;
692 #else 740 #else
693 SkAutoSMalloc<128 * 128> tempStorage; 741 SkAutoSMalloc<128 * 128> tempStorage;
(...skipping 10 matching lines...)
704 752
705 /* 753 /*
706 * Check whether to allocate a temporary buffer for flipping y or 754 * Check whether to allocate a temporary buffer for flipping y or
707 * because our srcData has extra bytes past each row. If so, we need 755 * because our srcData has extra bytes past each row. If so, we need
708 * to trim those off here, since GL ES may not let us specify 756 * to trim those off here, since GL ES may not let us specify
709 * GL_UNPACK_ROW_LENGTH. 757 * GL_UNPACK_ROW_LENGTH.
710 */ 758 */
711 bool restoreGLRowLength = false; 759 bool restoreGLRowLength = false;
712 bool swFlipY = false; 760 bool swFlipY = false;
713 bool glFlipY = false; 761 bool glFlipY = false;
714 if (data) { 762 if (dataOrOffset) {
715 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { 763 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
716 if (this->glCaps().unpackFlipYSupport()) { 764 if (this->glCaps().unpackFlipYSupport()) {
717 glFlipY = true; 765 glFlipY = true;
718 } else { 766 } else {
719 swFlipY = true; 767 swFlipY = true;
720 } 768 }
721 } 769 }
722 if (this->glCaps().unpackRowLengthSupport() && !swFlipY) { 770 if (this->glCaps().unpackRowLengthSupport() && !swFlipY) {
723 // can't use this for flipping, only non-neg values allowed. :( 771 // can't use this for flipping, only non-neg values allowed. :(
724 if (rowBytes != trimRowBytes) { 772 if (rowBytes != trimRowBytes) {
725 GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp); 773 GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
726 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength)); 774 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
727 restoreGLRowLength = true; 775 restoreGLRowLength = true;
728 } 776 }
729 } else { 777 } else if (kTransfer_UploadType != uploadType) {
730 if (trimRowBytes != rowBytes || swFlipY) { 778 if (trimRowBytes != rowBytes || swFlipY) {
731 // copy data into our new storage, skipping the trailing bytes 779 // copy data into our new storage, skipping the trailing bytes
732 size_t trimSize = height * trimRowBytes; 780 size_t trimSize = height * trimRowBytes;
733 const char* src = (const char*)data; 781 const char* src = (const char*)dataOrOffset;
734 if (swFlipY) { 782 if (swFlipY) {
735 src += (height - 1) * rowBytes; 783 src += (height - 1) * rowBytes;
736 } 784 }
737 char* dst = (char*)tempStorage.reset(trimSize); 785 char* dst = (char*)tempStorage.reset(trimSize);
738 for (int y = 0; y < height; y++) { 786 for (int y = 0; y < height; y++) {
739 memcpy(dst, src, trimRowBytes); 787 memcpy(dst, src, trimRowBytes);
740 if (swFlipY) { 788 if (swFlipY) {
741 src -= rowBytes; 789 src -= rowBytes;
742 } else { 790 } else {
743 src += rowBytes; 791 src += rowBytes;
744 } 792 }
745 dst += trimRowBytes; 793 dst += trimRowBytes;
746 } 794 }
747 // now point data to our copied version 795 // now point data to our copied version
748 data = tempStorage.get(); 796 dataOrOffset = tempStorage.get();
749 } 797 }
798 } else {
799 return false;
750 } 800 }
751 if (glFlipY) { 801 if (glFlipY) {
752 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE)); 802 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
753 } 803 }
754 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(dataConfig))); 804 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(dataConfig)));
755 } 805 }
756 bool succeeded = true; 806 bool succeeded = true;
757 if (isNewTexture) { 807 if (kNewTexture_UploadType == uploadType) {
758 if (data && !(0 == left && 0 == top && desc.fWidth == width && desc.fHeight == height)) { 808 if (dataOrOffset &&
809 !(0 == left && 0 == top && desc.fWidth == width && desc.fHeight == height)) {
759 succeeded = false; 810 succeeded = false;
760 } else { 811 } else {
761 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); 812 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
762 GL_ALLOC_CALL(this->glInterface(), TexImage2D(target, 0, internalFormat, desc.fWidth, 813 GL_ALLOC_CALL(this->glInterface(), TexImage2D(target, 0, internalFormat, desc.fWidth,
763 desc.fHeight, 0, externalFormat, 814 desc.fHeight, 0, externalFormat,
764 externalType, data)); 815 externalType, dataOrOffset));
765 GrGLenum error = check_alloc_error(desc, this->glInterface()); 816 GrGLenum error = check_alloc_error(desc, this->glInterface());
766 if (error != GR_GL_NO_ERROR) { 817 if (error != GR_GL_NO_ERROR) {
767 succeeded = false; 818 succeeded = false;
768 } 819 }
769 } 820 }
770 } else { 821 } else {
771 if (swFlipY || glFlipY) { 822 if (swFlipY || glFlipY) {
772 top = desc.fHeight - (top + height); 823 top = desc.fHeight - (top + height);
773 } 824 }
774 GL_CALL(TexSubImage2D(target, 825 GL_CALL(TexSubImage2D(target,
775 0, // level 826 0, // level
776 left, top, 827 left, top,
777 width, height, 828 width, height,
778 externalFormat, externalType, data)); 829 externalFormat, externalType, dataOrOffset));
779 } 830 }
780 831
781 if (restoreGLRowLength) { 832 if (restoreGLRowLength) {
782 SkASSERT(this->glCaps().unpackRowLengthSupport()); 833 SkASSERT(this->glCaps().unpackRowLengthSupport());
783 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); 834 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
784 } 835 }
785 if (glFlipY) { 836 if (glFlipY) {
786 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE)); 837 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
787 } 838 }
788 return succeeded; 839 return succeeded;
789 } 840 }
790 841
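
The stride handling in uploadTexData() boils down to two strategies: advertise the caller's row stride through GL_UNPACK_ROW_LENGTH when the cap is available, otherwise compact the rows into a tightly packed temporary before uploading (the transfer path returns false in that case instead, since it cannot rewrite data already staged in the buffer). A standalone sketch of those branches, assuming a current GL context with an RGBA8 texture bound to GL_TEXTURE_2D; the helper name and parameters are illustrative only:

#include <GL/glew.h>
#include <cstring>
#include <vector>

void upload_with_stride(bool hasUnpackRowLength, const char* src, size_t rowBytes,
                        int width, int height, size_t bpp) {
    const size_t trimRowBytes = (size_t)width * bpp;
    if (rowBytes == trimRowBytes) {
        // Rows are already tightly packed; upload directly.
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height,
                        GL_RGBA, GL_UNSIGNED_BYTE, src);
    } else if (hasUnpackRowLength) {
        // Tell GL the true source stride; no CPU copy needed.
        glPixelStorei(GL_UNPACK_ROW_LENGTH, (GLint)(rowBytes / bpp));
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height,
                        GL_RGBA, GL_UNSIGNED_BYTE, src);
        glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
    } else {
        // No GL_UNPACK_ROW_LENGTH (e.g. unextended ES2): strip the per-row
        // padding on the CPU so the data is tightly packed.
        std::vector<char> tight(trimRowBytes * height);
        for (int y = 0; y < height; ++y) {
            memcpy(&tight[(size_t)y * trimRowBytes], src + (size_t)y * rowBytes, trimRowBytes);
        }
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height,
                        GL_RGBA, GL_UNSIGNED_BYTE, tight.data());
    }
}
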
791 // TODO: This function is using a lot of wonky semantics like, if width == -1 842 // TODO: This function is using a lot of wonky semantics like, if width == -1
792 // then set width = desc.fWdith ... blah. A better way to do it might be to 843 // then set width = desc.fWdith ... blah. A better way to do it might be to
793 // create a CompressedTexData struct that takes a desc/ptr and figures out 844 // create a CompressedTexData struct that takes a desc/ptr and figures out
794 // the proper upload semantics. Then users can construct this function how they 845 // the proper upload semantics. Then users can construct this function how they
795 // see fit if they want to go against the "standard" way to do it. 846 // see fit if they want to go against the "standard" way to do it.
796 bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc, 847 bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
797 GrGLenum target, 848 GrGLenum target,
798 const void* data, 849 const void* data,
799 bool isNewTexture, 850 UploadType uploadType,
800 int left, int top, int width, int height) { 851 int left, int top, int width, int height) {
801 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig)); 852 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
802 SkASSERT(data || isNewTexture); 853 SkASSERT(kTransfer_UploadType != uploadType &&
854 (data || kNewTexture_UploadType != uploadType));
803 855
804 // No support for software flip y, yet... 856 // No support for software flip y, yet...
805 SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin); 857 SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin);
806 858
807 if (-1 == width) { 859 if (-1 == width) {
808 width = desc.fWidth; 860 width = desc.fWidth;
809 } 861 }
810 #ifdef SK_DEBUG 862 #ifdef SK_DEBUG
811 else { 863 else {
812 SkASSERT(width <= desc.fWidth); 864 SkASSERT(width <= desc.fWidth);
(...skipping 10 matching lines...)
823 #endif 875 #endif
824 876
825 // Make sure that the width and height that we pass to OpenGL 877 // Make sure that the width and height that we pass to OpenGL
826 // is a multiple of the block size. 878 // is a multiple of the block size.
827 size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height); 879 size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height);
828 880
829 // We only need the internal format for compressed 2D textures. There is on 881 // We only need the internal format for compressed 2D textures. There is on
830 // sized vs base internal format distinction for compressed textures. 882 // sized vs base internal format distinction for compressed textures.
831 GrGLenum internalFormat =this->glCaps().configGLFormats(desc.fConfig).fSizedInternalFormat; 883 GrGLenum internalFormat =this->glCaps().configGLFormats(desc.fConfig).fSizedInternalFormat;
832 884
833 if (isNewTexture) { 885 if (kNewTexture_UploadType == uploadType) {
834 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); 886 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
835 GL_ALLOC_CALL(this->glInterface(), 887 GL_ALLOC_CALL(this->glInterface(),
836 CompressedTexImage2D(target, 888 CompressedTexImage2D(target,
837 0, // level 889 0, // level
838 internalFormat, 890 internalFormat,
839 width, height, 891 width, height,
840 0, // border 892 0, // border
841 SkToInt(dataSize), 893 SkToInt(dataSize),
842 data)); 894 data));
843 GrGLenum error = check_alloc_error(desc, this->glInterface()); 895 GrGLenum error = check_alloc_error(desc, this->glInterface());
(...skipping 225 matching lines...)
1069 initialTexParams.fMagFilter)); 1121 initialTexParams.fMagFilter));
1070 GL_CALL(TexParameteri(idDesc.fInfo.fTarget, 1122 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1071 GR_GL_TEXTURE_MIN_FILTER, 1123 GR_GL_TEXTURE_MIN_FILTER,
1072 initialTexParams.fMinFilter)); 1124 initialTexParams.fMinFilter));
1073 GL_CALL(TexParameteri(idDesc.fInfo.fTarget, 1125 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1074 GR_GL_TEXTURE_WRAP_S, 1126 GR_GL_TEXTURE_WRAP_S,
1075 initialTexParams.fWrapS)); 1127 initialTexParams.fWrapS));
1076 GL_CALL(TexParameteri(idDesc.fInfo.fTarget, 1128 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1077 GR_GL_TEXTURE_WRAP_T, 1129 GR_GL_TEXTURE_WRAP_T,
1078 initialTexParams.fWrapT)); 1130 initialTexParams.fWrapT));
1079 if (!this->uploadTexData(desc, idDesc.fInfo.fTarget, true, 0, 0, 1131 if (!this->uploadTexData(desc, idDesc.fInfo.fTarget, kNewTexture_UploadType, 0, 0,
1080 desc.fWidth, desc.fHeight, 1132 desc.fWidth, desc.fHeight,
1081 desc.fConfig, srcData, rowBytes)) { 1133 desc.fConfig, srcData, rowBytes)) {
1082 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID)); 1134 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
1083 return return_null_texture(); 1135 return return_null_texture();
1084 } 1136 }
1085 1137
1086 GrGLTexture* tex; 1138 GrGLTexture* tex;
1087 if (renderTarget) { 1139 if (renderTarget) {
1088 // unbind the texture from the texture unit before binding it to the frame buffer 1140 // unbind the texture from the texture unit before binding it to the frame buffer
1089 GL_CALL(BindTexture(idDesc.fInfo.fTarget, 0)); 1141 GL_CALL(BindTexture(idDesc.fInfo.fTarget, 0));
(...skipping 341 matching lines...)
1431 GrGLTransferBuffer::Desc desc; 1483 GrGLTransferBuffer::Desc desc;
1432 bool toGpu = (kCpuToGpu_TransferType == xferType); 1484 bool toGpu = (kCpuToGpu_TransferType == xferType);
1433 desc.fUsage = toGpu ? GrGLBufferImpl::kStreamDraw_Usage : GrGLBufferImpl::kStreamRead_Usage; 1485 desc.fUsage = toGpu ? GrGLBufferImpl::kStreamDraw_Usage : GrGLBufferImpl::kStreamRead_Usage;
1434 1486
1435 desc.fSizeInBytes = size; 1487 desc.fSizeInBytes = size;
1436 desc.fID = 0; 1488 desc.fID = 0;
1437 GL_CALL(GenBuffers(1, &desc.fID)); 1489 GL_CALL(GenBuffers(1, &desc.fID));
1438 if (desc.fID) { 1490 if (desc.fID) {
1439 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface()); 1491 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1440 // make sure driver can allocate memory for this buffer 1492 // make sure driver can allocate memory for this buffer
1441 GrGLenum type; 1493 GrGLenum target;
1442 if (GrGLCaps::kChromium_TransferBufferType == xferBufferType) { 1494 if (GrGLCaps::kChromium_TransferBufferType == xferBufferType) {
1443 type = toGpu ? GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM 1495 target = toGpu ? GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM
1444 : GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM; 1496 : GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
1445 } else { 1497 } else {
1446 SkASSERT(GrGLCaps::kPBO_TransferBufferType == xferBufferType); 1498 SkASSERT(GrGLCaps::kPBO_TransferBufferType == xferBufferType);
1447 type = toGpu ? GR_GL_PIXEL_UNPACK_BUFFER : GR_GL_PIXEL_PACK_BUFFER; 1499 target = toGpu ? GR_GL_PIXEL_UNPACK_BUFFER : GR_GL_PIXEL_PACK_BUFFER;
1448 } 1500 }
1449 GL_ALLOC_CALL(this->glInterface(), 1501 GL_CALL(BindBuffer(target, desc.fID));
1450 BufferData(type, 1502 GL_ALLOC_CALL(this->glInterface(),
1503 BufferData(target,
1451 (GrGLsizeiptr) desc.fSizeInBytes, 1504 (GrGLsizeiptr) desc.fSizeInBytes,
1452 nullptr, // data ptr 1505 nullptr, // data ptr
1453 (toGpu ? GR_GL_STREAM_DRAW : GR_GL_STREAM_READ))); 1506 (toGpu ? GR_GL_STREAM_DRAW : GR_GL_STREAM_READ)));
1454 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) { 1507 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
1455 GL_CALL(DeleteBuffers(1, &desc.fID)); 1508 GL_CALL(DeleteBuffers(1, &desc.fID));
1456 return nullptr; 1509 return nullptr;
1457 } 1510 }
1458 GrTransferBuffer* transferBuffer = new GrGLTransferBuffer(this, desc, type); 1511 GrTransferBuffer* transferBuffer = new GrGLTransferBuffer(this, desc, target);
1459 return transferBuffer; 1512 return transferBuffer;
1460 } 1513 }
1461 1514
1462 return nullptr; 1515 return nullptr;
1463 } 1516 }
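
The BindBuffer call added above matters because BufferData allocates storage for whichever buffer object is currently bound to the given target; a name fresh out of GenBuffers has no storage until it is bound. A raw-GL sketch of the corrected allocation sequence (illustrative only; assumes a current GL context):

#include <GL/glew.h>
#include <cstddef>

GLuint make_transfer_pbo(size_t sizeInBytes) {
    GLuint id = 0;
    glGenBuffers(1, &id);                      // reserves a name only, no storage
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER, id);  // must bind before allocating
    glBufferData(GL_PIXEL_UNPACK_BUFFER, (GLsizeiptr)sizeInBytes,
                 nullptr, GL_STREAM_DRAW);     // allocate uninitialized storage
    return id;
}
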
1464 1517
1465 void GrGLGpu::flushScissor(const GrScissorState& scissorState, 1518 void GrGLGpu::flushScissor(const GrScissorState& scissorState,
1466 const GrGLIRect& rtViewport, 1519 const GrGLIRect& rtViewport,
1467 GrSurfaceOrigin rtOrigin) { 1520 GrSurfaceOrigin rtOrigin) {
1468 if (scissorState.enabled()) { 1521 if (scissorState.enabled()) {
(...skipping 179 matching lines...)
1648 GL_CALL(BufferData(type, requestedSize, nullptr, glUsage)); 1701 GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
1649 } 1702 }
1650 GL_CALL_RET(mapPtr, MapBuffer(type, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); 1703 GL_CALL_RET(mapPtr, MapBuffer(type, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
1651 break; 1704 break;
1652 case GrGLCaps::kMapBufferRange_MapBufferType: { 1705 case GrGLCaps::kMapBufferRange_MapBufferType: {
1653 this->bindBuffer(id, type); 1706 this->bindBuffer(id, type);
1654 // Make sure the GL buffer size agrees with fDesc before mapping. 1707 // Make sure the GL buffer size agrees with fDesc before mapping.
1655 if (currentSize != requestedSize) { 1708 if (currentSize != requestedSize) {
1656 GL_CALL(BufferData(type, requestedSize, nullptr, glUsage)); 1709 GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
1657 } 1710 }
1658 static const GrGLbitfield kWriteAccess = GR_GL_MAP_INVALIDATE_BUFFER_BIT | 1711 GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
1659 GR_GL_MAP_WRITE_BIT; 1712 // TODO: allow the client to specify invalidation in the stream draw case
1713 if (GrGLBufferImpl::kStreamDraw_Usage != usage) {
1714 writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
1715 }
1660 GL_CALL_RET(mapPtr, MapBufferRange(type, 0, requestedSize, readOnly ? 1716 GL_CALL_RET(mapPtr, MapBufferRange(type, 0, requestedSize, readOnly ?
1661 GR_GL_MAP_READ_BIT : 1717 GR_GL_MAP_READ_BIT :
1662 kWriteAccess)); 1718 writeAccess));
1663 break; 1719 break;
1664 } 1720 }
1665 case GrGLCaps::kChromium_MapBufferType: 1721 case GrGLCaps::kChromium_MapBufferType:
1666 this->bindBuffer(id, type); 1722 this->bindBuffer(id, type);
1667 // Make sure the GL buffer size agrees with fDesc before mapping. 1723 // Make sure the GL buffer size agrees with fDesc before mapping.
1668 if (currentSize != requestedSize) { 1724 if (currentSize != requestedSize) {
1669 GL_CALL(BufferData(type, requestedSize, nullptr, glUsage)); 1725 GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
1670 } 1726 }
1671 GL_CALL_RET(mapPtr, MapBufferSubData(type, 0, requestedSize, readOnly ? 1727 GL_CALL_RET(mapPtr, MapBufferSubData(type, 0, requestedSize, readOnly ?
1672 GR_GL_READ_ONLY : 1728 GR_GL_READ_ONLY :
(...skipping 1699 matching lines...)
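
On the MapBufferRange change in the chunk above: the old code always mapped writable buffers with GR_GL_MAP_INVALIDATE_BUFFER_BIT, which lets the driver orphan the previous storage but also allows contents already written to be discarded; the patch keeps plain GR_GL_MAP_WRITE_BIT for stream-draw (transfer) buffers and only adds invalidation otherwise, with the TODO deferring a client-controlled choice. A small illustration using the standard GL names rather than Skia's GR_GL_ wrappers (assumes the buffer is already bound to target):

#include <GL/glew.h>

void* map_for_write(GLenum target, GLsizeiptr sizeInBytes, bool preserveContents) {
    GLbitfield access = GL_MAP_WRITE_BIT;
    if (!preserveContents) {
        // Let the driver discard/orphan the old contents instead of
        // synchronizing with any pending GPU reads of them.
        access |= GL_MAP_INVALIDATE_BUFFER_BIT;
    }
    return glMapBufferRange(target, 0, sizeInBytes, access);
}
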
3372 this->setVertexArrayID(gpu, 0); 3428 this->setVertexArrayID(gpu, 0);
3373 } 3429 }
3374 int attrCount = gpu->glCaps().maxVertexAttributes(); 3430 int attrCount = gpu->glCaps().maxVertexAttributes();
3375 if (fDefaultVertexArrayAttribState.count() != attrCount) { 3431 if (fDefaultVertexArrayAttribState.count() != attrCount) {
3376 fDefaultVertexArrayAttribState.resize(attrCount); 3432 fDefaultVertexArrayAttribState.resize(attrCount);
3377 } 3433 }
3378 attribState = &fDefaultVertexArrayAttribState; 3434 attribState = &fDefaultVertexArrayAttribState;
3379 } 3435 }
3380 return attribState; 3436 return attribState;
3381 } 3437 }