Chromium Code Reviews

Side by Side Diff: src/gpu/vk/GrVkGpu.cpp

Issue 1906623002: Update min Vulkan version to 1.0.8.0, and fix various bugs (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: rebase Created 4 years, 8 months ago
OLD | NEW
1 /* 1 /*
2 * Copyright 2015 Google Inc. 2 * Copyright 2015 Google Inc.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license that can be 4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file. 5 * found in the LICENSE file.
6 */ 6 */
7 7
8 #include "GrVkGpu.h" 8 #include "GrVkGpu.h"
9 9
10 #include "GrContextOptions.h" 10 #include "GrContextOptions.h"
(...skipping 601 matching lines...)
612 return nullptr; 612 return nullptr;
613 } 613 }
614 } 614 }
615 return tgt; 615 return tgt;
616 } 616 }
617 617
618 //////////////////////////////////////////////////////////////////////////////// 618 ////////////////////////////////////////////////////////////////////////////////
619 619
620 void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc, 620 void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
621 const GrNonInstancedMesh& mesh) { 621 const GrNonInstancedMesh& mesh) {
622 // There is no need to put any memory barriers to make sure host writes have finished here.
623 // When a command buffer is submitted to a queue, there is an implicit memory barrier that
624 // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
625 // an active RenderPass.
622 GrVkVertexBuffer* vbuf; 626 GrVkVertexBuffer* vbuf;
623 vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer(); 627 vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer();
624 SkASSERT(vbuf); 628 SkASSERT(vbuf);
625 SkASSERT(!vbuf->isMapped()); 629 SkASSERT(!vbuf->isMapped());
626 630
627 vbuf->addMemoryBarrier(this,
628 VK_ACCESS_HOST_WRITE_BIT,
629 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
630 VK_PIPELINE_STAGE_HOST_BIT,
631 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
632 false);
633
634 fCurrentCmdBuffer->bindVertexBuffer(this, vbuf); 631 fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);
635 632
636 if (mesh.isIndexed()) { 633 if (mesh.isIndexed()) {
637 GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer(); 634 GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)mesh.indexBuffer();
638 SkASSERT(ibuf); 635 SkASSERT(ibuf);
639 SkASSERT(!ibuf->isMapped()); 636 SkASSERT(!ibuf->isMapped());
640 637
641 ibuf->addMemoryBarrier(this,
642 VK_ACCESS_HOST_WRITE_BIT,
643 VK_ACCESS_INDEX_READ_BIT,
644 VK_PIPELINE_STAGE_HOST_BIT,
645 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
646 false);
647
648 fCurrentCmdBuffer->bindIndexBuffer(this, ibuf); 638 fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
649 } 639 }
650 } 640 }
651 641
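For reference, the explicit host-write barrier removed from bindGeometry() above maps to a Vulkan buffer memory barrier like the sketch below. This is a hypothetical illustration with assumed cmd and buf handles and a made-up helper name, not Skia's GrVkBuffer::addMemoryBarrier: per the new comment, vkQueueSubmit already provides an implicit memory barrier for host writes, and buffer memory barriers may not be recorded inside an active render pass, so the explicit barrier is redundant here.

    // Hypothetical sketch (not Skia code): make prior host writes to a vertex
    // buffer visible to vertex-input reads. Must be recorded outside a render pass.
    #include <cstring>
    #include <vulkan/vulkan.h>

    static void recordHostWriteToVertexReadBarrier(VkCommandBuffer cmd, VkBuffer buf) {
        VkBufferMemoryBarrier barrier;
        memset(&barrier, 0, sizeof(VkBufferMemoryBarrier));
        barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
        barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
        barrier.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.buffer = buf;            // buffer the host wrote into
        barrier.offset = 0;
        barrier.size = VK_WHOLE_SIZE;
        vkCmdPipelineBarrier(cmd,
                             VK_PIPELINE_STAGE_HOST_BIT,
                             VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                             0,             // dependencyFlags
                             0, nullptr,    // global memory barriers
                             1, &barrier,   // buffer memory barriers
                             0, nullptr);   // image memory barriers
    }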
652 //////////////////////////////////////////////////////////////////////////////// 642 ////////////////////////////////////////////////////////////////////////////////
653 643
654 GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt, 644 GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
655 int width, 645 int width,
656 int height) { 646 int height) {
657 SkASSERT(width >= rt->width()); 647 SkASSERT(width >= rt->width());
(...skipping 118 matching lines...)
776 // TODO: Add support for copying to optimal tiling 766 // TODO: Add support for copying to optimal tiling
777 SkASSERT(false); 767 SkASSERT(false);
778 } 768 }
779 } 769 }
780 770
781 GrVkTextureInfo* info = new GrVkTextureInfo; 771 GrVkTextureInfo* info = new GrVkTextureInfo;
782 info->fImage = image; 772 info->fImage = image;
783 info->fAlloc = alloc; 773 info->fAlloc = alloc;
784 info->fImageTiling = imageTiling; 774 info->fImageTiling = imageTiling;
785 info->fImageLayout = initialLayout; 775 info->fImageLayout = initialLayout;
776 info->fFormat = pixelFormat;
786 777
787 return (GrBackendObject)info; 778 return (GrBackendObject)info;
788 } 779 }
789 780
790 bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const { 781 bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
791 const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id); 782 const GrVkTextureInfo* backend = reinterpret_cast<const GrVkTextureInfo*>(id);
792 783
793 if (backend && backend->fImage && backend->fAlloc) { 784 if (backend && backend->fImage && backend->fAlloc) {
794 VkMemoryRequirements req; 785 VkMemoryRequirements req;
795 memset(&req, 0, sizeof(req)); 786 memset(&req, 0, sizeof(req));
(...skipping 130 matching lines...)
926 memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue)); 917 memset(&vkStencilColor, 0, sizeof(VkClearDepthStencilValue));
927 if (insideClip) { 918 if (insideClip) {
928 vkStencilColor.stencil = (1 << (stencilBitCount - 1)); 919 vkStencilColor.stencil = (1 << (stencilBitCount - 1));
929 } else { 920 } else {
930 vkStencilColor.stencil = 0; 921 vkStencilColor.stencil = 0;
931 } 922 }
932 923
933 VkImageLayout origDstLayout = vkStencil->currentLayout(); 924 VkImageLayout origDstLayout = vkStencil->currentLayout();
934 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout); 925 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
935 VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; 926 VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
936 VkPipelineStageFlags srcStageMask = 927 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
937 GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
938 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; 928 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
939 vkStencil->setImageLayout(this, 929 vkStencil->setImageLayout(this,
940 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, 930 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
941 srcAccessMask, 931 srcAccessMask,
942 dstAccessMask, 932 dstAccessMask,
943 srcStageMask, 933 srcStageMask,
944 dstStageMask, 934 dstStageMask,
945 false); 935 false);
946 936
937 // Change layout of our render target so it can be used as the color attachment. This is what
938 // the render pass expects when it begins.
939 VkImageLayout layout = vkRT->currentLayout();
940 srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
941 dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
942 srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
943 dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
944 vkRT->setImageLayout(this,
945 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
946 srcAccessMask,
947 dstAccessMask,
948 srcStageMask,
949 dstStageMask,
950 false);
951
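Under the hood, a layout change such as the vkRT->setImageLayout() call added above is recorded as a VkImageMemoryBarrier via vkCmdPipelineBarrier. The sketch below is a generic illustration with assumed handles and a hypothetical helper name, not Skia's GrVkImage implementation.

    // Hypothetical sketch (not Skia code): transition an image so it can be used
    // as the color attachment the render pass expects.
    #include <cstring>
    #include <vulkan/vulkan.h>

    static void transitionToColorAttachment(VkCommandBuffer cmd, VkImage image,
                                            VkImageLayout oldLayout,
                                            VkAccessFlags srcAccessMask,
                                            VkPipelineStageFlags srcStageMask,
                                            VkPipelineStageFlags dstStageMask) {
        VkImageMemoryBarrier barrier;
        memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barrier.srcAccessMask = srcAccessMask;   // e.g. from GrVkMemory::LayoutToSrcAccessMask()
        barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
        barrier.oldLayout = oldLayout;
        barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.image = image;
        barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
        vkCmdPipelineBarrier(cmd, srcStageMask, dstStageMask, 0,
                             0, nullptr, 0, nullptr, 1, &barrier);
    }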
947 VkClearRect clearRect; 952 VkClearRect clearRect;
948 // Flip rect if necessary 953 // Flip rect if necessary
949 SkIRect vkRect = rect; 954 SkIRect vkRect = rect;
950 955
951 if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) { 956 if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
952 vkRect.fTop = vkRT->height() - rect.fBottom; 957 vkRect.fTop = vkRT->height() - rect.fBottom;
953 vkRect.fBottom = vkRT->height() - rect.fTop; 958 vkRect.fBottom = vkRT->height() - rect.fTop;
954 } 959 }
955 960
956 clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop }; 961 clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
(...skipping 26 matching lines...)
983 988
984 VkClearColorValue vkColor; 989 VkClearColorValue vkColor;
985 GrColorToRGBAFloat(color, vkColor.float32); 990 GrColorToRGBAFloat(color, vkColor.float32);
986 991
987 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target); 992 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target);
988 VkImageLayout origDstLayout = vkRT->currentLayout(); 993 VkImageLayout origDstLayout = vkRT->currentLayout();
989 994
990 if (rect.width() != target->width() || rect.height() != target->height()) { 995 if (rect.width() != target->width() || rect.height() != target->height()) {
991 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout); 996 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
992 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; 997 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
993 VkPipelineStageFlags srcStageMask = 998 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
994 GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
995 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; 999 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
996 vkRT->setImageLayout(this, 1000 vkRT->setImageLayout(this,
997 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, 1001 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
998 srcAccessMask, 1002 srcAccessMask,
999 dstAccessMask, 1003 dstAccessMask,
1000 srcStageMask, 1004 srcStageMask,
1001 dstStageMask, 1005 dstStageMask,
1002 false); 1006 false);
1003 1007
1008 // If we are using a stencil attachment we also need to change its layout to what the render
1009 // pass is expecting.
1010 if (GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment()) {
1011 GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
1012 origDstLayout = vkStencil->currentLayout();
1013 srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
1014 dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
1015 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
1016 srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
1017 dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1018 vkStencil->setImageLayout(this,
1019 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
1020 srcAccessMask,
1021 dstAccessMask,
1022 srcStageMask,
1023 dstStageMask,
1024 false);
1025 }
1026
1004 VkClearRect clearRect; 1027 VkClearRect clearRect;
1005 // Flip rect if necessary 1028 // Flip rect if necessary
1006 SkIRect vkRect = rect; 1029 SkIRect vkRect = rect;
1007 if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) { 1030 if (kBottomLeft_GrSurfaceOrigin == vkRT->origin()) {
1008 vkRect.fTop = vkRT->height() - rect.fBottom; 1031 vkRect.fTop = vkRT->height() - rect.fBottom;
1009 vkRect.fBottom = vkRT->height() - rect.fTop; 1032 vkRect.fBottom = vkRT->height() - rect.fTop;
1010 } 1033 }
1011 clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop }; 1034 clearRect.rect.offset = { vkRect.fLeft, vkRect.fTop };
1012 clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() }; 1035 clearRect.rect.extent = { (uint32_t)vkRect.width(), (uint32_t)vkRect.height() };
1013 clearRect.baseArrayLayer = 0; 1036 clearRect.baseArrayLayer = 0;
(...skipping 462 matching lines...)
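Context for the clear-rect setup above: a clear of a sub-rectangle is normally issued with vkCmdClearAttachments, which must be recorded inside a render pass instance; the elided lines presumably route such a call through GrVkCommandBuffer. Below is a generic sketch with assumed names, not Skia's code.

    // Hypothetical sketch (not Skia code): clear a sub-rectangle of color
    // attachment 0 from inside an active render pass.
    #include <vulkan/vulkan.h>

    static void clearColorRect(VkCommandBuffer cmd, VkClearColorValue color,
                               const VkClearRect& clearRect) {
        VkClearAttachment attachment;
        attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        attachment.colorAttachment = 0;   // index into the subpass's color attachments
        attachment.clearValue.color = color;
        vkCmdClearAttachments(cmd, 1, &attachment, 1, &clearRect);
    }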
1476 const GrMesh* meshes, 1499 const GrMesh* meshes,
1477 int meshCount) { 1500 int meshCount) {
1478 if (!meshCount) { 1501 if (!meshCount) {
1479 return; 1502 return;
1480 } 1503 }
1481 GrRenderTarget* rt = pipeline.getRenderTarget(); 1504 GrRenderTarget* rt = pipeline.getRenderTarget();
1482 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt); 1505 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt);
1483 const GrVkRenderPass* renderPass = vkRT->simpleRenderPass(); 1506 const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
1484 SkASSERT(renderPass); 1507 SkASSERT(renderPass);
1485 1508
1486 fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
1487 1509
1488 GrPrimitiveType primitiveType = meshes[0].primitiveType(); 1510 GrPrimitiveType primitiveType = meshes[0].primitiveType();
1489 sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline, 1511 sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline,
1490 primProc, 1512 primProc,
1491 primitiveType, 1513 primitiveType,
1492 *renderPass); 1514 *renderPass);
1493 if (!pipelineState) { 1515 if (!pipelineState) {
1494 return; 1516 return;
1495 } 1517 }
1496 1518
1497 // Change layout of our render target so it can be used as the color attachment 1519 // Change layout of our render target so it can be used as the color attachment
1498 VkImageLayout layout = vkRT->currentLayout(); 1520 VkImageLayout layout = vkRT->currentLayout();
1499 // Our color attachment is purely a destination and won't be read so don't need to flush or
1500 // invalidate any caches
1501 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout); 1521 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
1502 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; 1522 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1503 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout); 1523 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
1504 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; 1524 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1505 vkRT->setImageLayout(this, 1525 vkRT->setImageLayout(this,
1506 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, 1526 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
1507 srcAccessMask, 1527 srcAccessMask,
1508 dstAccessMask, 1528 dstAccessMask,
1509 srcStageMask, 1529 srcStageMask,
1510 dstStageMask, 1530 dstStageMask,
1511 false); 1531 false);
1512 1532
1513 // If we are using a stencil attachment we also need to update its layout 1533 // If we are using a stencil attachment we also need to update its layout
1514 if (!pipeline.getStencil().isDisabled()) { 1534 if (GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment()) {
1515 GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
1516 GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil; 1535 GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
1517 VkImageLayout origDstLayout = vkStencil->currentLayout(); 1536 VkImageLayout origDstLayout = vkStencil->currentLayout();
1518 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout); 1537 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
1519 VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | 1538 VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
1520 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; 1539 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
1521 VkPipelineStageFlags srcStageMask = 1540 VkPipelineStageFlags srcStageMask =
1522 GrVkMemory::LayoutToPipelineStageFlags(origDstLayout); 1541 GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
1523 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; 1542 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
1524 vkStencil->setImageLayout(this, 1543 vkStencil->setImageLayout(this,
1525 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, 1544 VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
1526 srcAccessMask, 1545 srcAccessMask,
1527 dstAccessMask, 1546 dstAccessMask,
1528 srcStageMask, 1547 srcStageMask,
1529 dstStageMask, 1548 dstStageMask,
1530 false); 1549 false);
1531 } 1550 }
1532 1551
1552 fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
1533 1553
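Note the ordering this hunk enforces: beginRenderPass() is moved from the top of onDraw() (old line 1486) to this point, after the attachment layout changes, because the barriers recorded by setImageLayout() are only valid outside a render pass instance (vkCmdPipelineBarrier inside a pass requires a matching subpass self-dependency). A minimal sketch of the resulting command order, with assumed names:

    // Hypothetical sketch (not Skia code) of the command ordering after this change.
    #include <vulkan/vulkan.h>

    static void recordDraw(VkCommandBuffer cmd, const VkRenderPassBeginInfo& beginInfo) {
        // 1) Attachment layout transitions (vkCmdPipelineBarrier) are recorded
        //    before this point, outside any render pass instance.
        // 2) Only then is the render pass begun.
        vkCmdBeginRenderPass(cmd, &beginInfo, VK_SUBPASS_CONTENTS_INLINE);
        // 3) Bind pipeline/geometry and issue draw calls here; no barriers inside.
        vkCmdEndRenderPass(cmd);
    }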
1534 for (int i = 0; i < meshCount; ++i) { 1554 for (int i = 0; i < meshCount; ++i) {
1535 if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
1536 this->xferBarrier(pipeline.getRenderTarget(), barrierType);
1537 }
1538
1539 const GrMesh& mesh = meshes[i]; 1555 const GrMesh& mesh = meshes[i];
1540 GrMesh::Iterator iter; 1556 GrMesh::Iterator iter;
1541 const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh); 1557 const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh);
1542 do { 1558 do {
1543 if (nonIdxMesh->primitiveType() != primitiveType) { 1559 if (nonIdxMesh->primitiveType() != primitiveType) {
1544 // Technically we don't have to call this here (since there is a safety check in 1560 // Technically we don't have to call this here (since there is a safety check in
1545 // pipelineState:setData but this will allow for quicker freeing of resources if the 1561 // pipelineState:setData but this will allow for quicker freeing of resources if the
1546 // pipelineState sits in a cache for a while. 1562 // pipelineState sits in a cache for a while.
1547 pipelineState->freeTempResources(this); 1563 pipelineState->freeTempResources(this);
1548 SkDEBUGCODE(pipelineState = nullptr); 1564 SkDEBUGCODE(pipelineState = nullptr);
1549 primitiveType = nonIdxMesh->primitiveType(); 1565 primitiveType = nonIdxMesh->primitiveType();
1566 // It is illegal for us to have the necessary memory barriers for when we write and
1567 // update the uniform buffers in prepareDrawState while in an active render pass.
1568 // Thus we must end the current one and then start it up again.
1569 fCurrentCmdBuffer->endRenderPass(this);
1550 pipelineState = this->prepareDrawState(pipeline, 1570 pipelineState = this->prepareDrawState(pipeline,
1551 primProc, 1571 primProc,
1552 primitiveType, 1572 primitiveType,
1553 *renderPass); 1573 *renderPass);
1554 if (!pipelineState) { 1574 if (!pipelineState) {
1555 return; 1575 return;
1556 } 1576 }
1577 fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
1557 } 1578 }
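The endRenderPass()/beginRenderPass() pair added above is a suspend-and-resume pattern: when the primitive type changes, prepareDrawState() may write and update uniform buffers, and the buffer updates and memory barriers that requires cannot be recorded while a render pass instance is active. A generic sketch of the pattern, using vkCmdUpdateBuffer only as a stand-in for whatever update mechanism the Skia code actually uses:

    // Hypothetical sketch (not Skia code): leave the render pass, record work that
    // is illegal inside it, then begin the pass again before drawing continues.
    #include <vulkan/vulkan.h>

    static void updateBetweenDraws(VkCommandBuffer cmd,
                                   const VkRenderPassBeginInfo& beginInfo,
                                   VkBuffer uniforms, VkDeviceSize offset,
                                   VkDeviceSize size, const void* data) {
        vkCmdEndRenderPass(cmd);                              // suspend the pass
        vkCmdUpdateBuffer(cmd, uniforms, offset, size, data); // transfer op: not allowed in a pass
        // (a transfer-write to uniform-read barrier would normally be recorded here too)
        vkCmdBeginRenderPass(cmd, &beginInfo, VK_SUBPASS_CONTENTS_INLINE);
    }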
1558 SkASSERT(pipelineState); 1579 SkASSERT(pipelineState);
1559 this->bindGeometry(primProc, *nonIdxMesh); 1580 this->bindGeometry(primProc, *nonIdxMesh);
1560 1581
1561 if (nonIdxMesh->isIndexed()) { 1582 if (nonIdxMesh->isIndexed()) {
1562 fCurrentCmdBuffer->drawIndexed(this, 1583 fCurrentCmdBuffer->drawIndexed(this,
1563 nonIdxMesh->indexCount(), 1584 nonIdxMesh->indexCount(),
1564 1, 1585 1,
1565 nonIdxMesh->startIndex(), 1586 nonIdxMesh->startIndex(),
1566 nonIdxMesh->startVertex(), 1587 nonIdxMesh->startVertex(),
(...skipping 23 matching lines...)
1590 aglSwapBuffers(aglGetCurrentContext()); 1611 aglSwapBuffers(aglGetCurrentContext());
1591 int set_a_break_pt_here = 9; 1612 int set_a_break_pt_here = 9;
1592 aglSwapBuffers(aglGetCurrentContext()); 1613 aglSwapBuffers(aglGetCurrentContext());
1593 #elif defined(SK_BUILD_FOR_WIN32) 1614 #elif defined(SK_BUILD_FOR_WIN32)
1594 SwapBuf(); 1615 SwapBuf();
1595 int set_a_break_pt_here = 9; 1616 int set_a_break_pt_here = 9;
1596 SwapBuf(); 1617 SwapBuf();
1597 #endif 1618 #endif
1598 #endif 1619 #endif
1599 } 1620 }