| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrVkGpu.h" | 8 #include "GrVkGpu.h" |
| 9 | 9 |
| 10 #include "GrContextOptions.h" | 10 #include "GrContextOptions.h" |
| (...skipping 313 matching lines...) |
| 324 VkResult err; | 324 VkResult err; |
| 325 | 325 |
| 326 const GrVkInterface* interface = this->vkInterface(); | 326 const GrVkInterface* interface = this->vkInterface(); |
| 327 | 327 |
| 328 GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice, | 328 GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice, |
| 329 tex->image(), | 329 tex->image(), |
| 330 &subres, | 330 &subres, |
| 331 &layout)); | 331 &layout)); |
| 332 | 332 |
| 333 int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height : top; | 333 int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height : top; |
| 334 VkDeviceSize offset = texTop*layout.rowPitch + left*bpp; | 334 const GrVkAlloc& alloc = tex->alloc(); |
| 335 VkDeviceSize offset = alloc.fOffset + texTop*layout.rowPitch + left*bpp; |
| 335 VkDeviceSize size = height*layout.rowPitch; | 336 VkDeviceSize size = height*layout.rowPitch; |
| 336 void* mapPtr; | 337 void* mapPtr; |
| 337 err = GR_VK_CALL(interface, MapMemory(fDevice, tex->memory(), offset, size, 0, &mapPtr)); | 338 err = GR_VK_CALL(interface, MapMemory(fDevice, alloc.fMemory, offset, size, 0, &mapPtr)); |
| 338 if (err) { | 339 if (err) { |
| 339 return false; | 340 return false; |
| 340 } | 341 } |
| 341 | 342 |
| 342 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { | 343 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { |
| 343 // copy into buffer by rows | 344 // copy into buffer by rows |
| 344 const char* srcRow = reinterpret_cast<const char*>(data); | 345 const char* srcRow = reinterpret_cast<const char*>(data); |
| 345 char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch; | 346 char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch; |
| 346 for (int y = 0; y < height; y++) { | 347 for (int y = 0; y < height; y++) { |
| 347 memcpy(dstRow, srcRow, trimRowBytes); | 348 memcpy(dstRow, srcRow, trimRowBytes); |
| 348 srcRow += rowBytes; | 349 srcRow += rowBytes; |
| 349 dstRow -= layout.rowPitch; | 350 dstRow -= layout.rowPitch; |
| 350 } | 351 } |
| 351 } else { | 352 } else { |
| 352 // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy | 353 // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy |
| 353 if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) { | 354 if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) { |
| 354 memcpy(mapPtr, data, trimRowBytes * height); | 355 memcpy(mapPtr, data, trimRowBytes * height); |
| 355 } else { | 356 } else { |
| 356 SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes, | 357 SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes, |
| 357 trimRowBytes, height); | 358 trimRowBytes, height); |
| 358 } | 359 } |
| 359 } | 360 } |
| 360 | 361 |
| 361 GR_VK_CALL(interface, UnmapMemory(fDevice, tex->memory())); | 362 GR_VK_CALL(interface, UnmapMemory(fDevice, alloc.fMemory)); |
| 362 | 363 |
| 363 return true; | 364 return true; |
| 364 } | 365 } |
| 365 | 366 |
| 366 bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex, | 367 bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex, |
| 367 int left, int top, int width, int height, | 368 int left, int top, int width, int height, |
| 368 GrPixelConfig dataConfig, | 369 GrPixelConfig dataConfig, |
| 369 const SkTArray<GrMipLevel>& texels) { | 370 const SkTArray<GrMipLevel>& texels) { |
| 370 SkASSERT(!tex->isLinearTiled()); | 371 SkASSERT(!tex->isLinearTiled()); |
| 371 // The assumption is either that we have no mipmaps, or that our rect is the entire texture | 372 // The assumption is either that we have no mipmaps, or that our rect is the entire texture |
| (...skipping 238 matching lines...) |
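The uploadTexDataLinear hunk above is the core of this change: the texture no longer hands back a raw VkDeviceMemory, it hands back a GrVkAlloc, so the map offset must include the suballocation offset. A minimal sketch of the struct and the adjusted mapping math follows; the field names are inferred from the { VK_NULL_HANDLE, 0 } initializer and the .fMemory/.fOffset uses in this diff, and the real definition (in Skia's GrVkTypes.h) may carry more members.

// Sketch only -- GrVkAlloc fields inferred from this diff, not copied from the header.
struct GrVkAlloc {
    VkDeviceMemory fMemory;   // backing allocation, possibly shared with other resources
    VkDeviceSize   fOffset;   // byte offset of this resource within fMemory
};

// With suballocation the image does not start at offset 0 of its memory,
// so mapping the dirty sub-rect of a linear-tiled texture becomes:
const GrVkAlloc& alloc = tex->alloc();
VkDeviceSize offset = alloc.fOffset              // start of the image in the allocation
                    + texTop * layout.rowPitch   // rows above the dirty rect
                    + left * bpp;                // bytes left of the dirty rect
void* mapPtr;
err = GR_VK_CALL(interface, MapMemory(fDevice, alloc.fMemory, offset,
                                      height * layout.rowPitch, 0, &mapPtr));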
| 610 if (0 == desc.fTextureHandle) { | 611 if (0 == desc.fTextureHandle) { |
| 611 return nullptr; | 612 return nullptr; |
| 612 } | 613 } |
| 613 | 614 |
| 614 int maxSize = this->caps()->maxTextureSize(); | 615 int maxSize = this->caps()->maxTextureSize(); |
| 615 if (desc.fWidth > maxSize || desc.fHeight > maxSize) { | 616 if (desc.fWidth > maxSize || desc.fHeight > maxSize) { |
| 616 return nullptr; | 617 return nullptr; |
| 617 } | 618 } |
| 618 | 619 |
| 619 const GrVkImageInfo* info = reinterpret_cast<const GrVkImageInfo*>(desc.fTextureHandle); | 620 const GrVkImageInfo* info = reinterpret_cast<const GrVkImageInfo*>(desc.fTextureHandle); |
| 620 if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc) { | 621 if (VK_NULL_HANDLE == info->fImage || VK_NULL_HANDLE == info->fAlloc.fMemory) { |
| 621 return nullptr; | 622 return nullptr; |
| 622 } | 623 } |
| 623 #ifdef SK_DEBUG | 624 #ifdef SK_DEBUG |
| 624 VkFormat format; | 625 VkFormat format; |
| 625 if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) { | 626 if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) { |
| 626 return nullptr; | 627 return nullptr; |
| 627 } | 628 } |
| 628 SkASSERT(format == info->fFormat); | 629 SkASSERT(format == info->fFormat); |
| 629 #endif | 630 #endif |
| 630 | 631 |
| (...skipping 22 matching lines...) |
| 653 | 654 |
| 654 return texture; | 655 return texture; |
| 655 } | 656 } |
| 656 | 657 |
| 657 GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc, | 658 GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc, |
| 658 GrWrapOwnership ownership) { | 659 GrWrapOwnership ownership) { |
| 659 | 660 |
| 660 const GrVkImageInfo* info = | 661 const GrVkImageInfo* info = |
| 661 reinterpret_cast<const GrVkImageInfo*>(wrapDesc.fRenderTargetHandle); | 662 reinterpret_cast<const GrVkImageInfo*>(wrapDesc.fRenderTargetHandle); |
| 662 if (VK_NULL_HANDLE == info->fImage || | 663 if (VK_NULL_HANDLE == info->fImage || |
| 663 (VK_NULL_HANDLE == info->fAlloc && kAdopt_GrWrapOwnership == ownership)) { | 664 (VK_NULL_HANDLE == info->fAlloc.fMemory && kAdopt_GrWrapOwnership == ownership)) { |
| 664 return nullptr; | 665 return nullptr; |
| 665 } | 666 } |
| 666 | 667 |
| 667 GrSurfaceDesc desc; | 668 GrSurfaceDesc desc; |
| 668 desc.fConfig = wrapDesc.fConfig; | 669 desc.fConfig = wrapDesc.fConfig; |
| 669 desc.fFlags = kCheckAllocation_GrSurfaceFlag; | 670 desc.fFlags = kCheckAllocation_GrSurfaceFlag; |
| 670 desc.fWidth = wrapDesc.fWidth; | 671 desc.fWidth = wrapDesc.fWidth; |
| 671 desc.fHeight = wrapDesc.fHeight; | 672 desc.fHeight = wrapDesc.fHeight; |
| 672 desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount()); | 673 desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount()); |
| 673 | 674 |
| (...skipping 192 matching lines...) |
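The wrap hunks above only touch validation: with fAlloc now a struct, the null check moves to its fMemory member. Note the asymmetry already present in the old code and preserved here: onWrapBackendTexture rejects a missing allocation outright, while onWrapBackendRenderTarget only requires one when the object is adopted, since adopting means Skia will eventually free that memory itself. A small sketch of the render-target-side pattern, using the GrVkImageInfo fields as they appear in this diff:

// Borrowed render targets may arrive without memory info; adopted ones may not,
// because ownership of the allocation transfers to Skia.
const GrVkImageInfo* info = reinterpret_cast<const GrVkImageInfo*>(wrapDesc.fRenderTargetHandle);
if (VK_NULL_HANDLE == info->fImage ||
    (VK_NULL_HANDLE == info->fAlloc.fMemory && kAdopt_GrWrapOwnership == ownership)) {
    return nullptr;
}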
| 866 } | 867 } |
| 867 | 868 |
| 868 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT; | 869 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT; |
| 869 usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT; | 870 usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT; |
| 870 usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT; | 871 usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT; |
| 871 | 872 |
| 872 VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT : | 873 VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT : |
| 873 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; | 874 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; |
| 874 | 875 |
| 875 VkImage image = VK_NULL_HANDLE; | 876 VkImage image = VK_NULL_HANDLE; |
| 876 VkDeviceMemory alloc = VK_NULL_HANDLE; | 877 GrVkAlloc alloc = { VK_NULL_HANDLE, 0 }; |
| 877 | 878 |
| 878 VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL; | 879 VkImageTiling imageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL; |
| 879 VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling) | 880 VkImageLayout initialLayout = (VK_IMAGE_TILING_LINEAR == imageTiling) |
| 880 ? VK_IMAGE_LAYOUT_PREINITIALIZED | 881 ? VK_IMAGE_LAYOUT_PREINITIALIZED |
| 881 : VK_IMAGE_LAYOUT_UNDEFINED; | 882 : VK_IMAGE_LAYOUT_UNDEFINED; |
| 882 | 883 |
| 883 // Create Image | 884 // Create Image |
| 884 VkSampleCountFlagBits vkSamples; | 885 VkSampleCountFlagBits vkSamples; |
| 885 if (!GrSampleCountToVkSampleCount(1, &vkSamples)) { | 886 if (!GrSampleCountToVkSampleCount(1, &vkSamples)) { |
| 886 return 0; | 887 return 0; |
| (...skipping 30 matching lines...) |
| 917 VK_IMAGE_ASPECT_COLOR_BIT, | 918 VK_IMAGE_ASPECT_COLOR_BIT, |
| 918 0, // mipLevel | 919 0, // mipLevel |
| 919 0, // arraySlice | 920 0, // arraySlice |
| 920 }; | 921 }; |
| 921 VkSubresourceLayout layout; | 922 VkSubresourceLayout layout; |
| 922 VkResult err; | 923 VkResult err; |
| 923 | 924 |
| 924 VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout)); | 925 VK_CALL(GetImageSubresourceLayout(fDevice, image, &subres, &layout)); |
| 925 | 926 |
| 926 void* mapPtr; | 927 void* mapPtr; |
| 927 err = VK_CALL(MapMemory(fDevice, alloc, 0, layout.rowPitch * h, 0, &mapPtr)); | 928 err = VK_CALL(MapMemory(fDevice, alloc.fMemory, alloc.fOffset, layout.rowPitch * h, |
| 929 0, &mapPtr)); |
| 928 if (err) { | 930 if (err) { |
| 929 VK_CALL(FreeMemory(this->device(), alloc, nullptr)); | 931 GrVkMemory::FreeImageMemory(this, alloc); |
| 930 VK_CALL(DestroyImage(this->device(), image, nullptr)); | 932 VK_CALL(DestroyImage(this->device(), image, nullptr)); |
| 931 return 0; | 933 return 0; |
| 932 } | 934 } |
| 933 | 935 |
| 934 size_t bpp = GrBytesPerPixel(config); | 936 size_t bpp = GrBytesPerPixel(config); |
| 935 size_t rowCopyBytes = bpp * w; | 937 size_t rowCopyBytes = bpp * w; |
| 936 // If there is no padding on dst (layout.rowPitch) we can do a single memcopy. | 938 // If there is no padding on dst (layout.rowPitch) we can do a single memcopy. |
| 937 // This assumes the srcData comes in with no padding. | 939 // This assumes the srcData comes in with no padding. |
| 938 if (rowCopyBytes == layout.rowPitch) { | 940 if (rowCopyBytes == layout.rowPitch) { |
| 939 memcpy(mapPtr, srcData, rowCopyBytes * h); | 941 memcpy(mapPtr, srcData, rowCopyBytes * h); |
| 940 } else { | 942 } else { |
| 941 SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes, | 943 SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), srcData, rowCopyBytes, |
| 942 rowCopyBytes, h); | 944 rowCopyBytes, h); |
| 943 } | 945 } |
| 944 VK_CALL(UnmapMemory(fDevice, alloc)); | 946 VK_CALL(UnmapMemory(fDevice, alloc.fMemory)); |
| 945 } else { | 947 } else { |
| 946 // TODO: Add support for copying to optimal tiling | 948 // TODO: Add support for copying to optimal tiling |
| 947 SkASSERT(false); | 949 SkASSERT(false); |
| 948 } | 950 } |
| 949 } | 951 } |
| 950 | 952 |
| 951 GrVkImageInfo* info = new GrVkImageInfo; | 953 GrVkImageInfo* info = new GrVkImageInfo; |
| 952 info->fImage = image; | 954 info->fImage = image; |
| 953 info->fAlloc = alloc; | 955 info->fAlloc = alloc; |
| 954 info->fImageTiling = imageTiling; | 956 info->fImageTiling = imageTiling; |
| 955 info->fImageLayout = initialLayout; | 957 info->fImageLayout = initialLayout; |
| 956 info->fFormat = pixelFormat; | 958 info->fFormat = pixelFormat; |
| 957 info->fLevelCount = 1; | 959 info->fLevelCount = 1; |
| 958 | 960 |
| 959 return (GrBackendObject)info; | 961 return (GrBackendObject)info; |
| 960 } | 962 } |
| 961 | 963 |
| 962 bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const { | 964 bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const { |
| 963 const GrVkImageInfo* backend = reinterpret_cast<const GrVkImageInfo*>(id); | 965 const GrVkImageInfo* backend = reinterpret_cast<const GrVkImageInfo*>(id); |
| 964 | 966 |
| 965 if (backend && backend->fImage && backend->fAlloc) { | 967 if (backend && backend->fImage && backend->fAlloc.fMemory) { |
| 966 VkMemoryRequirements req; | 968 VkMemoryRequirements req; |
| 967 memset(&req, 0, sizeof(req)); | 969 memset(&req, 0, sizeof(req)); |
| 968 GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice, | 970 GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice, |
| 969 backend->fImage, | 971 backend->fImage, |
| 970 &req)); | 972 &req)); |
| 971 // TODO: find a better check | 973 // TODO: find a better check |
| 972 // This will probably fail with a different driver | 974 // This will probably fail with a different driver |
| 973 return (req.size > 0) && (req.size <= 8192 * 8192); | 975 return (req.size > 0) && (req.size <= 8192 * 8192); |
| 974 } | 976 } |
| 975 | 977 |
| 976 return false; | 978 return false; |
| 977 } | 979 } |
| 978 | 980 |
| 979 void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) { | 981 void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) { |
| 980 const GrVkImageInfo* backend = reinterpret_cast<const GrVkImageInfo*>(id); | 982 const GrVkImageInfo* backend = reinterpret_cast<const GrVkImageInfo*>(id); |
| 981 | 983 |
| 982 if (backend) { | 984 if (backend) { |
| 983 if (!abandon) { | 985 if (!abandon) { |
| 984 // something in the command buffer may still be using this, so force submit | 986 // something in the command buffer may still be using this, so force submit |
| 985 this->submitCommandBuffer(kForce_SyncQueue); | 987 this->submitCommandBuffer(kForce_SyncQueue); |
| 986 | 988 |
| 987 VK_CALL(FreeMemory(this->device(), backend->fAlloc, nullptr)); | 989 GrVkMemory::FreeImageMemory(this, backend->fAlloc); |
| 988 VK_CALL(DestroyImage(this->device(), backend->fImage, nullptr)); | 990 VK_CALL(DestroyImage(this->device(), backend->fImage, nullptr)); |
| 989 } | 991 } |
| 990 delete backend; | 992 delete backend; |
| 991 } | 993 } |
| 992 } | 994 } |
| 993 | 995 |
| 994 //////////////////////////////////////////////////////////////////////////////// | 996 //////////////////////////////////////////////////////////////////////////////// |
| 995 | 997 |
| 996 void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask, | 998 void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask, |
| 997 VkPipelineStageFlags dstStageMask, | 999 VkPipelineStageFlags dstStageMask, |
| (...skipping 687 matching lines...) |
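The testing-only backend texture hunks above make the create and delete paths symmetric with the new allocator: memory handed out as a GrVkAlloc is returned through GrVkMemory::FreeImageMemory instead of a raw vkFreeMemory, and the host map now starts at alloc.fOffset. A rough sketch of the lifecycle this assumes; the FreeImageMemory signature is taken from the calls in this diff, while the allocation-side helper is a guess at the matching GrVkMemory declaration and may differ from the actual header.

// Assumed allocator pairing -- only FreeImageMemory appears in this diff;
// AllocAndBindImageMemory is a hypothetical counterpart for illustration.
GrVkAlloc alloc = { VK_NULL_HANDLE, 0 };
if (!GrVkMemory::AllocAndBindImageMemory(this, image, true /*linearTiling*/, &alloc)) {
    VK_CALL(DestroyImage(this->device(), image, nullptr));
    return 0;
}

void* mapPtr;
err = VK_CALL(MapMemory(fDevice, alloc.fMemory, alloc.fOffset, layout.rowPitch * h, 0, &mapPtr));
if (err) {
    // Teardown must go through the same allocator that produced the GrVkAlloc,
    // otherwise a suballocated block would be freed out from under its neighbors.
    GrVkMemory::FreeImageMemory(this, alloc);
    VK_CALL(DestroyImage(this->device(), image, nullptr));
    return 0;
}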
| 1685 aglSwapBuffers(aglGetCurrentContext()); | 1687 aglSwapBuffers(aglGetCurrentContext()); |
| 1686 int set_a_break_pt_here = 9; | 1688 int set_a_break_pt_here = 9; |
| 1687 aglSwapBuffers(aglGetCurrentContext()); | 1689 aglSwapBuffers(aglGetCurrentContext()); |
| 1688 #elif defined(SK_BUILD_FOR_WIN32) | 1690 #elif defined(SK_BUILD_FOR_WIN32) |
| 1689 SwapBuf(); | 1691 SwapBuf(); |
| 1690 int set_a_break_pt_here = 9; | 1692 int set_a_break_pt_here = 9; |
| 1691 SwapBuf(); | 1693 SwapBuf(); |
| 1692 #endif | 1694 #endif |
| 1693 #endif | 1695 #endif |
| 1694 } | 1696 } |