| Index: src/gpu/vk/GrVkGpu.cpp
|
| diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp
|
| index c14321937fe9004ab044b607d95aa0fc4ecc8e6b..e4314deeead02d891027948ba478e89ac4432705 100644
|
| --- a/src/gpu/vk/GrVkGpu.cpp
|
| +++ b/src/gpu/vk/GrVkGpu.cpp
|
| @@ -249,11 +249,13 @@ GrBuffer* GrVkGpu::onCreateBuffer(size_t size, GrBufferType type, GrAccessPatter
|
| buff = GrVkIndexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
|
| break;
|
| case kXferCpuToGpu_GrBufferType:
|
| - SkASSERT(kStream_GrAccessPattern == accessPattern);
|
| + SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
|
| + kStream_GrAccessPattern == accessPattern);
|
| buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyRead_Type);
|
| break;
|
| case kXferGpuToCpu_GrBufferType:
|
| - SkASSERT(kStream_GrAccessPattern == accessPattern);
|
| + SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
|
| + kStream_GrAccessPattern == accessPattern);
|
| buff = GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyWrite_Type);
|
| break;
|
| default:
|
| @@ -375,6 +377,86 @@ bool GrVkGpu::onWritePixels(GrSurface* surface,
|
| return success;
|
| }
|
|
|
| +bool GrVkGpu::onTransferPixels(GrTexture* texture,
|
| + int left, int top, int width, int height,
|
| + GrPixelConfig config, GrBuffer* transferBuffer,
|
| + size_t bufferOffset, size_t rowBytes) {
|
| + GrVkTexture* vkTex = static_cast<GrVkTexture*>(texture);
|
| + if (!vkTex) {
|
| + return false;
|
| + }
|
| + GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer);
|
| + if (!vkBuffer) {
|
| + return false;
|
| + }
|
| +
|
| + // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
|
| + if (GrPixelConfigIsSRGB(texture->config()) != GrPixelConfigIsSRGB(config)) {
|
| + return false;
|
| + }
|
| +
|
| + // TODO: Handle y axis flip via copy to temp image, then blit to final
|
| + if (kBottomLeft_GrSurfaceOrigin == vkTex->origin()) {
|
| + return false;
|
| + }
|
| +
|
| + bool success = false;
|
| + if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
|
| + // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
|
| + SkASSERT(config == vkTex->desc().fConfig);
|
| + // TODO: add compressed texture support
|
| + // delete the following line and uncomment the two after that when ready
|
| + // NOTE(review): removed vkTex->unref() here -- vkTex is a bare static_cast of the caller-owned texture parameter and was never ref'd in this function, so unreffing it on this early-out would over-release the texture
|
| + return false;
|
| + //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
|
| + // height);
|
| + } else {
|
| + // make sure the unmap has finished
|
| + vkBuffer->addMemoryBarrier(this,
|
| + VK_ACCESS_HOST_WRITE_BIT,
|
| + VK_ACCESS_TRANSFER_READ_BIT,
|
| + VK_PIPELINE_STAGE_HOST_BIT,  // fix: HOST_WRITE access is only supported by the HOST stage; TOP_OF_PIPE supports no access flags and fails validation
|
| + VK_PIPELINE_STAGE_TRANSFER_BIT,
|
| + false);
|
| +
|
| + // Set up copy region
|
| + size_t bpp = GrBytesPerPixel(config);
|
| +
|
| + VkBufferImageCopy region;
|
| + memset(®ion, 0, sizeof(VkBufferImageCopy));
|
| + region.bufferOffset = bufferOffset;
|
| + region.bufferRowLength = (uint32_t)(rowBytes/bpp);
|
| + region.bufferImageHeight = 0;
|
| + region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
|
| + region.imageOffset = { left, top, 0 };
|
| + region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
|
| +
|
| + // Change layout of our target so it can be copied to
|
| + vkTex->setImageLayout(this,
|
| + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
|
| + VK_ACCESS_TRANSFER_WRITE_BIT,
|
| + VK_PIPELINE_STAGE_TRANSFER_BIT,
|
| + false);
|
| +
|
| + // Copy the buffer to the image
|
| + fCurrentCmdBuffer->copyBufferToImage(this,
|
| + vkBuffer,
|
| + vkTex,
|
| + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
|
| + 1,
|
| + ®ion);
|
| +
|
| + success = true;
|
| + }
|
| +
|
| + if (success) {
|
| + vkTex->texturePriv().dirtyMipMaps(true);
|
| + return true;
|
| + }
|
| +
|
| + return false;
|
| +}
|
| +
|
| void GrVkGpu::resolveImage(GrVkRenderTarget* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
|
| const SkIPoint& dstPoint) {
|
| SkASSERT(dst);
|
| @@ -1888,3 +1970,32 @@ void GrVkGpu::submitSecondaryCommandBuffer(GrVkSecondaryCommandBuffer* buffer,
|
| this->didWriteToSurface(target, &bounds);
|
| }
|
|
|
| +GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() const {
|
| + VkFenceCreateInfo createInfo;
|
| + memset(&createInfo, 0, sizeof(VkFenceCreateInfo));
|
| + createInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
|
| + createInfo.pNext = nullptr;
|
| + createInfo.flags = 0;
|
| + VkFence fence = VK_NULL_HANDLE;
|
| + VkResult result = GR_VK_CALL(this->vkInterface(), CreateFence(this->device(), &createInfo,
|
| + nullptr, &fence));
|
| + // TODO: verify that all QueueSubmits before this will finish before this fence signals
|
| + if (VK_SUCCESS == result) {
|
| + GR_VK_CALL(this->vkInterface(), QueueSubmit(this->queue(), 0, nullptr, fence));
|
| + }
|
| + return (GrFence)fence;
|
| +}
|
| +
|
| +bool GrVkGpu::waitFence(GrFence fence) const {
|
| + const uint64_t kTimeout = 1000;  // NOTE(review): vkWaitForFences timeout is in nanoseconds, so this is a ~1us poll -- confirm a blocking wait was not intended
|
| + VkResult result = GR_VK_CALL(this->vkInterface(), WaitForFences(this->device(), 1,
|
| + (VkFence*)&fence,
|
| + VK_TRUE,
|
| + kTimeout));
|
| + return (VK_SUCCESS == result);
|
| +}
|
| +
|
| +void GrVkGpu::deleteFence(GrFence fence) const {
|
| + GR_VK_CALL(this->vkInterface(), DestroyFence(this->device(), (VkFence)fence, nullptr));
|
| +}
|
| +
|
|
|