OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright 2015 Google Inc. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. |
| 6 */ |
| 7 |
| 8 #include "GrVkGpu.h" |
| 9 |
| 10 #include "GrContextOptions.h" |
| 11 #include "GrGeometryProcessor.h" |
| 12 #include "GrGpuResourceCacheAccess.h" |
| 13 #include "GrPipeline.h" |
| 14 #include "GrRenderTargetPriv.h" |
| 15 #include "GrSurfacePriv.h" |
| 16 #include "GrTexturePriv.h" |
| 17 #include "GrVertices.h" |
| 18 |
| 19 #include "GrVkCommandBuffer.h" |
| 20 #include "GrVkImage.h" |
| 21 #include "GrVkIndexBuffer.h" |
| 22 #include "GrVkMemory.h" |
| 23 #include "GrVkPipeline.h" |
| 24 #include "GrVkProgram.h" |
| 25 #include "GrVkProgramBuilder.h" |
| 26 #include "GrVkProgramDesc.h" |
| 27 #include "GrVkRenderPass.h" |
| 28 #include "GrVkResourceProvider.h" |
| 29 #include "GrVkTexture.h" |
| 30 #include "GrVkTextureRenderTarget.h" |
| 31 #include "GrVkTransferBuffer.h" |
| 32 #include "GrVkVertexBuffer.h" |
| 33 |
| 34 #include "SkConfig8888.h" |
| 35 |
| 36 #include "vk/GrVkInterface.h" |
| 37 |
| 38 #define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X) |
| 39 #define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X) |
| 40 #define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X) |
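|      // These macros dispatch Vulkan calls through this GrVkGpu's GrVkInterface function |
|      // table rather than calling the entry points directly. |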
| 41 |
| 42 //////////////////////////////////////////////////////////////////////////////// |
| 43 // Stuff used to set up a GrVkGpu secretly for now. |
| 44 |
| 45 // For now the VkGpuCreate is using the same signature as GL. This is mostly for ease of |
| 46 // hiding this code from official Skia. In the end the VkGpuCreate will not take a GrBackendContext |
| 47 // and most likely would take an optional device and queues to use. |
| 48 GrGpu* vk_gpu_create(GrBackendContext backendContext, const GrContextOptions& options, |
| 49 GrContext* context) { |
| 50 // Below is Vulkan setup code that normally would be done by a client, but we will do it here for now |
| 51 // for testing purposes. |
| 52 VkPhysicalDevice physDev; |
| 53 VkDevice device; |
| 54 VkInstance inst; |
| 55 VkResult err; |
| 56 |
| 57 const VkApplicationInfo app_info = { |
| 58 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType |
| 59 nullptr, // pNext |
| 60 "vktest", // pApplicationName |
| 61 0, // applicationVersion |
| 62 "vktest", // pEngineName |
| 63 0, // engineVersion |
| 64 VK_API_VERSION, // apiVersion |
| 65 }; |
| 66 const VkInstanceCreateInfo instance_create = { |
| 67 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType |
| 68 nullptr, // pNext |
| 69 0, // flags |
| 70 &app_info, // pApplicationInfo |
| 71 0, // enabledLayerNameCount |
| 72 nullptr, // ppEnabledLayerNames |
| 73 0, // enabledExtensionNameCount |
| 74 nullptr, // ppEnabledExtensionNames |
| 75 }; |
| 76 err = vkCreateInstance(&instance_create, nullptr, &inst); |
| 77 if (err < 0) { |
| 78 SkDebugf("vkCreateInstance failed: %d\n", err); |
| 79 SkFAIL("failing"); |
| 80 } |
| 81 |
| 82 uint32_t gpuCount; |
| 83 err = vkEnumeratePhysicalDevices(inst, &gpuCount, nullptr); |
| 84 if (err) { |
| 85 SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err); |
| 86 SkFAIL("failing"); |
| 87 } |
| 88 SkASSERT(gpuCount > 0); |
| 89 // Just returning the first physical device instead of getting the whole array. |
| 90 gpuCount = 1; |
| 91 err = vkEnumeratePhysicalDevices(inst, &gpuCount, &physDev); |
| 92 if (err) { |
| 93 SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err); |
| 94 SkFAIL("failing"); |
| 95 } |
| 96 |
| 97 // query to get the initial queue props size |
| 98 uint32_t queueCount; |
| 99 vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr); |
| 100 SkASSERT(queueCount >= 1); |
| 101 |
| 102 SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties)); |
| 103 // now get the actual queue props |
| 104 VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get(); |
| 105 |
| 106 vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps); |
| 107 |
| 108 // iterate to find the graphics queue |
| 109 uint32_t graphicsQueueIndex = -1; |
| 110 for (uint32_t i = 0; i < queueCount; i++) { |
| 111 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) { |
| 112 graphicsQueueIndex = i; |
| 113 break; |
| 114 } |
| 115 } |
| 116 SkASSERT(graphicsQueueIndex < queueCount); |
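|      // Note: graphicsQueueIndex is unsigned, so the -1 sentinel above wraps to UINT32_MAX; |
|      // the assert therefore also fires if no graphics-capable queue family was found. |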
| 117 |
| 118 float queuePriorities[1] = { 0.0 }; |
| 119 const VkDeviceQueueCreateInfo queueInfo = { |
| 120 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType |
| 121 nullptr, // pNext |
| 122 0, // VkDeviceQueueCreateFlags |
| 123 graphicsQueueIndex, // queueFamilyIndex |
| 124 1, // queueCount |
| 125 queuePriorities, // pQueuePriorities |
| 126 }; |
| 127 const VkDeviceCreateInfo deviceInfo = { |
| 128 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType |
| 129 nullptr, // pNext |
| 130 0, // VkDeviceCreateFlags |
| 131 1, // queueCreateInfoCount |
| 132 &queueInfo, // pQueueCreateInfos |
| 133 0, // layerCount |
| 134 nullptr, // ppEnabledLayerNames |
| 135 0, // extensionCount |
| 136 nullptr, // ppEnabledExtensionNames |
| 137 nullptr // ppEnabledFeatures |
| 138 }; |
| 139 |
| 140 err = vkCreateDevice(physDev, &deviceInfo, nullptr, &device); |
| 141 if (err) { |
| 142 SkDebugf("CreateDevice failed: %d\n", err); |
| 143 SkFAIL("failing"); |
| 144 } |
| 145 |
| 146 VkQueue queue; |
| 147 vkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue); |
| 148 |
| 149 const VkCommandPoolCreateInfo cmdPoolInfo = { |
| 150 VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType |
| 151 nullptr, // pNext |
| 152 0, // CmdPoolCreateFlags |
| 153 graphicsQueueIndex, // queueFamilyIndex |
| 154 }; |
| 155 |
| 156 VkCommandPool cmdPool; |
| 157 err = vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &cmdPool); |
| 158 if (err) { |
| 159 SkDebugf("CreateCommandPool failed: %d\n", err); |
| 160 SkFAIL("failing"); |
| 161 } |
| 162 |
| 163 return new GrVkGpu(context, options, physDev, device, queue, cmdPool, inst); |
| 164 } |
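|      // A minimal usage sketch (hypothetical caller; the backendContext argument is |
|      // currently ignored by this implementation): |
|      //     GrContextOptions options; |
|      //     GrGpu* gpu = vk_gpu_create(0 /* backendContext */, options, context); |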
| 165 |
| 166 //////////////////////////////////////////////////////////////////////////////// |
| 167 |
| 168 GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options, |
| 169 VkPhysicalDevice physDev, VkDevice device, VkQueue queue, VkCommandPool cmdPool, |
| 170 VkInstance inst) |
| 171 : INHERITED(context) |
| 172 , fDevice(device) |
| 173 , fQueue(queue) |
| 174 , fCmdPool(cmdPool) |
| 175 , fResourceProvider(this) |
| 176 , fVkInstance(inst) { |
| 177 fInterface.reset(GrVkCreateInterface(fVkInstance)); |
| 178 fCompiler = shaderc_compiler_initialize(); |
| 179 |
| 180 fVkCaps.reset(new GrVkCaps(options, fInterface, physDev)); |
| 181 fCaps.reset(SkRef(fVkCaps.get())); |
| 182 |
| 183 fCurrentCmdBuffer = fResourceProvider.createCommandBuffer(); |
| 184 SkASSERT(fCurrentCmdBuffer); |
| 185 fCurrentCmdBuffer->begin(this); |
| 186 VK_CALL(GetPhysicalDeviceMemoryProperties(physDev, &fPhysDevMemProps)); |
| 187 |
| 188 } |
| 189 |
| 190 GrVkGpu::~GrVkGpu() { |
| 191 shaderc_compiler_release(fCompiler); |
| 192 fCurrentCmdBuffer->end(this); |
| 193 fCurrentCmdBuffer->unref(this); |
| 194 |
| 195 // wait for all commands to finish |
| 196 VK_CALL(QueueWaitIdle(fQueue)); |
| 197 |
| 198 // must call this just before we destroy the VkDevice |
| 199 fResourceProvider.destroyResources(); |
| 200 |
| 201 VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr)); |
| 202 VK_CALL(DestroyDevice(fDevice, nullptr)); |
| 203 VK_CALL(DestroyInstance(fVkInstance, nullptr)); |
| 204 } |
| 205 |
| 206 /////////////////////////////////////////////////////////////////////////////// |
| 207 |
| 208 void GrVkGpu::submitCommandBuffer(SyncQueue sync) { |
| 209 SkASSERT(fCurrentCmdBuffer); |
| 210 fCurrentCmdBuffer->end(this); |
| 211 |
| 212 fCurrentCmdBuffer->submitToQueue(this, fQueue, sync); |
| 213 fResourceProvider.checkCommandBuffers(); |
| 214 |
| 215 // Release old command buffer and create a new one |
| 216 fCurrentCmdBuffer->unref(this); |
| 217 fCurrentCmdBuffer = fResourceProvider.createCommandBuffer(); |
| 218 SkASSERT(fCurrentCmdBuffer); |
| 219 |
| 220 fCurrentCmdBuffer->begin(this); |
| 221 } |
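|      // kForce_SyncQueue is used when the CPU must see the results of the submitted work |
|      // (e.g. onReadPixels below); kSkip_SyncQueue submits without waiting. |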
| 222 |
| 223 /////////////////////////////////////////////////////////////////////////////// |
| 224 GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) { |
| 225 return GrVkVertexBuffer::Create(this, size, dynamic); |
| 226 } |
| 227 |
| 228 GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) { |
| 229 return GrVkIndexBuffer::Create(this, size, dynamic); |
| 230 } |
| 231 |
| 232 GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) { |
| 233 GrVkBuffer::Type bufferType = kCpuToGpu_TransferType == type ? GrVkBuffer::kCopyRead_Type |
| 234 : GrVkBuffer::kCopyWrite_Type; |
| 235 return GrVkTransferBuffer::Create(this, size, bufferType); |
| 236 } |
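|      // kCopyRead buffers serve as staging sources for CPU -> GPU uploads; kCopyWrite buffers are |
|      // destinations for GPU -> CPU readbacks (see onReadPixels below). |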
| 237 |
| 238 //////////////////////////////////////////////////////////////////////////////// |
| 239 bool GrVkGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height, |
| 240 GrPixelConfig srcConfig, DrawPreference* drawPreference, |
| 241 WritePixelTempDrawInfo* tempDrawInfo) { |
| 242 if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) { |
| 243 return false; |
| 244 } |
| 245 |
| 246 // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail |
| 247 if (kNoDraw_DrawPreference != *drawPreference) { |
| 248 return false; |
| 249 } |
| 250 |
| 251 if (dstSurface->config() != srcConfig) { |
| 252 // TODO: This should fall back to drawing or copying to change config of dstSurface to |
| 253 // match that of srcConfig. |
| 254 return false; |
| 255 } |
| 256 |
| 257 return true; |
| 258 } |
| 259 |
| 260 bool GrVkGpu::onWritePixels(GrSurface* surface, |
| 261 int left, int top, int width, int height, |
| 262 GrPixelConfig config, const void* buffer, |
| 263 size_t rowBytes) { |
| 264 GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture()); |
| 265 if (!vkTex) { |
| 266 return false; |
| 267 } |
| 268 |
| 269 // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels. |
| 270 if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) { |
| 271 return false; |
| 272 } |
| 273 |
| 274 bool success = false; |
| 275 if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) { |
| 276 // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo() |
| 277 SkASSERT(config == vkTex->desc().fConfig); |
| 278 // TODO: add compressed texture support |
| 279 // delete the following two lines and uncomment the two after that when ready |
| 280 vkTex->unref(); |
| 281 return false; |
| 282 //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width, |
| 283 // height); |
| 284 } else { |
| 285 bool linearTiling = vkTex->isLinearTiled(); |
| 286 if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) { |
| 287 // Need to change the layout to general in order to perform a host write |
| 288 VkImageLayout layout = vkTex->currentLayout(); |
| 289 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout); |
| 290 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT; |
| 291 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout); |
| 292 VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT; |
| 293 vkTex->setImageLayout(this, |
| 294 VK_IMAGE_LAYOUT_GENERAL, |
| 295 srcAccessMask, |
| 296 dstAccessMask, |
| 297 srcStageMask, |
| 298 dstStageMask, |
| 299 false); |
| 300 } |
| 301 success = this->uploadTexData(vkTex, left, top, width, height, config, |
| 302 buffer, rowBytes); |
| 303 } |
| 304 |
| 305 if (success) { |
| 306 vkTex->texturePriv().dirtyMipMaps(true); |
| 307 return true; |
| 308 } |
| 309 |
| 310 return false; |
| 311 } |
| 312 |
| 313 bool GrVkGpu::uploadTexData(GrVkTexture* tex, |
| 314 int left, int top, int width, int height, |
| 315 GrPixelConfig dataConfig, |
| 316 const void* data, |
| 317 size_t rowBytes) { |
| 318 SkASSERT(data); |
| 319 |
| 320 // If we're uploading compressed data then we should be using uploadCompressedTexData |
| 321 SkASSERT(!GrPixelConfigIsCompressed(dataConfig)); |
| 322 |
| 323 bool linearTiling = tex->isLinearTiled(); |
| 324 |
| 325 size_t bpp = GrBytesPerPixel(dataConfig); |
| 326 |
| 327 const GrSurfaceDesc& desc = tex->desc(); |
| 328 |
| 329 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top, |
| 330 &width, &height, &data, &rowBytes)) { |
| 331 return false; |
| 332 } |
| 333 size_t trimRowBytes = width * bpp; |
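|      // trimRowBytes is the tightly packed size of one row after clipping, while rowBytes is the |
|      // caller's stride. Below, linearly tiled images are written by mapping their memory directly; |
|      // optimally tiled images go through a staging GrVkTransferBuffer and a buffer-to-image copy. |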
| 334 |
| 335 if (linearTiling) { |
| 336 SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() || |
| 337 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout()); |
| 338 const VkImageSubresource subres = { |
| 339 VK_IMAGE_ASPECT_COLOR_BIT, |
| 340 0, // mipLevel |
| 341 0, // arraySlice |
| 342 }; |
| 343 VkSubresourceLayout layout; |
| 344 VkResult err; |
| 345 |
| 346 const GrVkInterface* interface = this->vkInterface(); |
| 347 |
| 348 GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice, |
| 349 tex->textureImage(), |
| 350 &subres, |
| 351 &layout)); |
| 352 |
| 353 int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height |
| 354 : top; |
| 355 VkDeviceSize offset = texTop*layout.rowPitch + left*bpp; |
| 356 VkDeviceSize size = height*layout.rowPitch; |
| 357 void* mapPtr; |
| 358 err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0, |
| 359 &mapPtr)); |
| 360 if (err) { |
| 361 return false; |
| 362 } |
| 363 |
| 364 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { |
| 365 // copy into buffer by rows |
| 366 const char* srcRow = reinterpret_cast<const char*>(data); |
| 367 char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch; |
| 368 for (int y = 0; y < height; y++) { |
| 369 memcpy(dstRow, srcRow, trimRowBytes); |
| 370 srcRow += rowBytes; |
| 371 dstRow -= layout.rowPitch; |
| 372 } |
| 373 } else { |
| 374 // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy |
| 375 if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) { |
| 376 memcpy(mapPtr, data, trimRowBytes * height); |
| 377 } else { |
| 378 SkRectMemcpy(mapPtr, layout.rowPitch, data, rowBytes, trimRowBytes, height); |
| 379 } |
| 380 } |
| 381 |
| 382 GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory())); |
| 383 } else { |
| 384 GrVkTransferBuffer* transferBuffer = |
| 385 GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type); |
| 386 |
| 387 void* mapPtr = transferBuffer->map(); |
| 388 |
| 389 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { |
| 390 // copy into buffer by rows |
| 391 const char* srcRow = reinterpret_cast<const char*>(data); |
| 392 char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*trimRowBytes; |
| 393 for (int y = 0; y < height; y++) { |
| 394 memcpy(dstRow, srcRow, trimRowBytes); |
| 395 srcRow += rowBytes; |
| 396 dstRow -= trimRowBytes; |
| 397 } |
| 398 } else { |
| 399 // If there is no padding on the src data rows, we can do a single memcpy |
| 400 if (trimRowBytes == rowBytes) { |
| 401 memcpy(mapPtr, data, trimRowBytes * height); |
| 402 } else { |
| 403 SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height); |
| 404 } |
| 405 } |
| 406 |
| 407 transferBuffer->unmap(); |
| 408 |
| 409 // make sure the unmap has finished |
| 410 transferBuffer->addMemoryBarrier(this, |
| 411 VK_ACCESS_HOST_WRITE_BIT, |
| 412 VK_ACCESS_TRANSFER_READ_BIT, |
| 413 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, |
| 414 VK_PIPELINE_STAGE_TRANSFER_BIT, |
| 415 false); |
| 416 |
| 417 // Set up copy region |
| 418 bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin(); |
| 419 VkOffset3D offset = { |
| 420 left, |
| 421 flipY ? tex->height() - top - height : top, |
| 422 0 |
| 423 }; |
| 424 |
| 425 VkBufferImageCopy region; |
| 426 memset(®ion, 0, sizeof(VkBufferImageCopy)); |
| 427 region.bufferOffset = 0; |
| 428 region.bufferRowLength = width; |
| 429 region.bufferImageHeight = height; |
| 430 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }; |
| 431 region.imageOffset = offset; |
| 432 region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 }; |
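|      // bufferRowLength and bufferImageHeight are specified in texels, not bytes; setting them to |
|      // the copy width/height marks the staging buffer as tightly packed for this region. |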
| 433 |
| 434 // Change layout of our target so it can be copied to |
| 435 VkImageLayout layout = tex->currentLayout(); |
| 436 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout); |
| 437 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; |
| 438 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout); |
| 439 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; |
| 440 tex->setImageLayout(this, |
| 441 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, |
| 442 srcAccessMask, |
| 443 dstAccessMask, |
| 444 srcStageMask, |
| 445 dstStageMask, |
| 446 false); |
| 447 |
| 448 // Copy the buffer to the image |
| 449 fCurrentCmdBuffer->copyBufferToImage(this, |
| 450 transferBuffer, |
| 451 tex, |
| 452 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, |
| 453 1, |
| 454 ®ion); |
| 455 |
| 456 // Submit the current command buffer to the Queue |
| 457 this->submitCommandBuffer(kSkip_SyncQueue); |
| 458 |
| 459 transferBuffer->unref(); |
| 460 } |
| 461 |
| 462 return true; |
| 463 } |
| 464 |
| 465 //////////////////////////////////////////////////////////////////////////////// |
| 466 GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle, |
| 467 const void* srcData, size_t rowBytes) { |
| 468 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag); |
| 469 |
| 470 VkFormat pixelFormat; |
| 471 if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) { |
| 472 return nullptr; |
| 473 } |
| 474 |
| 475 if (!fVkCaps->isConfigTexturable(desc.fConfig)) { |
| 476 return nullptr; |
| 477 } |
| 478 |
| 479 bool linearTiling = false; |
| 480 if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) { |
| 481 if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) && |
| 482 (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) { |
| 483 linearTiling = true; |
| 484 } else { |
| 485 return nullptr; |
| 486 } |
| 487 } |
| 488 |
| 489 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT; |
| 490 if (renderTarget) { |
| 491 usageFlags |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; |
| 492 } |
| 493 |
| 494 // For now we will set the VK_IMAGE_USAGE_TRANSFER_DESTINATION_BIT and |
| 495 // VK_IMAGE_USAGE_TRANSFER_SOURCE_BIT on every texture since we do not know whether or not we |
| 496 // will be using this texture in some copy or not. Also this assumes, as is the current case, |
| 497 // that all render targets in Vulkan are also textures. If we change this practice of setting |
| 498 // both bits, we must make sure to set the destination bit if we are uploading srcData to the |
| 499 // texture. |
| 500 usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; |
| 501 |
| 502 VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT : |
| 503 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; |
| 504 |
| 505 // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is |
| 506 // requested, this ImageDesc describes the resolved texture. Therefore we always have samples set |
| 507 // to 1. |
| 508 GrVkImage::ImageDesc imageDesc; |
| 509 imageDesc.fImageType = VK_IMAGE_TYPE_2D; |
| 510 imageDesc.fFormat = pixelFormat; |
| 511 imageDesc.fWidth = desc.fWidth; |
| 512 imageDesc.fHeight = desc.fHeight; |
| 513 imageDesc.fLevels = 1; |
| 514 imageDesc.fSamples = 1; |
| 515 imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL; |
| 516 imageDesc.fUsageFlags = usageFlags; |
| 517 imageDesc.fMemProps = memProps; |
| 518 |
| 519 GrVkTexture* tex; |
| 520 if (renderTarget) { |
| 521 tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, desc, lifeCycle, |
| 522 imageDesc); |
| 523 } else { |
| 524 tex = GrVkTexture::CreateNewTexture(this, desc, lifeCycle, imageDesc); |
| 525 } |
| 526 |
| 527 if (!tex) { |
| 528 return nullptr; |
| 529 } |
| 530 |
| 531 if (srcData) { |
| 532 if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig, srcData, |
| 533 rowBytes)) { |
| 534 tex->unref(); |
| 535 return nullptr; |
| 536 } |
| 537 } |
| 538 |
| 539 return tex; |
| 540 } |
| 541 |
| 542 //////////////////////////////////////////////////////////////////////////////// |
| 543 |
| 544 static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin) { |
| 545 // By default, all textures in Vk use TopLeft |
| 546 if (kDefault_GrSurfaceOrigin == origin) { |
| 547 return kTopLeft_GrSurfaceOrigin; |
| 548 } else { |
| 549 return origin; |
| 550 } |
| 551 } |
| 552 |
| 553 GrTexture* GrVkGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc, |
| 554 GrWrapOwnership ownership) { |
| 555 VkFormat format; |
| 556 if (!GrPixelConfigToVkFormat(desc.fConfig, &format)) { |
| 557 return nullptr; |
| 558 } |
| 559 |
| 560 if (0 == desc.fTextureHandle) { |
| 561 return nullptr; |
| 562 } |
| 563 |
| 564 int maxSize = this->caps()->maxTextureSize(); |
| 565 if (desc.fWidth > maxSize || desc.fHeight > maxSize) { |
| 566 return nullptr; |
| 567 } |
| 568 |
| 569 // TODO: determine what format Chrome will actually send us and turn it into a Resource |
| 570 GrVkImage::Resource* imageRsrc = reinterpret_cast<GrVkImage::Resource*>(desc.fTextureHandle); |
| 571 |
| 572 GrGpuResource::LifeCycle lifeCycle; |
| 573 switch (ownership) { |
| 574 case kAdopt_GrWrapOwnership: |
| 575 lifeCycle = GrGpuResource::kAdopted_LifeCycle; |
| 576 break; |
| 577 case kBorrow_GrWrapOwnership: |
| 578 lifeCycle = GrGpuResource::kBorrowed_LifeCycle; |
| 579 break; |
| 580 } |
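|      // kAdopt means Skia takes over ownership and will free the backend image when the texture is |
|      // destroyed; kBorrow leaves the client responsible for the image's lifetime. |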
| 581 |
| 582 GrSurfaceDesc surfDesc; |
| 583 // next line relies on GrBackendTextureDesc's flags matching GrTexture's |
| 584 surfDesc.fFlags = (GrSurfaceFlags)desc.fFlags; |
| 585 surfDesc.fWidth = desc.fWidth; |
| 586 surfDesc.fHeight = desc.fHeight; |
| 587 surfDesc.fConfig = desc.fConfig; |
| 588 surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount()); |
| 589 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag); |
| 590 // In GL, Chrome assumes all textures are BottomLeft |
| 591 // In VK, we don't have this restriction |
| 592 surfDesc.fOrigin = resolve_origin(desc.fOrigin); |
| 593 |
| 594 GrVkTexture* texture = nullptr; |
| 595 if (renderTarget) { |
| 596 texture = GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(this, surfDesc, |
| 597 lifeCycle, format, |
| 598 imageRsrc); |
| 599 } else { |
| 600 texture = GrVkTexture::CreateWrappedTexture(this, surfDesc, lifeCycle, format, imageRsrc); |
| 601 } |
| 602 if (!texture) { |
| 603 return nullptr; |
| 604 } |
| 605 |
| 606 return texture; |
| 607 } |
| 608 |
| 609 GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc, |
| 610 GrWrapOwnership ownership) { |
| 611 |
| 612 // TODO: determine what format Chrome will actually send us and turn it into a Resource |
| 613 GrVkImage::Resource* imageRsrc = |
| 614 reinterpret_cast<GrVkImage::Resource*>(wrapDesc.fRenderTargetHandle); |
| 615 |
| 616 GrGpuResource::LifeCycle lifeCycle; |
| 617 switch (ownership) { |
| 618 case kAdopt_GrWrapOwnership: |
| 619 lifeCycle = GrGpuResource::kAdopted_LifeCycle; |
| 620 break; |
| 621 case kBorrow_GrWrapOwnership: |
| 622 lifeCycle = GrGpuResource::kBorrowed_LifeCycle; |
| 623 break; |
| 624 } |
| 625 |
| 626 GrSurfaceDesc desc; |
| 627 desc.fConfig = wrapDesc.fConfig; |
| 628 desc.fFlags = kCheckAllocation_GrSurfaceFlag; |
| 629 desc.fWidth = wrapDesc.fWidth; |
| 630 desc.fHeight = wrapDesc.fHeight; |
| 631 desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount()); |
| 632 |
| 633 desc.fOrigin = resolve_origin(wrapDesc.fOrigin); |
| 634 |
| 635 GrVkRenderTarget* tgt = GrVkRenderTarget::CreateWrappedRenderTarget(this, desc, |
| 636 lifeCycle, imageRsrc); |
| 637 if (tgt && wrapDesc.fStencilBits) { |
| 638 if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) { |
| 639 tgt->unref(); |
| 640 return nullptr; |
| 641 } |
| 642 } |
| 643 return tgt; |
| 644 } |
| 645 |
| 646 //////////////////////////////////////////////////////////////////////////////// |
| 647 |
| 648 void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc, |
| 649 const GrNonInstancedVertices& vertices) { |
| 650 GrVkVertexBuffer* vbuf; |
| 651 vbuf = (GrVkVertexBuffer*)vertices.vertexBuffer(); |
| 652 SkASSERT(vbuf); |
| 653 SkASSERT(!vbuf->isMapped()); |
| 654 |
| 655 vbuf->addMemoryBarrier(this, |
| 656 VK_ACCESS_HOST_WRITE_BIT, |
| 657 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, |
| 658 VK_PIPELINE_STAGE_HOST_BIT, |
| 659 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, |
| 660 false); |
| 661 |
| 662 fCurrentCmdBuffer->bindVertexBuffer(this, vbuf); |
| 663 |
| 664 if (vertices.isIndexed()) { |
| 665 GrVkIndexBuffer* ibuf = (GrVkIndexBuffer*)vertices.indexBuffer(); |
| 666 SkASSERT(ibuf); |
| 667 SkASSERT(!ibuf->isMapped()); |
| 668 |
| 669 ibuf->addMemoryBarrier(this, |
| 670 VK_ACCESS_HOST_WRITE_BIT, |
| 671 VK_ACCESS_INDEX_READ_BIT, |
| 672 VK_PIPELINE_STAGE_HOST_BIT, |
| 673 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, |
| 674 false); |
| 675 |
| 676 fCurrentCmdBuffer->bindIndexBuffer(this, ibuf); |
| 677 } |
| 678 } |
| 679 |
| 680 void GrVkGpu::buildProgramDesc(GrProgramDesc* desc, |
| 681 const GrPrimitiveProcessor& primProc, |
| 682 const GrPipeline& pipeline) const { |
| 683 if (!GrVkProgramDescBuilder::Build(desc, primProc, pipeline, *this->vkCaps().glslCaps())) { |
| 684 SkDEBUGFAIL("Failed to generate Vk program descriptor"); |
| 685 } |
| 686 } |
| 687 |
| 688 //////////////////////////////////////////////////////////////////////////////// |
| 689 |
| 690 GrStencilAttachment* GrVkGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt, |
| 691 int width, |
| 692 int height) { |
| 693 SkASSERT(rt->asTexture()); |
| 694 SkASSERT(width >= rt->width()); |
| 695 SkASSERT(height >= rt->height()); |
| 696 |
| 697 int samples = rt->numStencilSamples(); |
| 698 |
| 699 SkASSERT(this->vkCaps().stencilFormats().count()); |
| 700 const GrVkCaps::StencilFormat& sFmt = this->vkCaps().stencilFormats()[0]; |
| 701 |
| 702 GrVkStencilAttachment* stencil(GrVkStencilAttachment::Create(this, |
| 703 GrGpuResource::kCached_LifeCycle, |
| 704 width, |
| 705 height, |
| 706 samples, |
| 707 sFmt)); |
| 708 fStats.incStencilAttachmentCreates(); |
| 709 return stencil; |
| 710 } |
| 711 |
| 712 //////////////////////////////////////////////////////////////////////////////// |
| 713 |
| 714 GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, int h, |
| 715 GrPixelConfig config) { |
| 716 |
| 717 VkFormat pixelFormat; |
| 718 if (!GrPixelConfigToVkFormat(config, &pixelFormat)) { |
| 719 return 0; |
| 720 } |
| 721 |
| 722 bool linearTiling = false; |
| 723 if (!fVkCaps->isConfigTexturable(config)) { |
| 724 return 0; |
| 725 } |
| 726 |
| 727 if (fVkCaps->isConfigTexurableLinearly(config)) { |
| 728 linearTiling = true; |
| 729 } |
| 730 |
| 731 // Currently this is not supported since it requires a copy which has not yet been implemented. |
| 732 if (srcData && !linearTiling) { |
| 733 return 0; |
| 734 } |
| 735 |
| 736 VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT; |
| 737 usageFlags |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT; |
| 738 usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT; |
| 739 |
| 740 VkFlags memProps = (srcData && linearTiling) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT : |
| 741 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; |
| 742 |
| 743 // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is |
| 744 // requested, this ImageDesc describes the resolved texture. Therefore we always have samples set |
| 745 // to 1. |
| 746 GrVkImage::ImageDesc imageDesc; |
| 747 imageDesc.fImageType = VK_IMAGE_TYPE_2D; |
| 748 imageDesc.fFormat = pixelFormat; |
| 749 imageDesc.fWidth = w; |
| 750 imageDesc.fHeight = h; |
| 751 imageDesc.fLevels = 1; |
| 752 imageDesc.fSamples = 1; |
| 753 imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL; |
| 754 imageDesc.fUsageFlags = usageFlags; |
| 755 imageDesc.fMemProps = memProps; |
| 756 |
| 757 const GrVkImage::Resource* imageRsrc = GrVkImage::CreateResource(this, imageDesc); |
| 758 if (!imageRsrc) { |
| 759 return 0; |
| 760 } |
| 761 |
| 762 if (srcData) { |
| 763 if (linearTiling) { |
| 764 const VkImageSubresource subres = { |
| 765 VK_IMAGE_ASPECT_COLOR_BIT, |
| 766 0, // mipLevel |
| 767 0, // arraySlice |
| 768 }; |
| 769 VkSubresourceLayout layout; |
| 770 VkResult err; |
| 771 |
| 772 const GrVkInterface* interface = this->vkInterface(); |
| 773 |
| 774 GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice, |
| 775 imageRsrc->fImage, |
| 776 &subres, |
| 777 &layout)); |
| 778 |
| 779 void* mapPtr; |
| 780 err = GR_VK_CALL(interface, MapMemory(fDevice, |
| 781 imageRsrc->fAlloc, |
| 782 0, |
| 783 layout.rowPitch * h, |
| 784 0, |
| 785 &mapPtr)); |
| 786 if (err) { |
| 787 imageRsrc->unref(this); |
| 788 return 0; |
| 789 } |
| 790 |
| 791 size_t bpp = GrBytesPerPixel(config); |
| 792 size_t rowCopyBytes = bpp * w; |
| 793 // If there is no padding on dst (layout.rowPitch) we can do a single memcpy. |
| 794 // This assumes the srcData comes in with no padding. |
| 795 if (rowCopyBytes == layout.rowPitch) { |
| 796 memcpy(mapPtr, srcData, rowCopyBytes * h); |
| 797 } else { |
| 798 SkRectMemcpy(mapPtr, layout.rowPitch, srcData, rowCopyBytes, rowCopyBytes, h); |
| 799 } |
| 800 GR_VK_CALL(interface, UnmapMemory(fDevice, imageRsrc->fAlloc)); |
| 801 } else { |
| 802 // TODO: Add support for copying to optimal tiling |
| 803 SkASSERT(false); |
| 804 } |
| 805 } |
| 806 |
| 807 return (GrBackendObject)imageRsrc; |
| 808 } |
| 809 |
| 810 bool GrVkGpu::isTestingOnlyBackendTexture(GrBackendObject id) const { |
| 811 GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id); |
| 812 |
| 813 if (backend && backend->fImage && backend->fAlloc) { |
| 814 VkMemoryRequirements req; |
| 815 memset(&req, 0, sizeof(req)); |
| 816 GR_VK_CALL(this->vkInterface(), GetImageMemoryRequirements(fDevice, |
| 817 backend->fImage, |
| 818 &req)); |
| 819 // TODO: find a better check |
| 820 // This will probably fail with a different driver |
| 821 return (req.size > 0) && (req.size <= 8192 * 8192); |
| 822 } |
| 823 |
| 824 return false; |
| 825 } |
| 826 |
| 827 void GrVkGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandon) { |
| 828 GrVkImage::Resource* backend = reinterpret_cast<GrVkImage::Resource*>(id); |
| 829 |
| 830 if (backend) { |
| 831 if (!abandon) { |
| 832 backend->unref(this); |
| 833 } else { |
| 834 backend->unrefAndAbandon(); |
| 835 } |
| 836 } |
| 837 } |
| 838 |
| 839 //////////////////////////////////////////////////////////////////////////////// |
| 840 |
| 841 void GrVkGpu::addMemoryBarrier(VkPipelineStageFlags srcStageMask, |
| 842 VkPipelineStageFlags dstStageMask, |
| 843 bool byRegion, |
| 844 VkMemoryBarrier* barrier) const { |
| 845 SkASSERT(fCurrentCmdBuffer); |
| 846 fCurrentCmdBuffer->pipelineBarrier(this, |
| 847 srcStageMask, |
| 848 dstStageMask, |
| 849 byRegion, |
| 850 GrVkCommandBuffer::kMemory_BarrierType, |
| 851 barrier); |
| 852 } |
| 853 |
| 854 void GrVkGpu::addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask, |
| 855 VkPipelineStageFlags dstStageMask, |
| 856 bool byRegion, |
| 857 VkBufferMemoryBarrier* barrier) const { |
| 858 SkASSERT(fCurrentCmdBuffer); |
| 859 fCurrentCmdBuffer->pipelineBarrier(this, |
| 860 srcStageMask, |
| 861 dstStageMask, |
| 862 byRegion, |
| 863 GrVkCommandBuffer::kBufferMemory_BarrierType, |
| 864 barrier); |
| 865 } |
| 866 |
| 867 void GrVkGpu::addImageMemoryBarrier(VkPipelineStageFlags srcStageMask, |
| 868 VkPipelineStageFlags dstStageMask, |
| 869 bool byRegion, |
| 870 VkImageMemoryBarrier* barrier) const { |
| 871 SkASSERT(fCurrentCmdBuffer); |
| 872 fCurrentCmdBuffer->pipelineBarrier(this, |
| 873 srcStageMask, |
| 874 dstStageMask, |
| 875 byRegion, |
| 876 GrVkCommandBuffer::kImageMemory_BarrierType, |
| 877 barrier); |
| 878 } |
| 879 |
| 880 void GrVkGpu::finishDrawTarget() { |
| 881 // Submit the current command buffer to the Queue |
| 882 this->submitCommandBuffer(kSkip_SyncQueue); |
| 883 } |
| 884 |
| 885 void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) { |
| 886 // parent class should never let us get here with no RT |
| 887 SkASSERT(target); |
| 888 |
| 889 VkClearColorValue vkColor; |
| 890 GrColorToRGBAFloat(color, vkColor.float32); |
| 891 |
| 892 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(target); |
| 893 VkImageLayout origDstLayout = vkRT->currentLayout(); |
| 894 |
| 895 if (rect.width() != target->width() || rect.height() != target->height()) { |
| 896 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout); |
| 897 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; |
| 898 VkPipelineStageFlags srcStageMask = |
| 899 GrVkMemory::LayoutToPipelineStageFlags(vkRT->currentLayout()); |
| 900 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; |
| 901 vkRT->setImageLayout(this, |
| 902 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, |
| 903 srcAccessMask, |
| 904 dstAccessMask, |
| 905 srcStageMask, |
| 906 dstStageMask, |
| 907 false); |
| 908 |
| 909 VkClearRect clearRect; |
| 910 clearRect.rect.offset = { rect.fLeft, rect.fTop }; |
| 911 clearRect.rect.extent = { (uint32_t)rect.width(), (uint32_t)rect.height() }; |
| 912 clearRect.baseArrayLayer = 0; |
| 913 clearRect.layerCount = 1; |
| 914 |
| 915 |
| 916 |
| 917 const GrVkRenderPass* renderPass = vkRT->simpleRenderPass(); |
| 918 SkASSERT(renderPass); |
| 919 fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT); |
| 920 |
| 921 uint32_t colorIndex; |
| 922 SkAssertResult(renderPass->colorAttachmentIndex(&colorIndex)); |
| 923 |
| 924 VkClearAttachment attachment; |
| 925 attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; |
| 926 attachment.colorAttachment = colorIndex; |
| 927 attachment.clearValue.color = vkColor; |
| 928 |
| 929 fCurrentCmdBuffer->clearAttachments(this, 1, &attachment, 1, &clearRect); |
| 930 fCurrentCmdBuffer->endRenderPass(this); |
| 931 return; |
| 932 } |
| 933 |
| 934 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout); |
| 935 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; |
| 936 |
| 937 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout); |
| 938 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; |
| 939 |
| 940 vkRT->setImageLayout(this, |
| 941 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, |
| 942 srcAccessMask, |
| 943 dstAccessMask, |
| 944 srcStageMask, |
| 945 dstStageMask, |
| 946 false); |
| 947 |
| 948 |
| 949 VkImageSubresourceRange subRange; |
| 950 memset(&subRange, 0, sizeof(VkImageSubresourceRange)); |
| 951 subRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; |
| 952 subRange.baseMipLevel = 0; |
| 953 subRange.levelCount = 1; |
| 954 subRange.baseArrayLayer = 0; |
| 955 subRange.layerCount = 1; |
| 956 |
| 957 // In the future we may not actually be doing this type of clear at all. If we are inside a |
| 958 // render pass or doing a non full clear then we will use CmdClearColorAttachment. The more |
| 959 // common use case will be clearing an attachment at the start of a render pass, in which case |
| 960 // we will use the clear load ops. |
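|      // (With clear load ops, the render pass attachment would be created with |
|      //  loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR and the clear color passed through |
|      //  VkRenderPassBeginInfo::pClearValues when the pass begins.) |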
| 961 fCurrentCmdBuffer->clearColorImage(this, |
| 962 vkRT, |
| 963 &vkColor, |
| 964 1, &subRange); |
| 965 } |
| 966 |
| 967 inline bool can_copy_image(const GrSurface* dst, |
| 968 const GrSurface* src, |
| 969 const GrVkGpu* gpu) { |
| 970 if (src->asTexture() && |
| 971 dst->asTexture() && |
| 972 src->origin() == dst->origin() && |
| 973 src->config() == dst->config()) { |
| 974 return true; |
| 975 } |
| 976 |
| 977 // How does msaa play into this? If a VkTexture is multisampled, are we copying the multisampled |
| 978 // or the resolved image here? |
| 979 |
| 980 return false; |
| 981 } |
| 982 |
| 983 void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst, |
| 984 GrSurface* src, |
| 985 const SkIRect& srcRect, |
| 986 const SkIPoint& dstPoint) { |
| 987 SkASSERT(can_copy_image(dst, src, this)); |
| 988 |
| 989 // Insert memory barriers to switch src and dst to transfer_source and transfer_dst layouts |
| 990 GrVkTexture* dstTex = static_cast<GrVkTexture*>(dst->asTexture()); |
| 991 GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture()); |
| 992 |
| 993 VkImageLayout origDstLayout = dstTex->currentLayout(); |
| 994 VkImageLayout origSrcLayout = srcTex->currentLayout(); |
| 995 |
| 996 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout); |
| 997 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; |
| 998 |
| 999 // These flags are for flushing/invalidating caches; for the dst image it doesn't matter if |
| 1000 // the cache is flushed since it is only being written to. |
| 1001 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout); |
| 1002 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; |
| 1003 |
| 1004 dstTex->setImageLayout(this, |
| 1005 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, |
| 1006 srcAccessMask, |
| 1007 dstAccessMask, |
| 1008 srcStageMask, |
| 1009 dstStageMask, |
| 1010 false); |
| 1011 |
| 1012 srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout); |
| 1013 dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; |
| 1014 |
| 1015 srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout); |
| 1016 dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; |
| 1017 |
| 1018 srcTex->setImageLayout(this, |
| 1019 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, |
| 1020 srcAccessMask, |
| 1021 dstAccessMask, |
| 1022 srcStageMask, |
| 1023 dstStageMask, |
| 1024 false); |
| 1025 |
| 1026 // Flip rect if necessary |
| 1027 SkIRect srcVkRect = srcRect; |
| 1028 int32_t dstY = dstPoint.fY; |
| 1029 |
| 1030 if (kBottomLeft_GrSurfaceOrigin == src->origin()) { |
| 1031 SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin()); |
| 1032 srcVkRect.fTop = src->height() - srcRect.fBottom; |
| 1033 srcVkRect.fBottom = src->height() - srcRect.fTop; |
| 1034 dstY = dst->height() - dstPoint.fY - srcVkRect.height(); |
| 1035 } |
| 1036 |
| 1037 VkImageCopy copyRegion; |
| 1038 memset(©Region, 0, sizeof(VkImageCopy)); |
| 1039 copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }; |
| 1040 copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 }; |
| 1041 copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }; |
| 1042 copyRegion.dstOffset = { dstPoint.fX, dstY, 0 }; |
| 1043 copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 }; |
| 1044 |
| 1045 fCurrentCmdBuffer->copyImage(this, |
| 1046 srcTex, |
| 1047 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, |
| 1048 dstTex, |
| 1049 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, |
| 1050 1, |
| 1051 ©Region); |
| 1052 } |
| 1053 |
| 1054 inline bool can_copy_as_draw(const GrSurface* dst, |
| 1055 const GrSurface* src, |
| 1056 const GrVkGpu* gpu) { |
| 1057 return false; |
| 1058 } |
| 1059 |
| 1060 void GrVkGpu::copySurfaceAsDraw(GrSurface* dst, |
| 1061 GrSurface* src, |
| 1062 const SkIRect& srcRect, |
| 1063 const SkIPoint& dstPoint) { |
| 1064 SkASSERT(false); |
| 1065 } |
| 1066 |
| 1067 bool GrVkGpu::onCopySurface(GrSurface* dst, |
| 1068 GrSurface* src, |
| 1069 const SkIRect& srcRect, |
| 1070 const SkIPoint& dstPoint) { |
| 1071 if (can_copy_image(dst, src, this)) { |
| 1072 this->copySurfaceAsCopyImage(dst, src, srcRect, dstPoint); |
| 1073 return true; |
| 1074 } |
| 1075 |
| 1076 if (can_copy_as_draw(dst, src, this)) { |
| 1077 this->copySurfaceAsDraw(dst, src, srcRect, dstPoint); |
| 1078 return true; |
| 1079 } |
| 1080 |
| 1081 return false; |
| 1082 } |
| 1083 |
| 1084 bool GrVkGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes, |
| 1085 GrPixelConfig readConfig, DrawPreference* drawPreference, |
| 1086 ReadPixelTempDrawInfo* tempDrawInfo) { |
| 1087 // Currently we don't handle draws, so if the caller wants/needs to do a draw we need to fail |
| 1088 if (kNoDraw_DrawPreference != *drawPreference) { |
| 1089 return false; |
| 1090 } |
| 1091 |
| 1092 if (srcSurface->config() != readConfig) { |
| 1093 // TODO: This should fall back to drawing or copying to change config of
srcSurface to match |
| 1094 // that of readConfig. |
| 1095 return false; |
| 1096 } |
| 1097 |
| 1098 return true; |
| 1099 } |
| 1100 |
| 1101 bool GrVkGpu::onReadPixels(GrSurface* surface, |
| 1102 int left, int top, int width, int height, |
| 1103 GrPixelConfig config, |
| 1104 void* buffer, |
| 1105 size_t rowBytes) { |
| 1106 VkFormat pixelFormat; |
| 1107 if (!GrPixelConfigToVkFormat(config, &pixelFormat)) { |
| 1108 return false; |
| 1109 } |
| 1110 |
| 1111 GrVkTexture* tgt = static_cast<GrVkTexture*>(surface->asTexture()); |
| 1112 if (!tgt) { |
| 1113 return false; |
| 1114 } |
| 1115 |
| 1116 // Change layout of our target so it can be used as a copy source |
| 1117 VkImageLayout layout = tgt->currentLayout(); |
| 1118 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout); |
| 1119 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; |
| 1120 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout); |
| 1121 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; |
| 1122 tgt->setImageLayout(this, |
| 1123 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, |
| 1124 srcAccessMask, |
| 1125 dstAccessMask, |
| 1126 srcStageMask, |
| 1127 dstStageMask, |
| 1128 false); |
| 1129 |
| 1130 GrVkTransferBuffer* transferBuffer = |
| 1131 reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height, |
| 1132 kGpuToCpu_TransferType)); |
| 1133 |
| 1134 bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin(); |
| 1135 VkOffset3D offset = { |
| 1136 left, |
| 1137 flipY ? surface->height() - top - height : top, |
| 1138 0 |
| 1139 }; |
| 1140 |
| 1141 // Copy the image to a buffer so we can map it to cpu memory |
| 1142 VkBufferImageCopy region; |
| 1143 memset(®ion, 0, sizeof(VkBufferImageCopy)); |
| 1144 region.bufferOffset = 0; |
| 1145 region.bufferRowLength = 0; // Forces RowLength to be imageExtent.width |
| 1146 region.bufferImageHeight = 0; // Forces height to be tightly packed. Only us
eful for 3d images. |
| 1147 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }; |
| 1148 region.imageOffset = offset; |
| 1149 region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 }; |
| 1150 |
| 1151 fCurrentCmdBuffer->copyImageToBuffer(this, |
| 1152 tgt, |
| 1153 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, |
| 1154 transferBuffer, |
| 1155 1, |
| 1156 ®ion); |
| 1157 |
| 1158 // make sure the copy to buffer has finished |
| 1159 transferBuffer->addMemoryBarrier(this, |
| 1160 VK_ACCESS_TRANSFER_WRITE_BIT, |
| 1161 VK_ACCESS_HOST_READ_BIT, |
| 1162 VK_PIPELINE_STAGE_TRANSFER_BIT, |
| 1163 VK_PIPELINE_STAGE_HOST_BIT, |
| 1164 false); |
| 1165 |
| 1166 // We need to submit the current command buffer to the Queue and make sure it finishes before |
| 1167 // we can copy the data out of the buffer. |
| 1168 this->submitCommandBuffer(kForce_SyncQueue); |
| 1169 |
| 1170 void* mappedMemory = transferBuffer->map(); |
| 1171 |
| 1172 memcpy(buffer, mappedMemory, rowBytes*height); |
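|      // Note: the image-to-buffer copy above writes tightly packed rows (bufferRowLength = 0), so |
|      // this single memcpy assumes the caller's rowBytes equals width * bytes-per-pixel; a padded |
|      // rowBytes would need a row-by-row copy instead. |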
| 1173 |
| 1174 transferBuffer->unmap(); |
| 1175 transferBuffer->unref(); |
| 1176 |
| 1177 if (flipY) { |
| 1178 SkAutoSMalloc<32 * sizeof(GrColor)> scratch; |
| 1179 size_t tightRowBytes = GrBytesPerPixel(config) * width; |
| 1180 scratch.reset(tightRowBytes); |
| 1181 void* tmpRow = scratch.get(); |
| 1182 // flip y in-place by rows |
| 1183 const int halfY = height >> 1; |
| 1184 char* top = reinterpret_cast<char*>(buffer); |
| 1185 char* bottom = top + (height - 1) * rowBytes; |
| 1186 for (int y = 0; y < halfY; y++) { |
| 1187 memcpy(tmpRow, top, tightRowBytes); |
| 1188 memcpy(top, bottom, tightRowBytes); |
| 1189 memcpy(bottom, tmpRow, tightRowBytes); |
| 1190 top += rowBytes; |
| 1191 bottom -= rowBytes; |
| 1192 } |
| 1193 } |
| 1194 |
| 1195 return true; |
| 1196 } |
| 1197 |
| 1198 void GrVkGpu::onDraw(const DrawArgs& args, const GrNonInstancedVertices& vertices) { |
| 1199 GrRenderTarget* rt = args.fPipeline->getRenderTarget(); |
| 1200 GrVkRenderTarget* vkRT = static_cast<GrVkRenderTarget*>(rt); |
| 1201 const GrVkRenderPass* renderPass = vkRT->simpleRenderPass(); |
| 1202 SkASSERT(renderPass); |
| 1203 |
| 1204 |
| 1205 GrVkProgram* program = GrVkProgramBuilder::CreateProgram(this, args, |
| 1206 vertices.primitiveType(), |
| 1207 *renderPass); |
| 1208 |
| 1209 if (!program) { |
| 1210 return; |
| 1211 } |
| 1212 |
| 1213 program->setData(this, *args.fPrimitiveProcessor, *args.fPipeline); |
| 1214 |
| 1215 fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT); |
| 1216 |
| 1217 program->bind(this, fCurrentCmdBuffer); |
| 1218 |
| 1219 this->bindGeometry(*args.fPrimitiveProcessor, vertices); |
| 1220 |
| 1221 // Change layout of our render target so it can be used as the color attachment |
| 1222 VkImageLayout layout = vkRT->currentLayout(); |
| 1223 // Our color attachment is purely a destination and won't be read, so we don't need to flush or |
| 1224 // invalidate any caches |
| 1225 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout); |
| 1226 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; |
| 1227 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout); |
| 1228 VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; |
| 1229 vkRT->setImageLayout(this, |
| 1230 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, |
| 1231 srcAccessMask, |
| 1232 dstAccessMask, |
| 1233 srcStageMask, |
| 1234 dstStageMask, |
| 1235 false); |
| 1236 |
| 1237 if (vertices.isIndexed()) { |
| 1238 fCurrentCmdBuffer->drawIndexed(this, |
| 1239 vertices.indexCount(), |
| 1240 1, |
| 1241 vertices.startIndex(), |
| 1242 vertices.startVertex(), |
| 1243 0); |
| 1244 } else { |
| 1245 fCurrentCmdBuffer->draw(this, vertices.vertexCount(), 1, vertices.startVertex(), 0); |
| 1246 } |
| 1247 |
| 1248 fCurrentCmdBuffer->endRenderPass(this); |
| 1249 |
| 1250 // Technically we don't have to call this here (since there is a safety check in program::setData), |
| 1251 // but this will allow for quicker freeing of resources if the program sits in a cache for a |
| 1252 // while. |
| 1253 program->freeTempResources(this); |
| 1254 // This free will go away once we set up a program cache, and then the cache will be responsible |
| 1255 // for calling freeGpuResources. |
| 1256 program->freeGPUResources(this); |
| 1257 program->unref(); |
| 1258 |
| 1259 #if SWAP_PER_DRAW |
| 1260 glFlush(); |
| 1261 #if defined(SK_BUILD_FOR_MAC) |
| 1262 aglSwapBuffers(aglGetCurrentContext()); |
| 1263 int set_a_break_pt_here = 9; |
| 1264 aglSwapBuffers(aglGetCurrentContext()); |
| 1265 #elif defined(SK_BUILD_FOR_WIN32) |
| 1266 SwapBuf(); |
| 1267 int set_a_break_pt_here = 9; |
| 1268 SwapBuf(); |
| 1269 #endif |
| 1270 #endif |
| 1271 } |
| 1272 |