| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrVkGpu.h" | 8 #include "GrVkGpu.h" |
| 9 | 9 |
| 10 #include "GrContextOptions.h" | 10 #include "GrContextOptions.h" |
| (...skipping 101 matching lines...) |
| 112 | 112 |
| 113 fCompiler = shaderc_compiler_initialize(); | 113 fCompiler = shaderc_compiler_initialize(); |
| 114 | 114 |
| 115 fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendCtx->fPhysicalDevice, | 115 fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendCtx->fPhysicalDevice, |
| 116 backendCtx->fFeatures, backendCtx->fExtensions)); | 116 backendCtx->fFeatures, backendCtx->fExtensions)); |
| 117 fCaps.reset(SkRef(fVkCaps.get())); | 117 fCaps.reset(SkRef(fVkCaps.get())); |
| 118 | 118 |
| 119 VK_CALL(GetPhysicalDeviceMemoryProperties(backendCtx->fPhysicalDevice, &fPhysDevMemProps)); | 119 VK_CALL(GetPhysicalDeviceMemoryProperties(backendCtx->fPhysicalDevice, &fPhysDevMemProps)); |
| 120 | 120 |
| 121 const VkCommandPoolCreateInfo cmdPoolInfo = { | 121 const VkCommandPoolCreateInfo cmdPoolInfo = { |
| 122 VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType | 122 VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType |
| 123 nullptr, // pNext | 123 nullptr, // pNext |
| 124 VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, // CmdPoolCreateFlags | 124 VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | |
| 125 backendCtx->fGraphicsQueueIndex, // queueFamilyIndex | 125 VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, // CmdPoolCreateFlags |
| | 126 backendCtx->fGraphicsQueueIndex, // queueFamilyIndex |
| 126 }; | 127 }; |
| 127 GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr, | 128 GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr, |
| 128 &fCmdPool)); | 129 &fCmdPool)); |
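
The pool is now created with VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT in addition to the transient bit. That flag is what makes vkResetCommandBuffer legal on individual buffers allocated from fCmdPool, which a recycling scheme for command buffers relies on. A minimal sketch of what the flag permits, outside of Skia, assuming device, cmdPool, and the submit/fence-wait are handled by the caller:

    // With RESET_COMMAND_BUFFER_BIT set on the pool, a single command buffer
    // can be rewound and re-recorded without resetting the whole pool.
    VkCommandBufferAllocateInfo allocInfo = {};
    allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    allocInfo.commandPool = cmdPool;            // pool created with the reset bit
    allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    allocInfo.commandBufferCount = 1;
    VkCommandBuffer cmdBuffer;
    vkAllocateCommandBuffers(device, &allocInfo, &cmdBuffer);

    // ... record, submit, wait until the GPU is done with cmdBuffer ...

    // Legal only because the pool has VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
    // without it, only vkResetCommandPool (all buffers at once) is allowed.
    vkResetCommandBuffer(cmdBuffer, 0);
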
| 129 | 130 |
| 130 // must call this after creating the CommandPool | 131 // must call this after creating the CommandPool |
| 131 fResourceProvider.init(); | 132 fResourceProvider.init(); |
| 132 fCurrentCmdBuffer = fResourceProvider.createPrimaryCommandBuffer(); | 133 fCurrentCmdBuffer = fResourceProvider.findOrCreatePrimaryCommandBuffer(); |
| 133 SkASSERT(fCurrentCmdBuffer); | 134 SkASSERT(fCurrentCmdBuffer); |
| 134 fCurrentCmdBuffer->begin(this); | 135 fCurrentCmdBuffer->begin(this); |
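
fResourceProvider.findOrCreatePrimaryCommandBuffer() (replacing createPrimaryCommandBuffer()) implies the provider now hands back a recycled command buffer when one is free instead of always allocating a fresh one. The provider itself is not part of this hunk, so the following is only a hypothetical sketch of that find-or-create pattern; the available list and the GrVkPrimaryCommandBuffer::Create factory are assumed names:

    // Hypothetical find-or-create pool; finished buffers are assumed to be
    // returned to 'available' once their submission has completed.
    std::vector<GrVkPrimaryCommandBuffer*> available;

    GrVkPrimaryCommandBuffer* findOrCreatePrimaryCommandBuffer() {
        if (!available.empty()) {
            GrVkPrimaryCommandBuffer* cmdBuffer = available.back();
            available.pop_back();
            return cmdBuffer;                   // caller re-records it via begin()
        }
        // Nothing to reuse: allocate a new primary buffer from the command pool.
        return GrVkPrimaryCommandBuffer::Create(gpu, cmdPool);  // assumed factory
    }
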
| 135 | 136 |
| 136 // set up our heaps | 137 // set up our heaps |
| 137 fHeaps[kLinearImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 16*1024*1024)); | 138 fHeaps[kLinearImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 16*1024*1024)); |
| 138 // We want the OptimalImage_Heap to use a SubAlloc_strategy but it occasionally causes the | 139 // We want the OptimalImage_Heap to use a SubAlloc_strategy but it occasionally causes the |
| 139 // device to run out of memory. Most likely this is caused by fragmentation in the device heap | 140 // device to run out of memory. Most likely this is caused by fragmentation in the device heap |
| 140 // and we can't allocate more. Until we get a fix moving this to SingleAlloc. | 141 // and we can't allocate more. Until we get a fix moving this to SingleAlloc. |
| 141 fHeaps[kOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 64*1024*1024)); | 142 fHeaps[kOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSingleAlloc_Strategy, 64*1024*1024)); |
| 142 fHeaps[kSmallOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 2*1024*1024)); | 143 fHeaps[kSmallOptimalImage_Heap].reset(new GrVkHeap(this, GrVkHeap::kSubAlloc_Strategy, 2*1024*1024)); |
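
The comment above explains the trade-off behind the two GrVkHeap strategies: sub-allocation packs many resources into one VkDeviceMemory block but can fragment over time, while single allocation gives each resource its own block. GrVkHeap's real implementation is not shown in this hunk; a hypothetical sketch of how such a heap might branch on the strategy, with subAllocateFromExistingBlock and allocDedicatedBlock as stand-in helpers:

    // Hypothetical heap allocation path (not GrVkHeap's actual code).
    bool alloc(VkDeviceSize size, Strategy strategy, GrVkAlloc* out) {
        if (kSubAlloc_Strategy == strategy) {
            // Try to carve the request out of an existing large block first;
            // over time this can fragment and start failing.
            if (subAllocateFromExistingBlock(size, out)) {
                return true;
            }
        }
        // kSingleAlloc_Strategy (or no block had room): one vkAllocateMemory
        // per resource, avoiding heap fragmentation at the cost of more
        // VkDeviceMemory objects.
        return allocDedicatedBlock(size, out);
    }
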
| (...skipping 55 matching lines...) |
| 198 | 199 |
| 199 void GrVkGpu::submitCommandBuffer(SyncQueue sync) { | 200 void GrVkGpu::submitCommandBuffer(SyncQueue sync) { |
| 200 SkASSERT(fCurrentCmdBuffer); | 201 SkASSERT(fCurrentCmdBuffer); |
| 201 fCurrentCmdBuffer->end(this); | 202 fCurrentCmdBuffer->end(this); |
| 202 | 203 |
| 203 fCurrentCmdBuffer->submitToQueue(this, fQueue, sync); | 204 fCurrentCmdBuffer->submitToQueue(this, fQueue, sync); |
| 204 fResourceProvider.checkCommandBuffers(); | 205 fResourceProvider.checkCommandBuffers(); |
| 205 | 206 |
| 206 // Release old command buffer and create a new one | 207 // Release old command buffer and create a new one |
| 207 fCurrentCmdBuffer->unref(this); | 208 fCurrentCmdBuffer->unref(this); |
| 208 fCurrentCmdBuffer = fResourceProvider.createPrimaryCommandBuffer(); | 209 fCurrentCmdBuffer = fResourceProvider.findOrCreatePrimaryCommandBuffer(); |
| 209 SkASSERT(fCurrentCmdBuffer); | 210 SkASSERT(fCurrentCmdBuffer); |
| 210 | 211 |
| 211 fCurrentCmdBuffer->begin(this); | 212 fCurrentCmdBuffer->begin(this); |
| 212 } | 213 } |
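
submitCommandBuffer() is the other half of the recycling: end and submit the current buffer, let fResourceProvider.checkCommandBuffers() reclaim any earlier submissions that have finished, drop our ref, and take a recycled (or new) buffer via findOrCreatePrimaryCommandBuffer(). checkCommandBuffers() is not shown in this hunk; a hypothetical sketch of what such a check could do, assuming one fence per in-flight command buffer and assumed fence()/reset() accessors:

    // Hypothetical: move command buffers whose GPU work has completed from the
    // in-flight list back to the reuse pool.
    void checkCommandBuffers() {
        for (int i = (int)inFlight.size() - 1; i >= 0; --i) {
            GrVkPrimaryCommandBuffer* cmdBuffer = inFlight[i];
            // VK_SUCCESS means the fence recorded at submit time has signaled.
            if (VK_SUCCESS == vkGetFenceStatus(device, cmdBuffer->fence())) {
                cmdBuffer->reset();          // relies on RESET_COMMAND_BUFFER_BIT
                available.push_back(cmdBuffer);
                inFlight.erase(inFlight.begin() + i);
            }
        }
    }
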
| 213 | 214 |
| 214 /////////////////////////////////////////////////////////////////////////////// | 215 /////////////////////////////////////////////////////////////////////////////// |
| 215 GrBuffer* GrVkGpu::onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern accessPattern, | 216 GrBuffer* GrVkGpu::onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern accessPattern, |
| 216 const void* data) { | 217 const void* data) { |
| 217 GrBuffer* buff; | 218 GrBuffer* buff; |
| 218 switch (type) { | 219 switch (type) { |
| (...skipping 1256 matching lines...) |
| 1475 SkRectMemcpy(buffer, rowBytes, mappedMemory, tightRowBytes, tightRowBytes, height); | 1476 SkRectMemcpy(buffer, rowBytes, mappedMemory, tightRowBytes, tightRowBytes, height); |
| 1476 } | 1477 } |
| 1477 } | 1478 } |
| 1478 | 1479 |
| 1479 transferBuffer->unmap(); | 1480 transferBuffer->unmap(); |
| 1480 transferBuffer->unref(); | 1481 transferBuffer->unref(); |
| 1481 | 1482 |
| 1482 return true; | 1483 return true; |
| 1483 } | 1484 } |
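
For reference, the SkRectMemcpy call above copies height rows of tightRowBytes bytes out of the mapped transfer buffer, advancing the destination by the caller's (possibly padded) rowBytes each row. A rough equivalent of that call, assuming the same buffer, rowBytes, mappedMemory, tightRowBytes, and height values:

    // Rough equivalent of SkRectMemcpy(buffer, rowBytes, mappedMemory,
    //                                  tightRowBytes, tightRowBytes, height)
    const char* src = static_cast<const char*>(mappedMemory);
    char* dst = static_cast<char*>(buffer);
    for (int y = 0; y < height; ++y) {
        memcpy(dst, src, tightRowBytes);   // one tightly packed row
        src += tightRowBytes;              // source rows are tightly packed
        dst += rowBytes;                   // destination rows may include padding
    }
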
| 1484 | 1485 |
| 1485 void GrVkGpu::submitSecondaryCommandBuffer(const GrVkSecondaryCommandBuffer* buffer, | 1486 void GrVkGpu::submitSecondaryCommandBuffer(GrVkSecondaryCommandBuffer* buffer, |
| 1486 const GrVkRenderPass* renderPass, | 1487 const GrVkRenderPass* renderPass, |
| 1487 const VkClearValue* colorClear, | 1488 const VkClearValue* colorClear, |
| 1488 GrVkRenderTarget* target, | 1489 GrVkRenderTarget* target, |
| 1489 const SkIRect& bounds) { | 1490 const SkIRect& bounds) { |
| 1490 // Currently it is fine for us to always pass in 1 for the clear count even if no attachment | 1491 // Currently it is fine for us to always pass in 1 for the clear count even if no attachment |
| 1491 // uses it. In the current state, we also only use the LOAD_OP_CLEAR for the color attachment | 1492 // uses it. In the current state, we also only use the LOAD_OP_CLEAR for the color attachment |
| 1492 // which is always at the first attachment. | 1493 // which is always at the first attachment. |
| 1493 fCurrentCmdBuffer->beginRenderPass(this, renderPass, 1, colorClear, *target, bounds, true); | 1494 fCurrentCmdBuffer->beginRenderPass(this, renderPass, 1, colorClear, *target, bounds, true); |
| 1494 fCurrentCmdBuffer->executeCommands(this, buffer); | 1495 fCurrentCmdBuffer->executeCommands(this, buffer); |
| 1495 fCurrentCmdBuffer->endRenderPass(this); | 1496 fCurrentCmdBuffer->endRenderPass(this); |
| 1496 } | 1497 } |
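
The buffer parameter drops its const qualifier, which suggests executeCommands() now needs to mutate the secondary command buffer's state, for example to mark it as submitted and track it so it is not re-recorded or freed while the primary buffer is still pending. That is an inference from this hunk alone; a hypothetical sketch of such bookkeeping, where vkCommandBuffer(), markAsSubmitted(), fCmdBuffer, and fTrackedSecondaryBuffers are assumed names:

    // Hypothetical reason for the non-const pointer: the primary command buffer
    // records state on the secondary and tracks it until submission completes.
    void executeCommands(const GrVkGpu* gpu, GrVkSecondaryCommandBuffer* secondary) {
        VkCommandBuffer handle = secondary->vkCommandBuffer();   // assumed accessor
        GR_VK_CALL(gpu->vkInterface(), CmdExecuteCommands(fCmdBuffer, 1, &handle));
        secondary->markAsSubmitted();                            // mutation: needs non-const
        fTrackedSecondaryBuffers.push_back(secondary);           // assumed member
    }
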
| 1497 | 1498 |