| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef GrVkGpu_DEFINED | 8 #ifndef GrVkGpu_DEFINED |
| 9 #define GrVkGpu_DEFINED | 9 #define GrVkGpu_DEFINED |
| 10 | 10 |
| (...skipping 110 matching lines...) |
| 121 #if USE_SKSL | 121 #if USE_SKSL |
| 122 SkSL::Compiler* shaderCompiler() const { | 122 SkSL::Compiler* shaderCompiler() const { |
| 123 return fCompiler; | 123 return fCompiler; |
| 124 } | 124 } |
| 125 #else | 125 #else |
| 126 shaderc_compiler_t shadercCompiler() const { | 126 shaderc_compiler_t shadercCompiler() const { |
| 127 return fCompiler; | 127 return fCompiler; |
| 128 } | 128 } |
| 129 #endif | 129 #endif |
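Depending on the USE_SKSL build flag, the accessor above hands back either an SkSL::Compiler* or a cached shaderc_compiler_t handle. As a hedged illustration of why caching the handle matters, the sketch below (not Skia code) shows how a single shaderc_compiler_t could be reused to compile GLSL to SPIR-V via the public shaderc C API; the source string, shader kind, and function name are placeholder assumptions.

```cpp
// Minimal sketch, assuming a cached shaderc_compiler_t (e.g. fCompiler) is reused
// across compiles; only the shaderc C API calls are real, the rest is illustrative.
#include <shaderc/shaderc.h>
#include <cstdio>
#include <cstring>

bool compile_glsl_to_spirv(shaderc_compiler_t compiler,   // cached, reused handle
                           const char* glslSource,        // hypothetical GLSL text
                           shaderc_shader_kind kind) {    // e.g. shaderc_glsl_vertex_shader
    shaderc_compilation_result_t result = shaderc_compile_into_spv(
            compiler, glslSource, strlen(glslSource), kind,
            "shader.glsl",   // file name, used only in error messages
            "main",          // entry point
            nullptr);        // default compile options
    bool ok = shaderc_result_get_compilation_status(result) ==
              shaderc_compilation_status_success;
    if (!ok) {
        std::printf("%s\n", shaderc_result_get_error_message(result));
    }
    // On success the SPIR-V words are available via shaderc_result_get_bytes()
    // and shaderc_result_get_length() before the result is released.
    shaderc_result_release(result);
    return ok;
}
```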
| 130 | 130 |
| 131 void onResolveRenderTarget(GrRenderTarget* target) override; |
| 132 |
| 131 void submitSecondaryCommandBuffer(GrVkSecondaryCommandBuffer*, | 133 void submitSecondaryCommandBuffer(GrVkSecondaryCommandBuffer*, |
| 132 const GrVkRenderPass*, | 134 const GrVkRenderPass*, |
| 133 const VkClearValue*, | 135 const VkClearValue*, |
| 134 GrVkRenderTarget*, | 136 GrVkRenderTarget*, |
| 135 const SkIRect& bounds); | 137 const SkIRect& bounds); |
| 136 | 138 |
| 137 void finishDrawTarget() override; | 139 void finishDrawTarget() override; |
| 138 | 140 |
| 139 void generateMipmap(GrVkTexture* tex); | 141 void generateMipmap(GrVkTexture* tex); |
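For generateMipmap, a common Vulkan approach is to blit each mip level from the level above it. The sketch below is only an assumption about that general pattern, not GrVkGpu's implementation: the handles are placeholders and the per-level layout barriers are omitted for brevity.

```cpp
// Sketch: generate mip levels by halving the previous level with vkCmdBlitImage.
#include <vulkan/vulkan.h>
#include <algorithm>

void generate_mipmaps(VkCommandBuffer cmd, VkImage image,
                      int32_t width, int32_t height, uint32_t mipLevels) {
    for (uint32_t level = 1; level < mipLevels; ++level) {
        // Each level is half the previous one, clamped to at least 1 texel.
        int32_t srcW = std::max(width  >> (level - 1), 1);
        int32_t srcH = std::max(height >> (level - 1), 1);
        int32_t dstW = std::max(width  >> level, 1);
        int32_t dstH = std::max(height >> level, 1);

        VkImageBlit blit = {};
        blit.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, level - 1, 0, 1 };
        blit.srcOffsets[1]  = { srcW, srcH, 1 };
        blit.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, level, 0, 1 };
        blit.dstOffsets[1]  = { dstW, dstH, 1 };

        // A real implementation must first transition level-1 to TRANSFER_SRC
        // and level to TRANSFER_DST with vkCmdPipelineBarrier.
        vkCmdBlitImage(cmd,
                       image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                       image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                       1, &blit, VK_FILTER_LINEAR);
    }
}
```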
| 140 | 142 |
| 141 bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size); | 143 bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size); |
| 142 | 144 |
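For small updates like the updateBuffer call above, Vulkan provides vkCmdUpdateBuffer, which records the data directly into the command buffer. The snippet below is a sketch under that assumption, not GrVkGpu's actual path; it only demonstrates the spec limits (offset and size multiples of 4, size at most 65536 bytes).

```cpp
// Sketch: inline buffer update via vkCmdUpdateBuffer; handles are placeholders.
#include <vulkan/vulkan.h>

bool update_buffer_inline(VkCommandBuffer cmd, VkBuffer buffer,
                          const void* src, VkDeviceSize offset, VkDeviceSize size) {
    // The spec requires offset and size to be multiples of 4 and size <= 65536;
    // larger or misaligned updates need a staging buffer and vkCmdCopyBuffer.
    if (size == 0 || size > 65536 || (offset & 3) || (size & 3)) {
        return false;
    }
    vkCmdUpdateBuffer(cmd, buffer, offset, size, src);
    return true;
}
```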
| 143 // Heaps | 145 // Heaps |
| 144 enum Heap { | 146 enum Heap { |
| 145 kLinearImage_Heap = 0, | 147 kLinearImage_Heap = 0, |
| 146 // We separate out small (i.e., <= 16K) images to reduce fragmentation | 148 // We separate out small (i.e., <= 16K) images to reduce fragmentation |
| 147 // in the main heap. | 149 // in the main heap. |
| 148 kOptimalImage_Heap, | 150 kOptimalImage_Heap, |
| 149 kSmallOptimalImage_Heap, | 151 kSmallOptimalImage_Heap, |
| 150 // We have separate vertex and image heaps, because it's possible that | 152 // We have separate vertex and image heaps, because it's possible that |
| 151 // a given Vulkan driver may allocate them separately. | 153 // a given Vulkan driver may allocate them separately. |
| 152 kVertexBuffer_Heap, | 154 kVertexBuffer_Heap, |
| 153 kIndexBuffer_Heap, | 155 kIndexBuffer_Heap, |
| 154 kUniformBuffer_Heap, | 156 kUniformBuffer_Heap, |
| 155 kCopyReadBuffer_Heap, | 157 kCopyReadBuffer_Heap, |
| 156 kCopyWriteBuffer_Heap, | 158 kCopyWriteBuffer_Heap, |
| 157 | 159 |
| 158 kLastHeap = kCopyWriteBuffer_Heap | 160 kLastHeap = kCopyWriteBuffer_Heap |
| 159 }; | 161 }; |
| 160 static const int kHeapCount = kLastHeap + 1; | 162 static const int kHeapCount = kLastHeap + 1; |
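The enum's comments explain the partitioning: small optimal-tiling images get their own heap so they do not fragment the main image heap, and buffers are kept apart from images because a driver may allocate them differently. As a purely hypothetical illustration of that routing (the helper name and signature are made up, only the 16K cutoff comes from the comment above):

```cpp
// Hypothetical helper, not part of GrVkGpu: route an image allocation to a heap
// using the 16K size cutoff mentioned in the enum's comment.
#include <cstddef>

enum Heap {
    kLinearImage_Heap = 0,
    kOptimalImage_Heap,
    kSmallOptimalImage_Heap,
    // ... buffer heaps elided for this sketch ...
};

static Heap choose_image_heap(bool linearTiling, size_t allocSizeInBytes) {
    if (linearTiling) {
        return kLinearImage_Heap;
    }
    // Group small images together to reduce fragmentation in the main heap.
    return allocSizeInBytes <= 16 * 1024 ? kSmallOptimalImage_Heap
                                         : kOptimalImage_Heap;
}
```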
| (...skipping 31 matching lines...) |
| 192 | 194 |
| 193 bool onWritePixels(GrSurface* surface, | 195 bool onWritePixels(GrSurface* surface, |
| 194 int left, int top, int width, int height, | 196 int left, int top, int width, int height, |
| 195 GrPixelConfig config, const SkTArray<GrMipLevel>&) override; | 197 GrPixelConfig config, const SkTArray<GrMipLevel>&) override; |
| 196 | 198 |
| 197 bool onTransferPixels(GrSurface*, | 199 bool onTransferPixels(GrSurface*, |
| 198 int left, int top, int width, int height, | 200 int left, int top, int width, int height, |
| 199 GrPixelConfig config, GrBuffer* transferBuffer, | 201 GrPixelConfig config, GrBuffer* transferBuffer, |
| 200 size_t offset, size_t rowBytes) override { return false; } | 202 size_t offset, size_t rowBytes) override { return false; } |
| 201 | 203 |
| 202 void onResolveRenderTarget(GrRenderTarget* target) override; | |
| 203 | |
| 204 // Ends and submits the current command buffer to the queue and then creates a new command | 204 // Ends and submits the current command buffer to the queue and then creates a new command |
| 205 // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all | 205 // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all |
| 206 // work in the queue to finish before returning. | 206 // work in the queue to finish before returning. |
| 207 void submitCommandBuffer(SyncQueue sync); | 207 void submitCommandBuffer(SyncQueue sync); |
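The comment above describes an end/submit/optionally-wait/re-begin sequence. The following is a minimal sketch of that flow in raw Vulkan, assuming simple placeholder handles and no error handling; only the SyncQueue behavior mirrors the comment, everything else is an assumption rather than GrVkGpu's actual code.

```cpp
// Sketch of the submit-then-restart flow described in the comment above.
#include <vulkan/vulkan.h>

enum SyncQueue { kForce_SyncQueue, kSkip_SyncQueue };

VkCommandBuffer submit_command_buffer(VkDevice device, VkQueue queue,
                                      VkCommandPool pool, VkCommandBuffer cmd,
                                      SyncQueue sync) {
    vkEndCommandBuffer(cmd);

    VkSubmitInfo submitInfo = {};
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.commandBufferCount = 1;
    submitInfo.pCommandBuffers = &cmd;
    vkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE);

    if (kForce_SyncQueue == sync) {
        // Block until all work previously submitted to the queue has finished.
        vkQueueWaitIdle(queue);
    }

    // Allocate and begin a fresh command buffer for subsequent recording; the
    // one just submitted must not be re-begun until the GPU is done with it.
    VkCommandBufferAllocateInfo allocInfo = {};
    allocInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    allocInfo.commandPool = pool;
    allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    allocInfo.commandBufferCount = 1;
    VkCommandBuffer next;
    vkAllocateCommandBuffers(device, &allocInfo, &next);

    VkCommandBufferBeginInfo beginInfo = {};
    beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    vkBeginCommandBuffer(next, &beginInfo);
    return next;  // becomes the new "current" command buffer
}
```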
| 208 | 208 |
| 209 void copySurfaceAsCopyImage(GrSurface* dst, | 209 void copySurfaceAsCopyImage(GrSurface* dst, |
| 210 GrSurface* src, | 210 GrSurface* src, |
| 211 GrVkImage* dstImage, | 211 GrVkImage* dstImage, |
| 212 GrVkImage* srcImage, | 212 GrVkImage* srcImage, |
| 213 const SkIRect& srcRect, | 213 const SkIRect& srcRect, |
| (...skipping 48 matching lines...) |
| 262 #else | 262 #else |
| 263 // Shaderc compiler used for compiling glsl to spirv. We only want to create the compiler once | 263 // Shaderc compiler used for compiling glsl to spirv. We only want to create the compiler once |
| 264 // since there is significant overhead to the first compile of any compiler. | 264 // since there is significant overhead to the first compile of any compiler. |
| 265 shaderc_compiler_t fCompiler; | 265 shaderc_compiler_t fCompiler; |
| 266 #endif | 266 #endif |
| 267 | 267 |
| 268 typedef GrGpu INHERITED; | 268 typedef GrGpu INHERITED; |
| 269 }; | 269 }; |
| 270 | 270 |
| 271 #endif | 271 #endif |