| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "GrVkGpu.h" | 8 #include "GrVkGpu.h" |
| 9 | 9 |
| 10 #include "GrContextOptions.h" | 10 #include "GrContextOptions.h" |
| (...skipping 22 matching lines...) |
| 33 | 33 |
| 34 #include "SkConfig8888.h" | 34 #include "SkConfig8888.h" |
| 35 | 35 |
| 36 #include "vk/GrVkInterface.h" | 36 #include "vk/GrVkInterface.h" |
| 37 #include "vk/GrVkTypes.h" | 37 #include "vk/GrVkTypes.h" |
| 38 | 38 |
| 39 #define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X) | 39 #define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X) |
| 40 #define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X) | 40 #define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X) |
| 41 #define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X) | 41 #define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X) |
| 42 | 42 |
| 43 //////////////////////////////////////////////////////////////////////////////// | |
| 44 // Stuff used to set up a GrVkGpu secretly for now. | |
| 45 | |
| 46 | |
| 47 #ifdef ENABLE_VK_LAYERS | 43 #ifdef ENABLE_VK_LAYERS |
| 48 VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback( | 44 VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback( |
| 49 VkDebugReportFlagsEXT flags, | 45 VkDebugReportFlagsEXT flags, |
| 50 VkDebugReportObjectTypeEXT objectType, | 46 VkDebugReportObjectTypeEXT objectType, |
| 51 uint64_t object, | 47 uint64_t object, |
| 52 size_t location, | 48 size_t location, |
| 53 int32_t messageCode, | 49 int32_t messageCode, |
| 54 const char* pLayerPrefix, | 50 const char* pLayerPrefix, |
| 55 const char* pMessage, | 51 const char* pMessage, |
| 56 void* pUserData) { | 52 void* pUserData) { |
| 57 if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) { | 53 if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) { |
| 58 SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode,
pMessage); | 54 SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode,
pMessage); |
| 59 } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) { | 55 } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) { |
| 60 SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCod
e, pMessage); | 56 SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCod
e, pMessage); |
| 61 } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) { | 57 } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) { |
| 62 SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messa
geCode, pMessage); | 58 SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messa
geCode, pMessage); |
| 63 } else { | 59 } else { |
| 64 SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, message
Code, pMessage); | 60 SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, message
Code, pMessage); |
| 65 } | 61 } |
| 66 return VK_FALSE; | 62 return VK_FALSE; |
| 67 } | 63 } |
| 68 | |
| 69 const char* kEnabledLayerNames[] = { | |
| 70 // elements of VK_LAYER_LUNARG_standard_validation | |
| 71 "VK_LAYER_LUNARG_threading", | |
| 72 "VK_LAYER_LUNARG_param_checker", | |
| 73 "VK_LAYER_LUNARG_device_limits", | |
| 74 "VK_LAYER_LUNARG_object_tracker", | |
| 75 "VK_LAYER_LUNARG_image", | |
| 76 "VK_LAYER_LUNARG_mem_tracker", | |
| 77 "VK_LAYER_LUNARG_draw_state", | |
| 78 "VK_LAYER_LUNARG_swapchain", | |
| 79 "VK_LAYER_GOOGLE_unique_objects", | |
| 80 // not included in standard_validation | |
| 81 //"VK_LAYER_LUNARG_api_dump", | |
| 82 }; | |
| 83 const char* kEnabledInstanceExtensionNames[] = { | |
| 84 VK_EXT_DEBUG_REPORT_EXTENSION_NAME | |
| 85 }; | |
| 86 | |
| 87 bool verify_instance_layers() { | |
| 88 // make sure we can actually use the extensions and layers above | |
| 89 uint32_t extensionCount; | |
| 90 VkResult res = vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr); | |
| 91 if (VK_SUCCESS != res) { | |
| 92 return false; | |
| 93 } | |
| 94 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount]; | |
| 95 res = vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions); | |
| 96 if (VK_SUCCESS != res) { | |
| 97 return false; | |
| 98 } | |
| 99 int instanceExtensionsFound = 0; | |
| 100 for (uint32_t j = 0; j < ARRAYSIZE(kEnabledInstanceExtensionNames); ++j) { | |
| 101 for (uint32_t i = 0; i < extensionCount; ++i) { | |
| 102 if (!strncmp(extensions[i].extensionName, kEnabledInstanceExtensionNames[j], | |
| 103 strlen(kEnabledInstanceExtensionNames[j]))) { | |
| 104 ++instanceExtensionsFound; | |
| 105 break; | |
| 106 } | |
| 107 } | |
| 108 } | |
| 109 delete[] extensions; | |
| 110 | |
| 111 uint32_t layerCount; | |
| 112 res = vkEnumerateInstanceLayerProperties(&layerCount, nullptr); | |
| 113 if (VK_SUCCESS != res) { | |
| 114 return false; | |
| 115 } | |
| 116 VkLayerProperties* layers = new VkLayerProperties[layerCount]; | |
| 117 res = vkEnumerateInstanceLayerProperties(&layerCount, layers); | |
| 118 if (VK_SUCCESS != res) { | |
| 119 return false; | |
| 120 } | |
| 121 int instanceLayersFound = 0; | |
| 122 for (uint32_t j = 0; j < ARRAYSIZE(kEnabledLayerNames); ++j) { | |
| 123 for (uint32_t i = 0; i < layerCount; ++i) { | |
| 124 if (!strncmp(layers[i].layerName, kEnabledLayerNames[j], | |
| 125 strlen(kEnabledLayerNames[j]))) { | |
| 126 ++instanceLayersFound; | |
| 127 break; | |
| 128 } | |
| 129 } | |
| 130 } | |
| 131 delete[] layers; | |
| 132 | |
| 133 return instanceExtensionsFound == ARRAYSIZE(kEnabledInstanceExtensionNames) && | |
| 134 instanceLayersFound == ARRAYSIZE(kEnabledLayerNames); | |
| 135 } | |
| 136 | |
| 137 bool verify_device_layers(VkPhysicalDevice physDev) { | |
| 138 uint32_t layerCount; | |
| 139 VkResult res = vkEnumerateDeviceLayerProperties(physDev, &layerCount, nullptr); | |
| 140 if (VK_SUCCESS != res) { | |
| 141 return false; | |
| 142 } | |
| 143 VkLayerProperties* layers = new VkLayerProperties[layerCount]; | |
| 144 res = vkEnumerateDeviceLayerProperties(physDev, &layerCount, layers); | |
| 145 if (VK_SUCCESS != res) { | |
| 146 return false; | |
| 147 } | |
| 148 int deviceLayersFound = 0; | |
| 149 for (uint32_t j = 0; j < ARRAYSIZE(kEnabledLayerNames); ++j) { | |
| 150 for (uint32_t i = 0; i < layerCount; ++i) { | |
| 151 if (!strncmp(layers[i].layerName, kEnabledLayerNames[j], | |
| 152 strlen(kEnabledLayerNames[j]))) { | |
| 153 ++deviceLayersFound; | |
| 154 break; | |
| 155 } | |
| 156 } | |
| 157 } | |
| 158 delete[] layers; | |
| 159 | |
| 160 return deviceLayersFound == ARRAYSIZE(kEnabledLayerNames); | |
| 161 } | |
| 162 #endif | 64 #endif |
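
Both verify_* helpers above use Vulkan's two-call enumeration idiom: query the count with a null output array, allocate, then call again to fill. A minimal sketch of that idiom using std::vector (a hypothetical helper, not part of this change) that also avoids leaking the raw new[] array on the early-return paths seen above:

    // Hypothetical helper showing the count-then-fill idiom used by
    // verify_instance_layers()/verify_device_layers(). A std::vector frees its
    // storage on every return path, unlike the new[]/delete[] pairs above.
    #include <vector>
    #include <vulkan/vulkan.h>

    static bool enumerate_instance_layers(std::vector<VkLayerProperties>* out) {
        uint32_t count = 0;
        if (VK_SUCCESS != vkEnumerateInstanceLayerProperties(&count, nullptr)) {
            return false;
        }
        out->resize(count);
        return VK_SUCCESS == vkEnumerateInstanceLayerProperties(&count, out->data());
    }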
| 163 | 65 |
| 164 // For now the VkGpuCreate is using the same signature as GL. This is mostly for ease of | 66 GrGpu* GrVkGpu::Create(GrBackendContext backendContext, const GrContextOptions& options, |
| 165 // hiding this code from official skia. In the end the VkGpuCreate will not take a GrBackendContext | 67 GrContext* context) { |
| 166 // and most likely would take an optional device and queues to use. | 68 SkAutoTUnref<const GrVkBackendContext> vkBackendContext( |
| 167 GrGpu* vk_gpu_create(GrBackendContext backendContext, const GrContextOptions& options, | 69 reinterpret_cast<const GrVkBackendContext*>(backendContext)); |
| 168 GrContext* context) { | 70 if (!vkBackendContext) { |
| 169 // Below is Vulkan setup code that normally would be done by a client, but will do here for now | 71 vkBackendContext.reset(GrVkBackendContext::Create()); |
| 170 // for testing purposes. | 72 if (!vkBackendContext) { |
| 171 VkPhysicalDevice physDev; | 73 return nullptr; |
| 172 VkDevice device; | 74 } |
| 173 VkInstance inst; | 75 } else { |
| 174 VkResult err; | 76 vkBackendContext->ref(); |
| 175 | |
| 176 const VkApplicationInfo app_info = { | |
| 177 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType | |
| 178 nullptr, // pNext | |
| 179 "vktest", // pApplicationName | |
| 180 0, // applicationVersion | |
| 181 "vktest", // pEngineName | |
| 182 0, // engineVersion | |
| 183 kGrVkMinimumVersion, // apiVersion | |
| 184 }; | |
| 185 | |
| 186 const char** enabledLayerNames = nullptr; | |
| 187 int enabledLayerCount = 0; | |
| 188 const char** enabledInstanceExtensionNames = nullptr; | |
| 189 int enabledInstanceExtensionCount = 0; | |
| 190 #ifdef ENABLE_VK_LAYERS | |
| 191 if (verify_instance_layers()) { | |
| 192 enabledLayerNames = kEnabledLayerNames; | |
| 193 enabledLayerCount = ARRAYSIZE(kEnabledLayerNames); | |
| 194 enabledInstanceExtensionNames = kEnabledInstanceExtensionNames; | |
| 195 enabledInstanceExtensionCount = ARRAYSIZE(kEnabledInstanceExtensionNames); | |
| 196 } | |
| 197 #endif | |
| 198 | |
| 199 const VkInstanceCreateInfo instance_create = { | |
| 200 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType | |
| 201 nullptr, // pNext | |
| 202 0, // flags | |
| 203 &app_info, // pApplicationInfo | |
| 204 enabledLayerCount, // enabledLayerNameCount | |
| 205 enabledLayerNames, // ppEnabledLayerNames | |
| 206 enabledInstanceExtensionCount, // enabledExtensionNameCount | |
| 207 enabledInstanceExtensionNames, // ppEnabledExtensionNames | |
| 208 }; | |
| 209 | |
| 210 err = vkCreateInstance(&instance_create, nullptr, &inst); | |
| 211 if (err < 0) { | |
| 212 SkDebugf("vkCreateInstanced failed: %d\n", err); | |
| 213 SkFAIL("failing"); | |
| 214 } | 77 } |
| 215 | 78 |
| 216 uint32_t gpuCount; | 79 return new GrVkGpu(context, options, vkBackendContext); |
| 217 err = vkEnumeratePhysicalDevices(inst, &gpuCount, nullptr); | |
| 218 if (err) { | |
| 219 SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err); | |
| 220 SkFAIL("failing"); | |
| 221 } | |
| 222 SkASSERT(gpuCount > 0); | |
| 223 // Just returning the first physical device instead of getting the whole array. | |
| 224 gpuCount = 1; | |
| 225 err = vkEnumeratePhysicalDevices(inst, &gpuCount, &physDev); | |
| 226 if (err) { | |
| 227 SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err); | |
| 228 SkFAIL("failing"); | |
| 229 } | |
| 230 | |
| 231 // query to get the initial queue props size | |
| 232 uint32_t queueCount; | |
| 233 vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr); | |
| 234 SkASSERT(queueCount >= 1); | |
| 235 | |
| 236 SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties)); | |
| 237 // now get the actual queue props | |
| 238 VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get(); | |
| 239 | |
| 240 vkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps); | |
| 241 | |
| 242 // iterate to find the graphics queue | |
| 243 uint32_t graphicsQueueIndex = -1; | |
| 244 for (uint32_t i = 0; i < queueCount; i++) { | |
| 245 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) { | |
| 246 graphicsQueueIndex = i; | |
| 247 break; | |
| 248 } | |
| 249 } | |
| 250 SkASSERT(graphicsQueueIndex < queueCount); | |
| 251 | |
| 252 #ifdef ENABLE_VK_LAYERS | |
| 253 // unlikely that the device will have different layers than the instance, but good to check | |
| 254 if (!verify_device_layers(physDev)) { | |
| 255 enabledLayerNames = nullptr; | |
| 256 enabledLayerCount = 0; | |
| 257 } | |
| 258 #endif | |
| 259 | |
| 260 float queuePriorities[1] = { 0.0 }; | |
| 261 const VkDeviceQueueCreateInfo queueInfo = { | |
| 262 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType | |
| 263 nullptr, // pNext | |
| 264 0, // VkDeviceQueueCreateFlags | |
| 265 0, // queueFamilyIndex | |
| 266 1, // queueCount | |
| 267 queuePriorities, // pQueuePriorities | |
| 268 }; | |
| 269 const VkDeviceCreateInfo deviceInfo = { | |
| 270 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType | |
| 271 nullptr, // pNext | |
| 272 0, // VkDeviceCreateFlags | |
| 273 1, // queueCreateInfoCount | |
| 274 &queueInfo, // pQueueCreateInfos | |
| 275 enabledLayerCount, // layerCount | |
| 276 enabledLayerNames, // ppEnabledLayerNames | |
| 277 0, // extensionCount | |
| 278 nullptr, // ppEnabledExtensionNames | |
| 279 nullptr // ppEnabledFeatures | |
| 280 }; | |
| 281 | |
| 282 err = vkCreateDevice(physDev, &deviceInfo, nullptr, &device); | |
| 283 if (err) { | |
| 284 SkDebugf("CreateDevice failed: %d\n", err); | |
| 285 SkFAIL("failing"); | |
| 286 } | |
| 287 | |
| 288 VkQueue queue; | |
| 289 vkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue); | |
| 290 | |
| 291 const VkCommandPoolCreateInfo cmdPoolInfo = { | |
| 292 VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType | |
| 293 nullptr, // pNext | |
| 294 0, // CmdPoolCreateFlags | |
| 295 graphicsQueueIndex, // queueFamilyIndex | |
| 296 }; | |
| 297 | |
| 298 VkCommandPool cmdPool; | |
| 299 err = vkCreateCommandPool(device, &cmdPoolInfo, nullptr, &cmdPool); | |
| 300 if (err) { | |
| 301 SkDebugf("CreateCommandPool failed: %d\n", err); | |
| 302 SkFAIL("failing"); | |
| 303 } | |
| 304 | |
| 305 return new GrVkGpu(context, options, physDev, device, queue, cmdPool, inst); | |
| 306 } | 80 } |
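
With this change the Vulkan bootstrap moves out of the Gpu class: the client (or GrVkBackendContext::Create()) owns instance/device setup, and GrVkGpu::Create() simply adopts the handles. A rough sketch of the resulting client-side call; the GrContext::Create(GrBackend, GrBackendContext) entry point and the cast through the opaque handle are assumptions, as they are not shown in this diff:

    // Sketch of creating a Vulkan-backed GrContext under the new API; error
    // handling elided and the GrContext::Create() signature is assumed.
    SkAutoTUnref<const GrVkBackendContext> vkBackendContext(GrVkBackendContext::Create());
    if (vkBackendContext) {
        GrContext* grContext = GrContext::Create(
                kVulkan_GrBackend,
                reinterpret_cast<GrBackendContext>(vkBackendContext.get()));
    }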
| 307 | 81 |
| 308 //////////////////////////////////////////////////////////////////////////////// | 82 //////////////////////////////////////////////////////////////////////////////// |
| 309 | 83 |
| 310 GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options, | 84 GrVkGpu::GrVkGpu(GrContext* context, const GrContextOptions& options, |
| 311 VkPhysicalDevice physDev, VkDevice device, VkQueue queue, VkCommandPool cmdPool, | 85 const GrVkBackendContext* backendCtx) |
| 312 VkInstance inst) | |
| 313 : INHERITED(context) | 86 : INHERITED(context) |
| 314 , fDevice(device) | 87 , fVkInstance(backendCtx->fInstance) |
| 315 , fQueue(queue) | 88 , fDevice(backendCtx->fDevice) |
| 316 , fCmdPool(cmdPool) | 89 , fQueue(backendCtx->fQueue) |
| 317 , fResourceProvider(this) | 90 , fResourceProvider(this) { |
| 318 , fVkInstance(inst) { | 91 fBackendContext.reset(backendCtx); |
| 319 fInterface.reset(GrVkCreateInterface(fVkInstance)); | |
| 320 fCompiler = shaderc_compiler_initialize(); | |
| 321 | |
| 322 fVkCaps.reset(new GrVkCaps(options, fInterface, physDev)); | |
| 323 fCaps.reset(SkRef(fVkCaps.get())); | |
| 324 | |
| 325 fResourceProvider.init(); | |
| 326 | |
| 327 fCurrentCmdBuffer = fResourceProvider.createCommandBuffer(); | |
| 328 SkASSERT(fCurrentCmdBuffer); | |
| 329 fCurrentCmdBuffer->begin(this); | |
| 330 VK_CALL(GetPhysicalDeviceMemoryProperties(physDev, &fPhysDevMemProps)); | |
| 331 | 92 |
| 332 #ifdef ENABLE_VK_LAYERS | 93 #ifdef ENABLE_VK_LAYERS |
| 333 if (fInterface->hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) { | 94 if (this->vkInterface()->hasInstanceExtension(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) { |
| 334 /* Setup callback creation information */ | 95 /* Setup callback creation information */ |
| 335 VkDebugReportCallbackCreateInfoEXT callbackCreateInfo; | 96 VkDebugReportCallbackCreateInfoEXT callbackCreateInfo; |
| 336 callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT; | 97 callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT; |
| 337 callbackCreateInfo.pNext = nullptr; | 98 callbackCreateInfo.pNext = nullptr; |
| 338 callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | | 99 callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | |
| 339 VK_DEBUG_REPORT_WARNING_BIT_EXT | | 100 VK_DEBUG_REPORT_WARNING_BIT_EXT | |
| 340 //VK_DEBUG_REPORT_INFORMATION_BIT_EXT | | 101 //VK_DEBUG_REPORT_INFORMATION_BIT_EXT | |
| 341 //VK_DEBUG_REPORT_DEBUG_BIT_EXT | | 102 //VK_DEBUG_REPORT_DEBUG_BIT_EXT | |
| 342 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT; | 103 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT; |
| 343 callbackCreateInfo.pfnCallback = &DebugReportCallback; | 104 callbackCreateInfo.pfnCallback = &DebugReportCallback; |
| 344 callbackCreateInfo.pUserData = nullptr; | 105 callbackCreateInfo.pUserData = nullptr; |
| 345 | 106 |
| 346 /* Register the callback */ | 107 /* Register the callback */ |
| 347 GR_VK_CALL_ERRCHECK(fInterface, CreateDebugReportCallbackEXT(inst, &callbackCreateInfo, | 108 GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateDebugReportCallbackEXT(fVkInstance, |
| 348 nullptr, &fCallback)); | 109 &callbackCreateInfo, nullptr, &fCallback)); |
| 349 } | 110 } |
| 350 #endif | 111 #endif |
| 112 |
| 113 fCompiler = shaderc_compiler_initialize(); |
| 114 |
| 115 fVkCaps.reset(new GrVkCaps(options, this->vkInterface(), backendCtx->fPhysicalDevice)); |
| 116 fCaps.reset(SkRef(fVkCaps.get())); |
| 117 |
| 118 VK_CALL(GetPhysicalDeviceMemoryProperties(backendCtx->fPhysicalDevice, &fPhysDevMemProps)); |
| 119 |
| 120 const VkCommandPoolCreateInfo cmdPoolInfo = { |
| 121 VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType |
| 122 nullptr, // pNext |
| 123 VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, // CmdPoolCreateFlags |
| 124 backendCtx->fQueueFamilyIndex, // queueFamilyIndex |
| 125 }; |
| 126 GR_VK_CALL_ERRCHECK(this->vkInterface(), CreateCommandPool(fDevice, &cmdPoolInfo, nullptr, |
| 127 &fCmdPool)); |
| 128 |
| 129 // must call this after creating the CommandPool |
| 130 fResourceProvider.init(); |
| 131 fCurrentCmdBuffer = fResourceProvider.createCommandBuffer(); |
| 132 SkASSERT(fCurrentCmdBuffer); |
| 133 fCurrentCmdBuffer->begin(this); |
| 351 } | 134 } |
| 352 | 135 |
| 353 GrVkGpu::~GrVkGpu() { | 136 GrVkGpu::~GrVkGpu() { |
| 354 shaderc_compiler_release(fCompiler); | |
| 355 fCurrentCmdBuffer->end(this); | 137 fCurrentCmdBuffer->end(this); |
| 356 fCurrentCmdBuffer->unref(this); | 138 fCurrentCmdBuffer->unref(this); |
| 357 | 139 |
| 358 // wait for all commands to finish | 140 // wait for all commands to finish |
| 359 fResourceProvider.checkCommandBuffers(); | 141 fResourceProvider.checkCommandBuffers(); |
| 360 SkDEBUGCODE(VkResult res =) VK_CALL(QueueWaitIdle(fQueue)); | 142 SkDEBUGCODE(VkResult res =) VK_CALL(QueueWaitIdle(fQueue)); |
| 361 // VK_ERROR_DEVICE_LOST is acceptable when tearing down (see 4.2.4 in spec) | 143 // VK_ERROR_DEVICE_LOST is acceptable when tearing down (see 4.2.4 in spec) |
| 362 SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res); | 144 SkASSERT(VK_SUCCESS == res || VK_ERROR_DEVICE_LOST == res); |
| 363 | 145 |
| 364 // must call this just before we destroy the VkDevice | 146 // must call this just before we destroy the VkDevice |
| 365 fResourceProvider.destroyResources(); | 147 fResourceProvider.destroyResources(); |
| 366 | 148 |
| 367 #ifdef SK_DEBUG | 149 VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr)); |
| 150 |
| 151 shaderc_compiler_release(fCompiler); |
| 152 |
| 153 #ifdef ENABLE_VK_LAYERS |
| 368 VK_CALL(DestroyDebugReportCallbackEXT(fVkInstance, fCallback, nullptr)); | 154 VK_CALL(DestroyDebugReportCallbackEXT(fVkInstance, fCallback, nullptr)); |
| 369 #endif | 155 #endif |
| 370 | |
| 371 VK_CALL(DestroyCommandPool(fDevice, fCmdPool, nullptr)); | |
| 372 VK_CALL(DestroyDevice(fDevice, nullptr)); | |
| 373 VK_CALL(DestroyInstance(fVkInstance, nullptr)); | |
| 374 } | 156 } |
| 375 | 157 |
| 376 /////////////////////////////////////////////////////////////////////////////// | 158 /////////////////////////////////////////////////////////////////////////////// |
| 377 | 159 |
| 378 void GrVkGpu::submitCommandBuffer(SyncQueue sync) { | 160 void GrVkGpu::submitCommandBuffer(SyncQueue sync) { |
| 379 SkASSERT(fCurrentCmdBuffer); | 161 SkASSERT(fCurrentCmdBuffer); |
| 380 fCurrentCmdBuffer->end(this); | 162 fCurrentCmdBuffer->end(this); |
| 381 | 163 |
| 382 fCurrentCmdBuffer->submitToQueue(this, fQueue, sync); | 164 fCurrentCmdBuffer->submitToQueue(this, fQueue, sync); |
| 383 fResourceProvider.checkCommandBuffers(); | 165 fResourceProvider.checkCommandBuffers(); |
| (...skipping 1279 matching lines...) |
| 1663 int set_a_break_pt_here = 9; | 1445 int set_a_break_pt_here = 9; |
| 1664 aglSwapBuffers(aglGetCurrentContext()); | 1446 aglSwapBuffers(aglGetCurrentContext()); |
| 1665 #elif defined(SK_BUILD_FOR_WIN32) | 1447 #elif defined(SK_BUILD_FOR_WIN32) |
| 1666 SwapBuf(); | 1448 SwapBuf(); |
| 1667 int set_a_break_pt_here = 9; | 1449 int set_a_break_pt_here = 9; |
| 1668 SwapBuf(); | 1450 SwapBuf(); |
| 1669 #endif | 1451 #endif |
| 1670 #endif | 1452 #endif |
| 1671 } | 1453 } |
| 1672 | 1454 |