Chromium Code Reviews

Unified Diff: src/gpu/vk/GrVkGpu.cpp

Issue 1925303002: Add mipmap loading to Vulkan. (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Fixes to handle mipmap allocations and autogen Created 4 years, 7 months ago
 /*
  * Copyright 2015 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */
 
 #include "GrVkGpu.h"
 
 #include "GrContextOptions.h"
(...skipping 12 matching lines...)
 #include "GrVkPipeline.h"
 #include "GrVkPipelineState.h"
 #include "GrVkRenderPass.h"
 #include "GrVkResourceProvider.h"
 #include "GrVkTexture.h"
 #include "GrVkTextureRenderTarget.h"
 #include "GrVkTransferBuffer.h"
 #include "GrVkVertexBuffer.h"
 
 #include "SkConfig8888.h"
+#include "SkMipMap.h"
 
 #include "vk/GrVkInterface.h"
 #include "vk/GrVkTypes.h"
 
 #define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
 #define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
 #define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this->vkInterface(), X)
 
 #ifdef ENABLE_VK_LAYERS
 VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
(...skipping 185 matching lines...)
 
 bool GrVkGpu::onWritePixels(GrSurface* surface,
                             int left, int top, int width, int height,
                             GrPixelConfig config,
                             const SkTArray<GrMipLevel>& texels) {
     GrVkTexture* vkTex = static_cast<GrVkTexture*>(surface->asTexture());
     if (!vkTex) {
         return false;
     }
 
-    // TODO: We're ignoring MIP levels here.
+    // Make sure we have at least the base level
     if (texels.empty() || !texels.begin()->fPixels) {
         return false;
     }
 
     // We assume Vulkan doesn't do sRGB <-> linear conversions when reading and writing pixels.
     if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
         return false;
     }
 
     bool success = false;
     if (GrPixelConfigIsCompressed(vkTex->desc().fConfig)) {
         // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo()
         SkASSERT(config == vkTex->desc().fConfig);
         // TODO: add compressed texture support
         // delete the following two lines and uncomment the two after that when ready
         vkTex->unref();
         return false;
         //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width,
         //                                        height);
     } else {
         bool linearTiling = vkTex->isLinearTiled();
-        if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
-            // Need to change the layout to general in order to perform a host write
-            VkImageLayout layout = vkTex->currentLayout();
-            VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
-            VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
-            VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
-            VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
-            vkTex->setImageLayout(this,
-                                  VK_IMAGE_LAYOUT_GENERAL,
-                                  srcAccessMask,
-                                  dstAccessMask,
-                                  srcStageMask,
-                                  dstStageMask,
-                                  false);
+        if (linearTiling) {
+            if (texels.count() > 1) {
+                SkDebugf("Can't upload mipmap data to linear tiled texture");
+                return false;
+            }
+            if (VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) {
+                // Need to change the layout to general in order to perform a host write
+                VkImageLayout layout = vkTex->currentLayout();
+                VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+                VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT;
+                VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+                VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
+                vkTex->setImageLayout(this,
+                                      VK_IMAGE_LAYOUT_GENERAL,
+                                      srcAccessMask,
+                                      dstAccessMask,
+                                      srcStageMask,
+                                      dstStageMask,
+                                      false);
+            }
+            success = this->uploadTexDataLinear(vkTex, left, top, width, height, config,
+                                                texels.begin()->fPixels,
+                                                texels.begin()->fRowBytes);
+        } else {
+            uint32_t mipLevels = texels.count();
+            if (vkTex->texturePriv().maxMipMapLevel() != mipLevels) {
+                if (!vkTex->reallocForMipmap(this, mipLevels)) {
+                    return false;
+                }
+            }
+            success = this->uploadTexDataOptimal(vkTex, left, top, width, height, config, texels);
         }
-        success = this->uploadTexData(vkTex, left, top, width, height, config,
-                                      texels.begin()->fPixels, texels.begin()->fRowBytes);
     }
 
-    if (success) {
-        vkTex->texturePriv().dirtyMipMaps(true);
-        return true;
-    }
-
-    return false;
+    return success;
 }
 
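A quick orientation on the new API shape, not part of the patch: onWritePixels now takes one GrMipLevel per mip level, base level first, each level half the previous size (matching the width / (1 << level) math in uploadTexDataOptimal below). A minimal sketch of how a caller might fill that array for tightly packed RGBA8 data; the helper name and the assumption that the caller owns the per-level pixel storage are ours.

    // Sketch only: one GrMipLevel per level, base level at index 0. GrMipLevel's
    // fPixels/fRowBytes fields are taken from the patch; SkTArray and SkTMax are
    // existing Skia utilities. levelPixels[i] must point at
    // max(1, width >> i) x max(1, height >> i) tightly packed RGBA8 texels.
    SkTArray<GrMipLevel> make_mip_levels(int width, const SkTArray<const void*>& levelPixels) {
        SkTArray<GrMipLevel> texels(levelPixels.count());
        for (int i = 0; i < levelPixels.count(); ++i) {
            int levelWidth = SkTMax(1, width >> i);
            GrMipLevel level;
            level.fPixels = levelPixels[i];
            level.fRowBytes = levelWidth * 4;  // 4 bytes per pixel, no row padding
            texels.push_back(level);
        }
        return texels;
    }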
-bool GrVkGpu::uploadTexData(GrVkTexture* tex,
-                            int left, int top, int width, int height,
-                            GrPixelConfig dataConfig,
-                            const void* data,
-                            size_t rowBytes) {
-    SkASSERT(data);
-
-    // If we're uploading compressed data then we should be using uploadCompressedTexData
-    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
-
-    bool linearTiling = tex->isLinearTiled();
-
-    size_t bpp = GrBytesPerPixel(dataConfig);
-
-    const GrSurfaceDesc& desc = tex->desc();
-
-    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
-                                               &width, &height, &data, &rowBytes)) {
-        return false;
-    }
-    size_t trimRowBytes = width * bpp;
-
-    if (linearTiling) {
-        SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
-                 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
-        const VkImageSubresource subres = {
-            VK_IMAGE_ASPECT_COLOR_BIT,
-            0,  // mipLevel
-            0,  // arraySlice
-        };
-        VkSubresourceLayout layout;
-        VkResult err;
-
-        const GrVkInterface* interface = this->vkInterface();
-
-        GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
-                                                        tex->textureImage(),
-                                                        &subres,
-                                                        &layout));
-
-        int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height
-                                                                 : top;
-        VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
-        VkDeviceSize size = height*layout.rowPitch;
-        void* mapPtr;
-        err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
-                                              &mapPtr));
-        if (err) {
-            return false;
-        }
-
-        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
-            // copy into buffer by rows
-            const char* srcRow = reinterpret_cast<const char*>(data);
-            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
-            for (int y = 0; y < height; y++) {
-                memcpy(dstRow, srcRow, trimRowBytes);
-                srcRow += rowBytes;
-                dstRow -= layout.rowPitch;
-            }
-        } else {
-            // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
-            if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
-                memcpy(mapPtr, data, trimRowBytes * height);
-            } else {
-                SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes,
-                             trimRowBytes, height);
-            }
-        }
-
-        GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
-    } else {
-        GrVkTransferBuffer* transferBuffer =
-            GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type);
-
-        void* mapPtr = transferBuffer->map();
-
-        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
-            // copy into buffer by rows
-            const char* srcRow = reinterpret_cast<const char*>(data);
-            char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*trimRowBytes;
-            for (int y = 0; y < height; y++) {
-                memcpy(dstRow, srcRow, trimRowBytes);
-                srcRow += rowBytes;
-                dstRow -= trimRowBytes;
-            }
-        } else {
-            // If there is no padding on the src data rows, we can do a single memcpy
-            if (trimRowBytes == rowBytes) {
-                memcpy(mapPtr, data, trimRowBytes * height);
-            } else {
-                SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height);
-            }
-        }
-
-        transferBuffer->unmap();
-
-        // make sure the unmap has finished
-        transferBuffer->addMemoryBarrier(this,
-                                         VK_ACCESS_HOST_WRITE_BIT,
-                                         VK_ACCESS_TRANSFER_READ_BIT,
-                                         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
-                                         VK_PIPELINE_STAGE_TRANSFER_BIT,
-                                         false);
-
-        // Set up copy region
-        bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin();
-        VkOffset3D offset = {
-            left,
-            flipY ? tex->height() - top - height : top,
-            0
-        };
-
-        VkBufferImageCopy region;
-        memset(&region, 0, sizeof(VkBufferImageCopy));
-        region.bufferOffset = 0;
-        region.bufferRowLength = width;
-        region.bufferImageHeight = height;
-        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
-        region.imageOffset = offset;
-        region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 };
-
-        // Change layout of our target so it can be copied to
-        VkImageLayout layout = tex->currentLayout();
-        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
-        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
-        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
-        VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
-        tex->setImageLayout(this,
-                            VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-                            srcAccessMask,
-                            dstAccessMask,
-                            srcStageMask,
-                            dstStageMask,
-                            false);
-
-        // Copy the buffer to the image
-        fCurrentCmdBuffer->copyBufferToImage(this,
-                                             transferBuffer,
-                                             tex,
-                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-                                             1,
-                                             &region);
-
-        // Submit the current command buffer to the Queue
-        this->submitCommandBuffer(kSkip_SyncQueue);
-
-        transferBuffer->unref();
-    }
-
-    return true;
-}
+bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex,
+                                  int left, int top, int width, int height,
+                                  GrPixelConfig dataConfig,
+                                  const void* data,
+                                  size_t rowBytes) {
+    SkASSERT(data);
+    SkASSERT(tex->isLinearTiled());
+
+    // If we're uploading compressed data then we should be using uploadCompressedTexData
+    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
+
+    size_t bpp = GrBytesPerPixel(dataConfig);
+
+    const GrSurfaceDesc& desc = tex->desc();
+
+    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
+                                               &width, &height, &data, &rowBytes)) {
+        return false;
+    }
+    size_t trimRowBytes = width * bpp;
+
+    SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() ||
+             VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout());
+    const VkImageSubresource subres = {
+        VK_IMAGE_ASPECT_COLOR_BIT,
+        0,  // mipLevel
+        0,  // arraySlice
+    };
+    VkSubresourceLayout layout;
+    VkResult err;
+
+    const GrVkInterface* interface = this->vkInterface();
+
+    GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice,
+                                                    tex->textureImage(),
+                                                    &subres,
+                                                    &layout));
+
+    int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height : top;
+    VkDeviceSize offset = texTop*layout.rowPitch + left*bpp;
+    VkDeviceSize size = height*layout.rowPitch;
+    void* mapPtr;
+    err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0,
+                                          &mapPtr));
+    if (err) {
+        return false;
+    }
+
+    if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
+        // copy into buffer by rows
+        const char* srcRow = reinterpret_cast<const char*>(data);
+        char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch;
+        for (int y = 0; y < height; y++) {
+            memcpy(dstRow, srcRow, trimRowBytes);
+            srcRow += rowBytes;
+            dstRow -= layout.rowPitch;
+        }
+    } else {
+        // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy
+        if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) {
+            memcpy(mapPtr, data, trimRowBytes * height);
+        } else {
+            SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes,
+                         trimRowBytes, height);
+        }
+    }
+
+    GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory()));
+
+    return true;
+}
+
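To make the linear-tiling offset math concrete, a worked example with numbers that are ours, not the patch's: linear images may pad each row, so the driver-reported rowPitch can exceed the tight width * bpp, which is exactly why the SkRectMemcpy branch above steps by layout.rowPitch.

    // Worked example (illustrative numbers): writing a 64x32 rect at
    // (left, top) = (16, 8) into a 200-wide RGBA8 linear image whose
    // driver-reported rowPitch is 1024 bytes.
    size_t bpp = 4;                                    // RGBA8
    VkDeviceSize rowPitch = 1024;                      // from VkSubresourceLayout
    int left = 16, top = 8, height = 32;
    VkDeviceSize offset = top * rowPitch + left * bpp; // 8*1024 + 16*4 = 8256
    VkDeviceSize size = height * rowPitch;             // 32*1024      = 32768
    // Map [offset, offset + size) and copy 64*4 = 256 bytes per row, advancing
    // the destination by rowPitch (1024) rather than the tight 256.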
+bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex,
+                                   int left, int top, int width, int height,
+                                   GrPixelConfig dataConfig,
+                                   const SkTArray<GrMipLevel>& texels) {
+    SkASSERT(!tex->isLinearTiled());
+    // The assumption is either that we have no mipmaps, or that our rect is the entire texture
+    SkASSERT(1 == texels.count() ||
+             (0 == left && 0 == top && width == tex->width() && height == tex->height()));
+
+    // If we're uploading compressed data then we should be using uploadCompressedTexData
+    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
+
+    if (width == 0 || height == 0) {
+        return false;
+    }
+
+    const GrSurfaceDesc& desc = tex->desc();
+    SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
+    size_t bpp = GrBytesPerPixel(dataConfig);
+
+    // texels is const.
+    // But we may need to adjust the fPixels ptr based on the copyRect.
+    // In this case we need to make a non-const shallow copy of texels.
+    const SkTArray<GrMipLevel>* texelsPtr = &texels;
+    SkTArray<GrMipLevel> texelsCopy;
+    if (0 != left || 0 != top || width != tex->width() || height != tex->height()) {
+        texelsCopy = texels;
+
+        SkASSERT(1 == texels.count());
+        SkASSERT(texelsCopy[0].fPixels);
+
+        if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
+                                                   &width, &height, &texelsCopy[0].fPixels,
+                                                   &texelsCopy[0].fRowBytes)) {
+            return false;
+        }
+
+        texelsPtr = &texelsCopy;
+    }
+
+    // Determine whether we need to flip when we copy into the buffer
+    bool flipY = (kBottomLeft_GrSurfaceOrigin == desc.fOrigin && !texelsPtr->empty());
+
+    // find the combined size of all the mip levels and the relative offset of
+    // each into the collective buffer
+    size_t combinedBufferSize = 0;
+    SkTArray<size_t> individualMipOffsets(texelsPtr->count());
+    for (int currentMipLevel = 0; currentMipLevel < texelsPtr->count(); currentMipLevel++) {
+        int twoToTheMipLevel = 1 << currentMipLevel;
+        int currentWidth = SkTMax(1, width / twoToTheMipLevel);
+        int currentHeight = SkTMax(1, height / twoToTheMipLevel);
+        const size_t trimmedSize = currentWidth * bpp * currentHeight;
+        individualMipOffsets.push_back(combinedBufferSize);
+        combinedBufferSize += trimmedSize;
+    }
+
+    // allocate buffer to hold our mip data
+    GrVkTransferBuffer* transferBuffer =
+            GrVkTransferBuffer::Create(this, combinedBufferSize, GrVkBuffer::kCopyRead_Type);
+
+    char* buffer = (char*) transferBuffer->map();
+    SkTArray<VkBufferImageCopy> regions(texelsPtr->count());
+
+    for (int currentMipLevel = 0; currentMipLevel < texelsPtr->count(); currentMipLevel++) {
+        int twoToTheMipLevel = 1 << currentMipLevel;
+        int currentWidth = SkTMax(1, width / twoToTheMipLevel);
+        int currentHeight = SkTMax(1, height / twoToTheMipLevel);
+        const size_t trimRowBytes = currentWidth * bpp;
+        const size_t rowBytes = (*texelsPtr)[currentMipLevel].fRowBytes;
+
+        // copy data into the buffer, skipping the trailing bytes
+        char* dst = buffer + individualMipOffsets[currentMipLevel];
+        const char* src = (const char*)(*texelsPtr)[currentMipLevel].fPixels;
+        if (flipY) {
+            src += (currentHeight - 1) * rowBytes;
+            for (int y = 0; y < currentHeight; y++) {
+                memcpy(dst, src, trimRowBytes);
+                src -= rowBytes;
+                dst += trimRowBytes;
+            }
+        } else if (trimRowBytes == rowBytes) {
+            memcpy(dst, src, trimRowBytes * currentHeight);
+        } else {
+            SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight);
+        }
+
+        VkBufferImageCopy& region = regions.push_back();
+        memset(&region, 0, sizeof(VkBufferImageCopy));
+        region.bufferOffset = individualMipOffsets[currentMipLevel];
+        region.bufferRowLength = currentWidth;
+        region.bufferImageHeight = currentHeight;
+        region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, currentMipLevel, 0, 1 };
+        region.imageOffset = { left, top, 0 };
+        region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 };
+    }
+
+    transferBuffer->unmap();
+
+    // make sure the unmap has finished
+    transferBuffer->addMemoryBarrier(this,
+                                     VK_ACCESS_HOST_WRITE_BIT,
+                                     VK_ACCESS_TRANSFER_READ_BIT,
+                                     VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
+                                     false);
+
+    // Change layout of our target so it can be copied to
+    VkImageLayout layout = tex->currentLayout();
+    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+    VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
+    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+    VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
+    // TODO: change layout of all the subresources
+    tex->setImageLayout(this,
+                        VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                        srcAccessMask,
+                        dstAccessMask,
+                        srcStageMask,
+                        dstStageMask,
+                        false);
+
+    // Copy the buffer to the image
+    fCurrentCmdBuffer->copyBufferToImage(this,
+                                         transferBuffer,
+                                         tex,
+                                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                                         regions.count(),
+                                         regions.begin());
+
+    // Submit the current command buffer to the Queue
+    this->submitCommandBuffer(kSkip_SyncQueue);
+
+    transferBuffer->unref();
+
+    return true;
+}
 
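To make the staging-buffer layout above concrete, a worked example with numbers of our choosing: a full chain for an 8x8 RGBA8 texture (bpp = 4) packed tightly into one transfer buffer, and the copy region the loop would emit for one of its levels.

    // Worked example (our numbers): offsets produced by the mip-offset loop for
    // an 8x8 RGBA8 texture with a full 4-level chain.
    //   level 0: 8x8x4 = 256 bytes, offset   0
    //   level 1: 4x4x4 =  64 bytes, offset 256
    //   level 2: 2x2x4 =  16 bytes, offset 320
    //   level 3: 1x1x4 =   4 bytes, offset 336
    //   combinedBufferSize = 340
    // The matching VkBufferImageCopy for level 2:
    VkBufferImageCopy region;
    memset(&region, 0, sizeof(VkBufferImageCopy));
    region.bufferOffset = 320;                 // individualMipOffsets[2]
    region.bufferRowLength = 2;                // in texels (0 would also mean "tightly packed")
    region.bufferImageHeight = 2;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 2, 0, 1 };
    region.imageOffset = { 0, 0, 0 };          // multi-level uploads are full-texture per the assert
    region.imageExtent = { 2, 2, 1 };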
 ////////////////////////////////////////////////////////////////////////////////
 GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                                     const SkTArray<GrMipLevel>& texels) {
     bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
 
     VkFormat pixelFormat;
     if (!GrPixelConfigToVkFormat(desc.fConfig, &pixelFormat)) {
         return nullptr;
     }
 
     if (!fVkCaps->isConfigTexturable(desc.fConfig)) {
         return nullptr;
     }
 
     bool linearTiling = false;
     if (SkToBool(desc.fFlags & kZeroCopy_GrSurfaceFlag)) {
+        // we can't have a linear texture with a mipmap
+        if (texels.count() > 1) {
+            SkDebugf("Trying to create linear tiled texture with mipmap");
+            return nullptr;
+        }
         if (fVkCaps->isConfigTexurableLinearly(desc.fConfig) &&
             (!renderTarget || fVkCaps->isConfigRenderableLinearly(desc.fConfig, false))) {
             linearTiling = true;
         } else {
             return nullptr;
         }
     }
 
     VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
     if (renderTarget) {
(...skipping 12 matching lines...)
                                                VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
 
     // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is
     // requested, this ImageDesc describes the resolved texture. Therefore we always have samples
     // set to 1.
     GrVkImage::ImageDesc imageDesc;
     imageDesc.fImageType = VK_IMAGE_TYPE_2D;
     imageDesc.fFormat = pixelFormat;
     imageDesc.fWidth = desc.fWidth;
     imageDesc.fHeight = desc.fHeight;
-    imageDesc.fLevels = 1; // TODO: support miplevels for optimal tiling
+    imageDesc.fLevels = linearTiling ? 1 : texels.count();
     imageDesc.fSamples = 1;
     imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL;
     imageDesc.fUsageFlags = usageFlags;
     imageDesc.fMemProps = memProps;
 
     GrVkTexture* tex;
     if (renderTarget) {
         tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, budgeted, desc,
                                                                     imageDesc);
     } else {
         tex = GrVkTexture::CreateNewTexture(this, budgeted, desc, imageDesc);
     }
 
     if (!tex) {
         return nullptr;
     }
 
-    // TODO: We're ignoring MIP levels here.
     if (!texels.empty()) {
         SkASSERT(texels.begin()->fPixels);
-        if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
-                                 texels.begin()->fPixels, texels.begin()->fRowBytes)) {
+        bool success;
+        if (linearTiling) {
+            success = this->uploadTexDataLinear(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
+                                                texels.begin()->fPixels,
+                                                texels.begin()->fRowBytes);
+        } else {
+            success = this->uploadTexDataOptimal(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
+                                                 texels);
+        }
+        if (!success) {
             tex->unref();
             return nullptr;
         }
     }
 
     return tex;
 }
 
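A side note on the new fLevels logic, with arithmetic that is ours rather than the patch's: since imageDesc.fLevels is now driven by texels.count() for optimal tiling, a caller wanting a complete pyramid must supply floor(log2(max(W, H))) + 1 levels. A minimal sketch of that count:

    // Sketch only (not from the patch): the full-chain level count that
    // texels.count() must equal for a complete mip pyramid. For an 800x600
    // base: max = 800, floor(log2(800)) = 9, so 10 levels (800x600 down to 1x1).
    int full_mip_chain_count(int width, int height) {
        int largest = SkTMax(width, height);
        int count = 1;              // the base level
        while (largest > 1) {
            largest >>= 1;
            ++count;
        }
        return count;
    }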
 ////////////////////////////////////////////////////////////////////////////////
 
(...skipping 79 matching lines...)
     if (tgt && wrapDesc.fStencilBits) {
         if (!createStencilAttachmentForRenderTarget(tgt, desc.fWidth, desc.fHeight)) {
             tgt->unref();
             return nullptr;
         }
     }
     return tgt;
 }
 
 void GrVkGpu::generateMipmap(GrVkTexture* tex) const {
-    // don't need to do anything for linearly tiled textures (can't have mipmaps)
+    // don't do anything for linearly tiled textures (can't have mipmaps)
     if (tex->isLinearTiled()) {
+        SkDebugf("Trying to create mipmap for linear tiled texture");
         return;
     }
 
     // We cannot generate mipmaps for images that are multisampled.
     // TODO: does it even make sense for rendertargets in general?
     if (tex->asRenderTarget() && tex->asRenderTarget()->numColorSamples() > 1) {
         return;
     }
 
     // determine if we can blit to and from this format
     const GrVkCaps& caps = this->vkCaps();
     if (!caps.configCanBeDstofBlit(tex->config(), false) ||
         !caps.configCanBeSrcofBlit(tex->config(), false)) {
         return;
     }
 
     // change the original image's layout
     VkImageLayout origSrcLayout = tex->currentLayout();
     VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout);
     VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
 
     VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout);
     VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
 
+    // TODO: change layout of all the subresources
     tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                         srcAccessMask, dstAccessMask, srcStageMask, dstStageMask, false);
 
     // grab handle to the original image resource
     const GrVkImage::Resource* oldResource = tex->resource();
     oldResource->ref();
 
-    if (!tex->reallocForMipmap(this)) {
+    uint32_t mipLevels = SkMipMap::ComputeLevelCount(tex->width(), tex->height());
+    if (!tex->reallocForMipmap(this, mipLevels)) {
         oldResource->unref(this);
         return;
     }
 
     // change the new image's layout
     VkImageLayout origDstLayout = tex->currentLayout();
 
     srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
     dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT;
 
(...skipping 32 matching lines...)
                                  VK_FILTER_LINEAR);
     // Blit the miplevels
     while (width/2 > 0 && height/2 > 0) {
         blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 };
         blitRegion.srcOffsets[0] = { 0, 0, 0 };
         blitRegion.srcOffsets[1] = { width, height, 0 };
         blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel+1, 0, 1 };
         blitRegion.dstOffsets[0] = { 0, 0, 0 };
         blitRegion.dstOffsets[1] = { width/2, height/2, 0 };
 
+        // TODO: insert image barrier to wait on previous blit
+
         fCurrentCmdBuffer->blitImage(this,
                                      tex->resource(),
                                      VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                      tex->resource(),
                                      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                      1,
                                      &blitRegion,
                                      VK_FILTER_LINEAR);
 
         width /= 2;
(...skipping 561 matching lines...)
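For the "insert image barrier" TODO above, a minimal sketch of the standard per-level barrier in the mip blit chain: before blitting from level N, wait for the blit that wrote level N and transition that one subresource for transfer reads. This uses raw Vulkan calls with `image` and `cmdBuffer` assumed to be in scope; the patch itself would presumably route this through GrVkCommandBuffer rather than calling vkCmdPipelineBarrier directly.

    // Sketch only: make the previous blit's writes to `mipLevel` visible and
    // move that single level from TRANSFER_DST to TRANSFER_SRC before it is
    // read as the source of the next blit.
    VkImageMemoryBarrier barrier;
    memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;                  // the VkImage being mipped (assumed in scope)
    barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 1, 0, 1 };
    vkCmdPipelineBarrier(cmdBuffer,         // assumed in scope
                         VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                         0, 0, nullptr, 0, nullptr, 1, &barrier);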
     // depth value be 1.
     copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 1 };
 
     fCurrentCmdBuffer->copyImage(this,
                                  srcImage,
                                  VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                  dstImage,
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  1,
                                  &copyRegion);
+
+    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
+                                        srcRect.width(), srcRect.height());
+    this->didWriteToSurface(dst, &dstRect);

egdaniel 2016/04/29 18:10:28  is this something that GL does as well when we do
jvanverth1 2016/04/29 18:16:10  Yup, and in flushRenderTarget(). I don't think we

 }
 
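For readers tracing where the mip invalidation went: the old onWritePixels called vkTex->texturePriv().dirtyMipMaps(true) inline, and this CL moves that responsibility into didWriteToSurface in src/gpu/GrGpu.cpp (see the file list for this review). A plausible sketch of that hook, with the body assumed from the removed code rather than taken from the patch:

    // Plausible sketch only; the actual change lives in src/gpu/GrGpu.cpp in
    // this CL and may differ. Marks the surface's mips dirty whenever a
    // backend writes to it, so they are regenerated before the next sampled draw.
    void GrGpu::didWriteToSurface(GrSurface* surface, const SkIRect* bounds) const {
        SkASSERT(surface);
        // bounds is unused in this sketch; a fuller version might skip the
        // invalidation for writes that touch nothing.
        if (GrTexture* texture = surface->asTexture()) {
            texture->texturePriv().dirtyMipMaps(true);
        }
    }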
 inline bool can_copy_as_blit(const GrSurface* dst,
                              const GrSurface* src,
                              const GrVkImage* dstImage,
                              const GrVkImage* srcImage,
                              const GrVkGpu* gpu) {
     // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src
     // as image usage flags.
     const GrVkCaps& caps = gpu->vkCaps();
(...skipping 90 matching lines...)
     blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 0 };
 
     fCurrentCmdBuffer->blitImage(this,
                                  srcImage->resource(),
                                  VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                                  dstImage->resource(),
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                  1,
                                  &blitRegion,
                                  VK_FILTER_NEAREST); // We never scale so any filter works here
+
+    this->didWriteToSurface(dst, &dstRect);
 }
 
 inline bool can_copy_as_draw(const GrSurface* dst,
                              const GrSurface* src,
                              const GrVkGpu* gpu) {
     return false;
 }
 
 void GrVkGpu::copySurfaceAsDraw(GrSurface* dst,
                                 GrSurface* src,
(...skipping 307 matching lines...)
     aglSwapBuffers(aglGetCurrentContext());
     int set_a_break_pt_here = 9;
     aglSwapBuffers(aglGetCurrentContext());
 #elif defined(SK_BUILD_FOR_WIN32)
     SwapBuf();
     int set_a_break_pt_here = 9;
     SwapBuf();
 #endif
 #endif
 }