OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "GrVkGpu.h" | 8 #include "GrVkGpu.h" |
9 | 9 |
10 #include "GrContextOptions.h" | 10 #include "GrContextOptions.h" |
(...skipping 239 matching lines...)
250 // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo() | 250 // We check that config == desc.fConfig in GrGpu::getWritePixelsInfo() |
251 SkASSERT(config == vkTex->desc().fConfig); | 251 SkASSERT(config == vkTex->desc().fConfig); |
252 // TODO: add compressed texture support | 252 // TODO: add compressed texture support |
253 // delete the following two lines and uncomment the two after that when ready | 253 // delete the following two lines and uncomment the two after that when ready |
254 vkTex->unref(); | 254 vkTex->unref(); |
255 return false; | 255 return false; |
256 //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width, | 256 //success = this->uploadCompressedTexData(vkTex->desc(), buffer, false, left, top, width, |
257 // height); | 257 // height); |
258 } else { | 258 } else { |
259 bool linearTiling = vkTex->isLinearTiled(); | 259 bool linearTiling = vkTex->isLinearTiled(); |
260 if (linearTiling && VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) { | 260 if (linearTiling) { |
261 // Need to change the layout to general in order to perform a host write | 261 if (VK_IMAGE_LAYOUT_PREINITIALIZED != vkTex->currentLayout()) { |
262 VkImageLayout layout = vkTex->currentLayout(); | 262 // Need to change the layout to general in order to perform a host write |
263 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout); | 263 VkImageLayout layout = vkTex->currentLayout(); |
264 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT; | 264 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout); |
265 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout); | 265 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_HOST_BIT; |
266 VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT; | 266 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout); |
267 vkTex->setImageLayout(this, | 267 VkAccessFlags dstAccessMask = VK_ACCESS_HOST_WRITE_BIT; |
268 VK_IMAGE_LAYOUT_GENERAL, | 268 vkTex->setImageLayout(this, |
269 srcAccessMask, | 269 VK_IMAGE_LAYOUT_GENERAL, |
270 dstAccessMask, | 270 srcAccessMask, |
271 srcStageMask, | 271 dstAccessMask, |
272 dstStageMask, | 272 srcStageMask, |
273 false); | 273 dstStageMask, |
274 false); | |
275 } | |
276 success = this->uploadTexDataLinear(vkTex, left, top, width, height, config, | |
277 texels.begin()->fPixels, texels.begin()->fRowBytes); | |
278 } else { | |
279 success = this->uploadTexDataOptimal(vkTex, left, top, width, height, config, texels); | |
274 } | 280 } |
275 success = this->uploadTexData(vkTex, left, top, width, height, config, | |
276 texels.begin()->fPixels, texels.begin()->fRowBytes); | |
277 } | 281 } |
278 | |
279 if (success) { | 282 if (success) { |
280 vkTex->texturePriv().dirtyMipMaps(true); | 283 vkTex->texturePriv().dirtyMipMaps(true); |
281 return true; | 284 return true; |
282 } | 285 } |
283 | 286 |
284 return false; | 287 return false; |
285 } | 288 } |
286 | 289 |
287 bool GrVkGpu::uploadTexData(GrVkTexture* tex, | 290 bool GrVkGpu::uploadTexDataLinear(GrVkTexture* tex, |
288 int left, int top, int width, int height, | 291 int left, int top, int width, int height, |
289 GrPixelConfig dataConfig, | 292 GrPixelConfig dataConfig, |
290 const void* data, | 293 const void* data, |
291 size_t rowBytes) { | 294 size_t rowBytes) { |
292 SkASSERT(data); | 295 SkASSERT(data); |
296 SkASSERT(tex->isLinearTiled()); | |
293 | 297 |
294 // If we're uploading compressed data then we should be using uploadCompressedTexData | 298 // If we're uploading compressed data then we should be using uploadCompressedTexData |
295 SkASSERT(!GrPixelConfigIsCompressed(dataConfig)); | 299 SkASSERT(!GrPixelConfigIsCompressed(dataConfig)); |
296 | 300 |
297 bool linearTiling = tex->isLinearTiled(); | |
298 | |
299 size_t bpp = GrBytesPerPixel(dataConfig); | 301 size_t bpp = GrBytesPerPixel(dataConfig); |
300 | 302 |
301 const GrSurfaceDesc& desc = tex->desc(); | 303 const GrSurfaceDesc& desc = tex->desc(); |
302 | 304 |
303 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top, | 305 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top, |
304 &width, &height, &data, &rowBytes)) { | 306 &width, &height, &data, &rowBytes)) { |
305 return false; | 307 return false; |
306 } | 308 } |
307 size_t trimRowBytes = width * bpp; | 309 size_t trimRowBytes = width * bpp; |
308 | 310 |
309 if (linearTiling) { | 311 SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() || |
310 SkASSERT(VK_IMAGE_LAYOUT_PREINITIALIZED == tex->currentLayout() || | 312 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout()); |
311 VK_IMAGE_LAYOUT_GENERAL == tex->currentLayout()); | 313 const VkImageSubresource subres = { |
312 const VkImageSubresource subres = { | 314 VK_IMAGE_ASPECT_COLOR_BIT, |
313 VK_IMAGE_ASPECT_COLOR_BIT, | 315 0, // mipLevel |
314 0, // mipLevel | 316 0, // arraySlice |
315 0, // arraySlice | 317 }; |
316 }; | 318 VkSubresourceLayout layout; |
317 VkSubresourceLayout layout; | 319 VkResult err; |
318 VkResult err; | |
319 | 320 |
320 const GrVkInterface* interface = this->vkInterface(); | 321 const GrVkInterface* interface = this->vkInterface(); |
321 | 322 |
322 GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice, | 323 GR_VK_CALL(interface, GetImageSubresourceLayout(fDevice, |
323 tex->textureImage(), | 324 tex->textureImage(), |
324 &subres, | 325 &subres, |
325 &layout)); | 326 &layout)); |
326 | 327 |
327 int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height | 328 int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height |
328 : top; | 329 : top; |
egdaniel
2016/04/29 14:12:28
can this now fit on the line above?
jvanverth1
2016/04/29 17:36:32
Done.
| |
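For reference, this is roughly what the joined-up line would look like, assuming it still fits within Skia's line-length limit (that limit is an assumption here, not stated in this CL):

    // hypothetical single-line form of the wrapped ternary above
    int texTop = kBottomLeft_GrSurfaceOrigin == desc.fOrigin ? tex->height() - top - height : top;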
329 VkDeviceSize offset = texTop*layout.rowPitch + left*bpp; | 330 VkDeviceSize offset = texTop*layout.rowPitch + left*bpp; |
330 VkDeviceSize size = height*layout.rowPitch; | 331 VkDeviceSize size = height*layout.rowPitch; |
331 void* mapPtr; | 332 void* mapPtr; |
332 err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0, | 333 err = GR_VK_CALL(interface, MapMemory(fDevice, tex->textureMemory(), offset, size, 0, |
333 &mapPtr)); | 334 &mapPtr)); |
334 if (err) { | 335 if (err) { |
336 return false; | |
337 } | |
338 | |
339 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { | |
340 // copy into buffer by rows | |
341 const char* srcRow = reinterpret_cast<const char*>(data); | |
342 char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch; | |
343 for (int y = 0; y < height; y++) { | |
344 memcpy(dstRow, srcRow, trimRowBytes); | |
345 srcRow += rowBytes; | |
346 dstRow -= layout.rowPitch; | |
347 } | |
348 } else { | |
349 // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy | |
350 if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) { | |
351 memcpy(mapPtr, data, trimRowBytes * height); | |
352 } else { | |
353 SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes, | |
354 trimRowBytes, height); | |
355 } | |
356 } | |
357 | |
358 GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory())); | |
359 | |
360 return true; | |
361 } | |
362 | |
363 bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex, | |
364 int left, int top, int width, int height, | |
365 GrPixelConfig dataConfig, | |
366 const SkTArray<GrMipLevel>& texels) { | |
367 SkASSERT(!tex->isLinearTiled()); | |
368 // The assumption is either that we have no mipmaps, or that our rect is the entire texture | |
369 SkASSERT(1 == texels.count() || | |
370 (0 == left && 0 == top && width == tex->width() && height == tex->height())); | |
371 | |
372 // If we're uploading compressed data then we should be using uploadCompressedTexData | |
373 SkASSERT(!GrPixelConfigIsCompressed(dataConfig)); | |
374 | |
375 if (width == 0 || height == 0) { | |
376 return false; | |
377 } | |
378 | |
379 const GrSurfaceDesc& desc = tex->desc(); | |
380 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig)); | |
381 size_t bpp = GrBytesPerPixel(dataConfig); | |
382 | |
383 // texels is const. | |
384 // But we may need to adjust the fPixels ptr based on the copyRect. | |
385 // In this case we need to make a non-const shallow copy of texels. | |
386 const SkTArray<GrMipLevel>* texelsPtr = &texels; | |
387 SkTArray<GrMipLevel> texelsCopy; | |
388 if (0 != left || 0 != top || width != tex->width() || height != tex->height()) { | |
389 texelsCopy = texels; | |
390 | |
391 SkASSERT(1 == texels.count()); | |
392 SkASSERT(texelsCopy[0].fPixels); | |
393 | |
394 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top, | |
395 &width, | |
egdaniel
2016/04/29 14:12:28
align these?
jvanverth1
2016/04/29 17:36:32
Done.
| |
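A purely cosmetic sketch of what the aligned call might look like after this note; the argument list is taken from the diff above, only the indentation changes:

    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height,
                                               &texelsCopy[0].fPixels,
                                               &texelsCopy[0].fRowBytes)) {
        return false;
    }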
396 &height, | |
397 &texelsCopy[0].fPixels, | |
398 &texelsCopy[0].fRowBytes)) { | |
335 return false; | 399 return false; |
336 } | 400 } |
337 | 401 |
338 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { | 402 texelsPtr = &texelsCopy; |
339 // copy into buffer by rows | 403 } |
340 const char* srcRow = reinterpret_cast<const char*>(data); | 404 |
341 char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*layout.rowPitch; | 405 // Determine whether we need to flip when we copy into the buffer |
342 for (int y = 0; y < height; y++) { | 406 bool flipY = (kBottomLeft_GrSurfaceOrigin == desc.fOrigin && !texelsPtr->empty()); |
343 memcpy(dstRow, srcRow, trimRowBytes); | 407 |
344 srcRow += rowBytes; | 408 // find the combined size of all the mip levels and the relative offset of |
345 dstRow -= layout.rowPitch; | 409 // each into the collective buffer |
410 size_t combinedBufferSize = 0; | |
411 SkTArray<size_t> individualMipOffsets(texelsPtr->count()); | |
412 for (int currentMipLevel = 0; currentMipLevel < texelsPtr->count(); currentMipLevel++) { | |
413 int twoToTheMipLevel = 1 << currentMipLevel; | |
414 int currentWidth = SkTMax(1, width / twoToTheMipLevel); | |
415 int currentHeight = SkTMax(1, height / twoToTheMipLevel); | |
416 const size_t trimmedSize = currentWidth * bpp * currentHeight; | |
417 individualMipOffsets.push_back(combinedBufferSize); | |
418 combinedBufferSize += trimmedSize; | |
419 } | |
420 | |
421 // allocate buffer to hold our mip data | |
422 GrVkTransferBuffer* transferBuffer = | |
423 GrVkTransferBuffer::Create(this, combinedBufferSize, GrVkBuffer::kCopyRead_Type); | |
424 | |
425 char* buffer = (char*) transferBuffer->map(); | |
426 SkTArray<VkBufferImageCopy> regions(texelsPtr->count()); | |
427 | |
428 for (int currentMipLevel = 0; currentMipLevel < texelsPtr->count(); currentMipLevel++) { | |
429 int twoToTheMipLevel = 1 << currentMipLevel; | |
430 int currentWidth = SkTMax(1, width / twoToTheMipLevel); | |
431 int currentHeight = SkTMax(1, height / twoToTheMipLevel); | |
432 const size_t trimRowBytes = currentWidth * bpp; | |
433 const size_t rowBytes = (*texelsPtr)[currentMipLevel].fRowBytes; | |
434 | |
435 // copy data into the buffer, skipping the trailing bytes | |
436 char* dst = buffer + individualMipOffsets[currentMipLevel]; | |
437 const char* src = (const char*)(*texelsPtr)[currentMipLevel].fPixels; | |
438 if (flipY) { | |
439 src += (currentHeight - 1) * rowBytes; | |
440 for (int y = 0; y < currentHeight; y++) { | |
441 memcpy(dst, src, trimRowBytes); | |
442 src -= rowBytes; | |
443 dst += trimRowBytes; | |
346 } | 444 } |
445 } else if (trimRowBytes == rowBytes) { | |
446 memcpy(dst, src, trimRowBytes * currentHeight); | |
347 } else { | 447 } else { |
348 // If there is no padding on the src (rowBytes) or dst (layout.rowPitch) we can memcpy | 448 SkRectMemcpy(dst, trimRowBytes, src, rowBytes, trimRowBytes, currentHeight); |
349 if (trimRowBytes == rowBytes && trimRowBytes == layout.rowPitch) { | |
350 memcpy(mapPtr, data, trimRowBytes * height); | |
351 } else { | |
352 SkRectMemcpy(mapPtr, static_cast<size_t>(layout.rowPitch), data, rowBytes, | |
353 trimRowBytes, height); | |
354 } | |
355 } | 449 } |
356 | 450 |
357 GR_VK_CALL(interface, UnmapMemory(fDevice, tex->textureMemory())); | 451 VkBufferImageCopy& region = regions.push_back(); |
358 } else { | 452 memset(®ion, 0, sizeof(VkBufferImageCopy)); |
359 GrVkTransferBuffer* transferBuffer = | 453 region.bufferOffset = individualMipOffsets[currentMipLevel]; |
360 GrVkTransferBuffer::Create(this, trimRowBytes * height, GrVkBuffer::kCopyRead_Type); | |
455 region.bufferImageHeight = currentHeight; | |
456 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, currentMipLevel, 0, 1 }; | |
457 region.imageOffset = { left, top, 0 }; | |
458 region.imageExtent = { (uint32_t)currentWidth, (uint32_t)currentHeight, 1 }; | |
459 } | |
361 | 460 |
362 void* mapPtr = transferBuffer->map(); | 461 transferBuffer->unmap(); |
363 | 462 |
364 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) { | 463 // make sure the unmap has finished |
365 // copy into buffer by rows | 464 transferBuffer->addMemoryBarrier(this, |
366 const char* srcRow = reinterpret_cast<const char*>(data); | 465 VK_ACCESS_HOST_WRITE_BIT, |
367 char* dstRow = reinterpret_cast<char*>(mapPtr)+(height - 1)*trimRowBytes; | 466 VK_ACCESS_TRANSFER_READ_BIT, |
368 for (int y = 0; y < height; y++) { | 467 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, |
369 memcpy(dstRow, srcRow, trimRowBytes); | 468 VK_PIPELINE_STAGE_TRANSFER_BIT, |
370 srcRow += rowBytes; | 469 false); |
371 dstRow -= trimRowBytes; | |
372 } | |
373 } else { | |
374 // If there is no padding on the src data rows, we can do a single memcpy | |
375 if (trimRowBytes == rowBytes) { | |
376 memcpy(mapPtr, data, trimRowBytes * height); | |
377 } else { | |
378 SkRectMemcpy(mapPtr, trimRowBytes, data, rowBytes, trimRowBytes, height); | |
379 } | |
380 } | |
381 | 470 |
382 transferBuffer->unmap(); | 471 // Change layout of our target so it can be copied to |
472 VkImageLayout layout = tex->currentLayout(); | |
473 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout); | |
474 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; | |
475 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout); | |
476 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; | |
477 // TODO: change layout of all the subresources | |
478 tex->setImageLayout(this, | |
479 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, | |
480 srcAccessMask, | |
481 dstAccessMask, | |
482 srcStageMask, | |
483 dstStageMask, | |
484 false); | |
383 | 485 |
384 // make sure the unmap has finished | 486 // Copy the buffer to the image |
385 transferBuffer->addMemoryBarrier(this, | 487 fCurrentCmdBuffer->copyBufferToImage(this, |
386 VK_ACCESS_HOST_WRITE_BIT, | 488 transferBuffer, |
387 VK_ACCESS_TRANSFER_READ_BIT, | 489 tex, |
388 VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, | 490 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, |
389 VK_PIPELINE_STAGE_TRANSFER_BIT, | 491 regions.count(), |
390 false); | 492 ®ions[0]); |
egdaniel
2016/04/29 14:12:27
why not just regions.begin()?
jvanverth1
2016/04/29 17:36:32
Done.
| |
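A minimal sketch of the change being asked for, assuming copyBufferToImage takes a const VkBufferImageCopy* as its last parameter so SkTArray::begin() can be passed directly instead of &regions[0]:

    // hypothetical form after the review comment; only the final argument changes
    fCurrentCmdBuffer->copyBufferToImage(this,
                                         transferBuffer,
                                         tex,
                                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                         regions.count(),
                                         regions.begin());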
391 | 493 |
392 // Set up copy region | 494 // Submit the current command buffer to the Queue |
393 bool flipY = kBottomLeft_GrSurfaceOrigin == tex->origin(); | 495 this->submitCommandBuffer(kSkip_SyncQueue); |
394 VkOffset3D offset = { | |
395 left, | |
396 flipY ? tex->height() - top - height : top, | |
397 0 | |
398 }; | |
399 | 496 |
400 VkBufferImageCopy region; | 497 transferBuffer->unref(); |
401 memset(®ion, 0, sizeof(VkBufferImageCopy)); | |
402 region.bufferOffset = 0; | |
403 region.bufferRowLength = width; | |
404 region.bufferImageHeight = height; | |
405 region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }; | |
406 region.imageOffset = offset; | |
407 region.imageExtent = { (uint32_t)width, (uint32_t)height, 1 }; | |
408 | |
409 // Change layout of our target so it can be copied to | |
410 VkImageLayout layout = tex->currentLayout(); | |
411 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout); | |
412 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; | |
413 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout); | |
414 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; | |
415 tex->setImageLayout(this, | |
416 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, | |
417 srcAccessMask, | |
418 dstAccessMask, | |
419 srcStageMask, | |
420 dstStageMask, | |
421 false); | |
422 | |
423 // Copy the buffer to the image | |
424 fCurrentCmdBuffer->copyBufferToImage(this, | |
425 transferBuffer, | |
426 tex, | |
427 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, | |
428 1, | |
429 ®ion); | |
430 | |
431 // Submit the current command buffer to the Queue | |
432 this->submitCommandBuffer(kSkip_SyncQueue); | |
433 | |
434 transferBuffer->unref(); | |
435 } | |
436 | 498 |
437 return true; | 499 return true; |
438 } | 500 } |
439 | 501 |
440 //////////////////////////////////////////////////////////////////////////////// | 502 //////////////////////////////////////////////////////////////////////////////// |
441 GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted, | 503 GrTexture* GrVkGpu::onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted, |
442 const SkTArray<GrMipLevel>& texels) { | 504 const SkTArray<GrMipLevel>& texels) { |
443 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag); | 505 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag); |
444 | 506 |
445 VkFormat pixelFormat; | 507 VkFormat pixelFormat; |
(...skipping 32 matching lines...)
478 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; | 540 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; |
479 | 541 |
480 // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is | 542 // This ImageDesc refers to the texture that will be read by the client. Thus even if msaa is |
481 // requested, this ImageDesc describes the resolved texture. Therefore we always have samples set | 543 // requested, this ImageDesc describes the resolved texture. Therefore we always have samples set |
482 // to 1. | 544 // to 1. |
483 GrVkImage::ImageDesc imageDesc; | 545 GrVkImage::ImageDesc imageDesc; |
484 imageDesc.fImageType = VK_IMAGE_TYPE_2D; | 546 imageDesc.fImageType = VK_IMAGE_TYPE_2D; |
485 imageDesc.fFormat = pixelFormat; | 547 imageDesc.fFormat = pixelFormat; |
486 imageDesc.fWidth = desc.fWidth; | 548 imageDesc.fWidth = desc.fWidth; |
487 imageDesc.fHeight = desc.fHeight; | 549 imageDesc.fHeight = desc.fHeight; |
488 imageDesc.fLevels = 1; // TODO: support miplevels for optimal tiling | 550 imageDesc.fLevels = linearTiling ? 1 : texels.count(); |
egdaniel
2016/04/29 14:12:27
how about we just decide to use linear only if tex
jvanverth1
2016/04/29 17:36:32
Linear tiling and a mipmap is invalid input, so re
| |
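A sketch of how the "linear tiling implies no mipmaps" invariant mentioned above could be made explicit near the fLevels assignment; the SkASSERT is illustrative only and is not part of this CL:

    // Assumption: callers never combine linear tiling with multiple mip levels,
    // so only the optimal-tiling path ever sees texels.count() > 1.
    SkASSERT(!linearTiling || texels.count() <= 1);
    imageDesc.fLevels = linearTiling ? 1 : texels.count();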
489 imageDesc.fSamples = 1; | 551 imageDesc.fSamples = 1; |
490 imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL; | 552 imageDesc.fImageTiling = linearTiling ? VK_IMAGE_TILING_LINEAR : VK_IMAGE_TILING_OPTIMAL; |
491 imageDesc.fUsageFlags = usageFlags; | 553 imageDesc.fUsageFlags = usageFlags; |
492 imageDesc.fMemProps = memProps; | 554 imageDesc.fMemProps = memProps; |
493 | 555 |
494 GrVkTexture* tex; | 556 GrVkTexture* tex; |
495 if (renderTarget) { | 557 if (renderTarget) { |
496 tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, budgeted, desc, | 558 tex = GrVkTextureRenderTarget::CreateNewTextureRenderTarget(this, budgeted, desc, |
497 imageDesc); | 559 imageDesc); |
498 } else { | 560 } else { |
499 tex = GrVkTexture::CreateNewTexture(this, budgeted, desc, imageDesc); | 561 tex = GrVkTexture::CreateNewTexture(this, budgeted, desc, imageDesc); |
500 } | 562 } |
501 | 563 |
502 if (!tex) { | 564 if (!tex) { |
503 return nullptr; | 565 return nullptr; |
504 } | 566 } |
505 | 567 |
506 // TODO: We're ignoring MIP levels here. | |
507 if (!texels.empty()) { | 568 if (!texels.empty()) { |
508 SkASSERT(texels.begin()->fPixels); | 569 SkASSERT(texels.begin()->fPixels); |
509 if (!this->uploadTexData(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig, | 570 bool success; |
510 texels.begin()->fPixels, texels.begin()->fRowBytes)) { | 571 if (linearTiling) { |
572 success = this->uploadTexDataLinear(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig, | |
573 texels.begin()->fPixels, texels.begin()->fRowBytes); | |
574 } else { | |
575 success = this->uploadTexDataOptimal(tex, 0, 0, desc.fWidth, desc.fHeight, desc.fConfig, | |
576 texels); | |
577 } | |
578 if (!success) { | |
511 tex->unref(); | 579 tex->unref(); |
512 return nullptr; | 580 return nullptr; |
513 } | 581 } |
514 } | 582 } |
515 | 583 |
516 return tex; | 584 return tex; |
517 } | 585 } |
518 | 586 |
519 //////////////////////////////////////////////////////////////////////////////// | 587 //////////////////////////////////////////////////////////////////////////////// |
520 | 588 |
(...skipping 105 matching lines...)
626 } | 694 } |
627 | 695 |
628 // change the original image's layout | 696 // change the original image's layout |
629 VkImageLayout origSrcLayout = tex->currentLayout(); | 697 VkImageLayout origSrcLayout = tex->currentLayout(); |
630 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout); | 698 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout); |
631 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; | 699 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; |
632 | 700 |
633 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout); | 701 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout); |
634 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; | 702 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; |
635 | 703 |
704 // TODO: change layout of all the subresources | |
636 tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, | 705 tex->setImageLayout(this, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, |
637 srcAccessMask, dstAccessMask, srcStageMask, dstStageMask, false); | 706 srcAccessMask, dstAccessMask, srcStageMask, dstStageMask, false); |
638 | 707 |
639 // grab handle to the original image resource | 708 // grab handle to the original image resource |
640 const GrVkImage::Resource* oldResource = tex->resource(); | 709 const GrVkImage::Resource* oldResource = tex->resource(); |
641 oldResource->ref(); | 710 oldResource->ref(); |
642 | 711 |
643 if (!tex->reallocForMipmap(this)) { | 712 if (!tex->reallocForMipmap(this)) { |
644 oldResource->unref(this); | 713 oldResource->unref(this); |
645 return; | 714 return; |
(...skipping 40 matching lines...)
686 VK_FILTER_LINEAR); | 755 VK_FILTER_LINEAR); |
687 // Blit the miplevels | 756 // Blit the miplevels |
688 while (width/2 > 0 && height/2 > 0) { | 757 while (width/2 > 0 && height/2 > 0) { |
689 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 }; | 758 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 0, 1 }; |
690 blitRegion.srcOffsets[0] = { 0, 0, 0 }; | 759 blitRegion.srcOffsets[0] = { 0, 0, 0 }; |
691 blitRegion.srcOffsets[1] = { width, height, 0 }; | 760 blitRegion.srcOffsets[1] = { width, height, 0 }; |
692 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel+1, 0, 1 }; | 761 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, mipLevel+1, 0, 1 }; |
693 blitRegion.dstOffsets[0] = { 0, 0, 0 }; | 762 blitRegion.dstOffsets[0] = { 0, 0, 0 }; |
694 blitRegion.dstOffsets[1] = { width/2, height/2, 0 }; | 763 blitRegion.dstOffsets[1] = { width/2, height/2, 0 }; |
695 | 764 |
765 // TODO: insert image barrier to wait on previous blit | |
766 | |
696 fCurrentCmdBuffer->blitImage(this, | 767 fCurrentCmdBuffer->blitImage(this, |
697 tex->resource(), | 768 tex->resource(), |
698 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, | 769 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, |
699 tex->resource(), | 770 tex->resource(), |
700 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, | 771 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, |
701 1, | 772 1, |
702 &blitRegion, | 773 &blitRegion, |
703 VK_FILTER_LINEAR); | 774 VK_FILTER_LINEAR); |
704 | 775 |
705 width /= 2; | 776 width /= 2; |
(...skipping 998 matching lines...)
1704 aglSwapBuffers(aglGetCurrentContext()); | 1775 aglSwapBuffers(aglGetCurrentContext()); |
1705 int set_a_break_pt_here = 9; | 1776 int set_a_break_pt_here = 9; |
1706 aglSwapBuffers(aglGetCurrentContext()); | 1777 aglSwapBuffers(aglGetCurrentContext()); |
1707 #elif defined(SK_BUILD_FOR_WIN32) | 1778 #elif defined(SK_BUILD_FOR_WIN32) |
1708 SwapBuf(); | 1779 SwapBuf(); |
1709 int set_a_break_pt_here = 9; | 1780 int set_a_break_pt_here = 9; |
1710 SwapBuf(); | 1781 SwapBuf(); |
1711 #endif | 1782 #endif |
1712 #endif | 1783 #endif |
1713 } | 1784 } |