OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "GrVkGpu.h" | 8 #include "GrVkGpu.h" |
9 | 9 |
10 #include "GrContextOptions.h" | 10 #include "GrContextOptions.h" |
(...skipping 1039 matching lines...)
1050 // we will use the clear load ops. | 1050 // we will use the clear load ops. |
1051 fCurrentCmdBuffer->clearColorImage(this, | 1051 fCurrentCmdBuffer->clearColorImage(this, |
1052 vkRT, | 1052 vkRT, |
1053 &vkColor, | 1053 &vkColor, |
1054 1, &subRange); | 1054 1, &subRange); |
1055 } | 1055 } |
1056 | 1056 |
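The clearColorImage() call above is a thin wrapper over vkCmdClearColorImage. As a point of reference, here is a minimal sketch of the raw Vulkan call, assuming a single-mip, single-layer color image that is already in TRANSFER_DST_OPTIMAL layout; the helper name and clear color are made up for illustration and are not Skia's API.

#include <vulkan/vulkan.h>

// Record a full clear of a color image into an already-begun command buffer.
static void clear_color_image_sketch(VkCommandBuffer cmd, VkImage image,
                                     float r, float g, float b, float a) {
    VkClearColorValue color;
    color.float32[0] = r;
    color.float32[1] = g;
    color.float32[2] = b;
    color.float32[3] = a;

    // One range covering mip level 0, array layer 0.
    VkImageSubresourceRange range = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };

    vkCmdClearColorImage(cmd, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         &color, 1, &range);
}
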
1057 inline bool can_copy_image(const GrSurface* dst, | 1057 inline bool can_copy_image(const GrSurface* dst, |
1058 const GrSurface* src, | 1058 const GrSurface* src, |
1059 const GrVkGpu* gpu) { | 1059 const GrVkGpu* gpu) { |
1060 if (src->asTexture() && | 1060 // Currently we don't support msaa |
1061 dst->asTexture() && | 1061 if ((dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1) || |
1062 src->origin() == dst->origin() && | 1062 (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1)) { |
1063 src->config() == dst->config()) { | 1063 return false; |
| 1064 } |
| 1065 |
| 1066 // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src |
| 1067 // as image usage flags. |
| 1068 if (src->origin() == dst->origin() && |
| 1069 GrBytesPerPixel(src->config()) == GrBytesPerPixel(dst->config())) { |
1064 return true; | 1070 return true; |
1065 } | 1071 } |
1066 | 1072 |
1067 // How does msaa play into this? If a VkTexture is multisampled, are we copying the multisampled | 1073 // How does msaa play into this? If a VkTexture is multisampled, are we copying the multisampled |
1068 // or the resolved image here? | 1074 // or the resolved image here? If multisampled, Vulkan requires sample counts to be the same. |
1069 | 1075 |
1070 return false; | 1076 return false; |
1071 } | 1077 } |
1072 | 1078 |
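The new check above accepts any pair of configs whose texel sizes match instead of requiring identical configs; this leans on vkCmdCopyImage being a raw copy that only needs the two formats to be size-compatible. Below is a standalone sketch of that idea against plain VkFormats; the format subset and helper names are illustrative, not Skia's GrPixelConfig table.

#include <vulkan/vulkan.h>
#include <cstdint>

// Bytes per texel for a handful of color formats (illustrative subset only).
static uint32_t bytes_per_texel(VkFormat format) {
    switch (format) {
        case VK_FORMAT_R8G8B8A8_UNORM:
        case VK_FORMAT_B8G8R8A8_UNORM:
            return 4;
        case VK_FORMAT_R5G6B5_UNORM_PACK16:
            return 2;
        default:
            return 0; // unknown to this sketch
    }
}

// Two images can be raw-copied when their texel sizes match, even if the
// exact formats differ (e.g. RGBA8888 <-> BGRA8888).
static bool copy_compatible(VkFormat src, VkFormat dst) {
    uint32_t size = bytes_per_texel(src);
    return size != 0 && size == bytes_per_texel(dst);
}
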
1073 void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst, | 1079 void GrVkGpu::copySurfaceAsCopyImage(GrSurface* dst, |
1074 GrSurface* src, | 1080 GrSurface* src, |
| 1081 GrVkImage* dstImage, |
| 1082 GrVkImage* srcImage, |
1075 const SkIRect& srcRect, | 1083 const SkIRect& srcRect, |
1076 const SkIPoint& dstPoint) { | 1084 const SkIPoint& dstPoint) { |
1077 SkASSERT(can_copy_image(dst, src, this)); | 1085 SkASSERT(can_copy_image(dst, src, this)); |
1078 | 1086 |
1079 // Insert memory barriers to switch src and dst to transfer_source and transfer_dst layouts | 1087 VkImageLayout origDstLayout = dstImage->currentLayout(); |
1080 GrVkTexture* dstTex = static_cast<GrVkTexture*>(dst->asTexture()); | 1088 VkImageLayout origSrcLayout = srcImage->currentLayout(); |
1081 GrVkTexture* srcTex = static_cast<GrVkTexture*>(src->asTexture()); | |
1082 | |
1083 VkImageLayout origDstLayout = dstTex->currentLayout(); | |
1084 VkImageLayout origSrcLayout = srcTex->currentLayout(); | |
1085 | 1089 |
1086 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout); | 1090 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout); |
1087 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; | 1091 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; |
1088 | 1092 |
1089 // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if | 1093 // These flags are for flushing/invalidating caches and for the dst image it doesn't matter if |
1090 // the cache is flushed since it is only being written to. | 1094 // the cache is flushed since it is only being written to. |
1091 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);; | 1095 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);; |
1092 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; | 1096 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; |
1093 | 1097 |
1094 dstTex->setImageLayout(this, | 1098 dstImage->setImageLayout(this, |
1095 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, | 1099 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, |
1096 srcAccessMask, | 1100 srcAccessMask, |
1097 dstAccessMask, | 1101 dstAccessMask, |
1098 srcStageMask, | 1102 srcStageMask, |
1099 dstStageMask, | 1103 dstStageMask, |
1100 false); | 1104 false); |
1101 | 1105 |
1102 srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout); | 1106 srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout); |
1103 dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; | 1107 dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; |
1104 | 1108 |
1105 srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout); | 1109 srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout); |
1106 dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; | 1110 dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; |
1107 | 1111 |
1108 srcTex->setImageLayout(this, | 1112 srcImage->setImageLayout(this, |
1109 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, | 1113 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, |
1110 srcAccessMask, | 1114 srcAccessMask, |
1111 dstAccessMask, | 1115 dstAccessMask, |
1112 srcStageMask, | 1116 srcStageMask, |
1113 dstStageMask, | 1117 dstStageMask, |
1114 false); | 1118 false); |
1115 | 1119 |
1116 // Flip rect if necessary | 1120 // Flip rect if necessary |
1117 SkIRect srcVkRect = srcRect; | 1121 SkIRect srcVkRect = srcRect; |
1118 int32_t dstY = dstPoint.fY; | 1122 int32_t dstY = dstPoint.fY; |
1119 | 1123 |
1120 if (kBottomLeft_GrSurfaceOrigin == src->origin()) { | 1124 if (kBottomLeft_GrSurfaceOrigin == src->origin()) { |
1121 SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin()); | 1125 SkASSERT(kBottomLeft_GrSurfaceOrigin == dst->origin()); |
1122 srcVkRect.fTop = src->height() - srcRect.fBottom; | 1126 srcVkRect.fTop = src->height() - srcRect.fBottom; |
1123 srcVkRect.fBottom = src->height() - srcRect.fTop; | 1127 srcVkRect.fBottom = src->height() - srcRect.fTop; |
1124 dstY = dst->height() - dstPoint.fY - srcVkRect.height(); | 1128 dstY = dst->height() - dstPoint.fY - srcVkRect.height(); |
1125 } | 1129 } |
1126 | 1130 |
1127 VkImageCopy copyRegion; | 1131 VkImageCopy copyRegion; |
1128 memset(©Region, 0, sizeof(VkImageCopy)); | 1132 memset(©Region, 0, sizeof(VkImageCopy)); |
1129 copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }; | 1133 copyRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }; |
1130 copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 }; | 1134 copyRegion.srcOffset = { srcVkRect.fLeft, srcVkRect.fTop, 0 }; |
1131 copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }; | 1135 copyRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }; |
1132 copyRegion.dstOffset = { dstPoint.fX, dstY, 0 }; | 1136 copyRegion.dstOffset = { dstPoint.fX, dstY, 0 }; |
1133 copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 0 }; | 1137 copyRegion.extent = { (uint32_t)srcVkRect.width(), (uint32_t)srcVkRect.height(), 0 }; |
1134 | 1138 |
1135 fCurrentCmdBuffer->copyImage(this, | 1139 fCurrentCmdBuffer->copyImage(this, |
1136 srcTex, | 1140 srcImage, |
1137 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, | 1141 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, |
1138 dstTex, | 1142 dstImage, |
1139 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, | 1143 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, |
1140 1, | 1144 1, |
1141 ©Region); | 1145 ©Region); |
1142 } | 1146 } |
1143 | 1147 |
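Both setImageLayout() calls above amount to recording one VkImageMemoryBarrier through vkCmdPipelineBarrier. A bare-bones sketch of that raw Vulkan sequence follows, assuming a single-mip, single-layer color image; the helper name and parameters are placeholders, not Skia's API.

#include <vulkan/vulkan.h>

// Transition a color image between layouts with an image memory barrier.
// Roughly what a setImageLayout()-style helper records into the command buffer.
static void transition_image_layout(VkCommandBuffer cmd, VkImage image,
                                    VkImageLayout oldLayout, VkImageLayout newLayout,
                                    VkAccessFlags srcAccess, VkAccessFlags dstAccess,
                                    VkPipelineStageFlags srcStage,
                                    VkPipelineStageFlags dstStage) {
    VkImageMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = srcAccess;
    barrier.dstAccessMask = dstAccess;
    barrier.oldLayout = oldLayout;
    barrier.newLayout = newLayout;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };

    vkCmdPipelineBarrier(cmd, srcStage, dstStage, 0,
                         0, nullptr,    // no global memory barriers
                         0, nullptr,    // no buffer barriers
                         1, &barrier);  // one image barrier
}
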
| 1148 inline bool can_copy_as_blit(const GrSurface* dst, |
| 1149 const GrSurface* src, |
| 1150 const GrVkImage* dstImage, |
| 1151 const GrVkImage* srcImage, |
| 1152 const GrVkGpu* gpu) { |
| 1153 // We require that all vulkan GrSurfaces have been created with transfer_dst and transfer_src |
| 1154 // as image usage flags. |
| 1155 const GrVkCaps& caps = gpu->vkCaps(); |
| 1156 if (!caps.configCanBeDstofBlit(dst->config(), dstImage->isLinearTiled()) || |
| 1157 !caps.configCanBeSrcofBlit(src->config(), srcImage->isLinearTiled())) { |
| 1158 return false; |
| 1159 } |
| 1160 |
| 1161 // We cannot blit images that are multisampled. Will need to figure out if we can blit the |
| 1162 // resolved msaa though. |
| 1163 if ((dst->asRenderTarget() && dst->asRenderTarget()->numColorSamples() > 1) || |
| 1164 (src->asRenderTarget() && src->asRenderTarget()->numColorSamples() > 1)) { |
| 1165 return false; |
| 1166 } |
| 1167 |
| 1168 return true; |
| 1169 } |
| 1170 |
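configCanBeDstofBlit/configCanBeSrcofBlit presumably cache the per-format blit capability that Vulkan reports through vkGetPhysicalDeviceFormatProperties. The sketch below shows the underlying query for one format and tiling; it is an assumption about what the caps table checks, not Skia's actual implementation.

#include <vulkan/vulkan.h>

// Can 'format' participate in vkCmdBlitImage with the given tiling?
// asDst selects the destination capability, otherwise the source one.
static bool format_supports_blit(VkPhysicalDevice physDev, VkFormat format,
                                 bool linearTiling, bool asDst) {
    VkFormatProperties props;
    vkGetPhysicalDeviceFormatProperties(physDev, format, &props);

    VkFormatFeatureFlags features = linearTiling ? props.linearTilingFeatures
                                                 : props.optimalTilingFeatures;
    VkFormatFeatureFlags needed = asDst ? VK_FORMAT_FEATURE_BLIT_DST_BIT
                                        : VK_FORMAT_FEATURE_BLIT_SRC_BIT;
    return (features & needed) != 0;
}
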
| 1171 void GrVkGpu::copySurfaceAsBlit(GrSurface* dst, |
| 1172 GrSurface* src, |
| 1173 GrVkImage* dstImage, |
| 1174 GrVkImage* srcImage, |
| 1175 const SkIRect& srcRect, |
| 1176 const SkIPoint& dstPoint) { |
| 1177 SkASSERT(can_copy_as_blit(dst, src, dstImage, srcImage, this)); |
| 1178 |
| 1179 VkImageLayout origDstLayout = dstImage->currentLayout(); |
| 1180 VkImageLayout origSrcLayout = srcImage->currentLayout(); |
| 1181 |
| 1182 VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout); |
| 1183 VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; |
| 1184 |
| 1185 VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);; |
| 1186 VkAccessFlags dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; |
| 1187 |
| 1188 dstImage->setImageLayout(this, |
| 1189 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, |
| 1190 srcAccessMask, |
| 1191 dstAccessMask, |
| 1192 srcStageMask, |
| 1193 dstStageMask, |
| 1194 false); |
| 1195 |
| 1196 srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origSrcLayout); |
| 1197 dstStageMask = VK_PIPELINE_STAGE_TRANSFER_BIT; |
| 1198 |
| 1199 srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origSrcLayout); |
| 1200 dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT; |
| 1201 |
| 1202 srcImage->setImageLayout(this, |
| 1203 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, |
| 1204 srcAccessMask, |
| 1205 dstAccessMask, |
| 1206 srcStageMask, |
| 1207 dstStageMask, |
| 1208 false); |
| 1209 |
| 1210 // Flip rect if necessary |
| 1211 SkIRect srcVkRect = srcRect; |
| 1212 SkIRect dstRect; |
| 1213 dstRect.fLeft = dstPoint.fX; |
| 1214 dstRect.fRight = dstPoint.fX + srcVkRect.width(); |
| 1215 |
| 1216 if (kBottomLeft_GrSurfaceOrigin == src->origin()) { |
| 1217 srcVkRect.fTop = src->height() - srcRect.fBottom; |
| 1218 srcVkRect.fBottom = src->height() - srcRect.fTop; |
| 1219 } else { |
| 1220 srcVkRect = srcRect; |
| 1221 } |
| 1222 |
| 1223 if (kBottomLeft_GrSurfaceOrigin == dst->origin()) { |
| 1224 dstRect.fTop = dst->height() - dstPoint.fY - srcVkRect.height(); |
| 1225 } else { |
| 1226 dstRect.fTop = dstPoint.fY; |
| 1227 } |
| 1228 dstRect.fBottom = dstRect.fTop + srcVkRect.height(); |
| 1229 |
| 1230 // If we have different origins, we need to flip the top and bottom of the dst rect so that we |
| 1231 // get the correct orientation of the copied data. |
| 1232 if (src->origin() != dst->origin()) { |
| 1233 SkTSwap(dstRect.fTop, dstRect.fBottom); |
| 1234 } |
| 1235 |
| 1236 VkImageBlit blitRegion; |
| 1237 memset(&blitRegion, 0, sizeof(VkImageBlit)); |
| 1238 blitRegion.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }; |
| 1239 blitRegion.srcOffsets[0] = { srcVkRect.fLeft, srcVkRect.fTop, 0 }; |
| 1240 blitRegion.srcOffsets[1] = { srcVkRect.fRight, srcVkRect.fBottom, 1 }; |
| 1241 blitRegion.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 }; |
| 1242 blitRegion.dstOffsets[0] = { dstRect.fLeft, dstRect.fTop, 0 }; |
| 1243 blitRegion.dstOffsets[1] = { dstRect.fRight, dstRect.fBottom, 1 }; |
| 1244 |
| 1245 fCurrentCmdBuffer->blitImage(this, |
| 1246 srcImage, |
| 1247 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, |
| 1248 dstImage, |
| 1249 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, |
| 1250 1, |
| 1251 &blitRegion, |
| 1252 VK_FILTER_NEAREST); // We never scale so any filter works here |
| 1253 } |
| 1254 |
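Unlike vkCmdCopyImage, vkCmdBlitImage treats each region as a pair of opposite corners, so the SkTSwap above flips the copy vertically simply by reversing the destination's top and bottom. A stripped-down sketch of such a region using plain Vulkan structs and made-up dimensions:

#include <vulkan/vulkan.h>

// Build a blit region that copies a 64x64 area and flips it vertically by
// reversing the Y coordinates of the destination corners.
static VkImageBlit make_flipping_blit_region() {
    VkImageBlit region = {};
    region.srcSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.srcOffsets[0] = { 0, 0, 0 };
    region.srcOffsets[1] = { 64, 64, 1 };
    region.dstSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.dstOffsets[0] = { 0, 64, 0 };   // top and bottom swapped relative to src...
    region.dstOffsets[1] = { 64, 0, 1 };   // ...so the blit lands upside down
    return region;
}
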
1144 inline bool can_copy_as_draw(const GrSurface* dst, | 1255 inline bool can_copy_as_draw(const GrSurface* dst, |
1145 const GrSurface* src, | 1256 const GrSurface* src, |
1146 const GrVkGpu* gpu) { | 1257 const GrVkGpu* gpu) { |
1147 return false; | 1258 return false; |
1148 } | 1259 } |
1149 | 1260 |
1150 void GrVkGpu::copySurfaceAsDraw(GrSurface* dst, | 1261 void GrVkGpu::copySurfaceAsDraw(GrSurface* dst, |
1151 GrSurface* src, | 1262 GrSurface* src, |
1152 const SkIRect& srcRect, | 1263 const SkIRect& srcRect, |
1153 const SkIPoint& dstPoint) { | 1264 const SkIPoint& dstPoint) { |
1154 SkASSERT(false); | 1265 SkASSERT(false); |
1155 } | 1266 } |
1156 | 1267 |
1157 bool GrVkGpu::onCopySurface(GrSurface* dst, | 1268 bool GrVkGpu::onCopySurface(GrSurface* dst, |
1158 GrSurface* src, | 1269 GrSurface* src, |
1159 const SkIRect& srcRect, | 1270 const SkIRect& srcRect, |
1160 const SkIPoint& dstPoint) { | 1271 const SkIPoint& dstPoint) { |
| 1272 GrVkImage* dstImage; |
| 1273 GrVkImage* srcImage; |
| 1274 if (dst->asTexture()) { |
| 1275 dstImage = static_cast<GrVkTexture*>(dst->asTexture()); |
| 1276 } else { |
| 1277 SkASSERT(dst->asRenderTarget()); |
| 1278 dstImage = static_cast<GrVkRenderTarget*>(dst->asRenderTarget()); |
| 1279 } |
| 1280 if (src->asTexture()) { |
| 1281 srcImage = static_cast<GrVkTexture*>(src->asTexture()); |
| 1282 } else { |
| 1283 SkASSERT(src->asRenderTarget()); |
| 1284 srcImage = static_cast<GrVkRenderTarget*>(src->asRenderTarget()); |
| 1285 } |
| 1286 |
1161 if (can_copy_image(dst, src, this)) { | 1287 if (can_copy_image(dst, src, this)) { |
1162 this->copySurfaceAsCopyImage(dst, src, srcRect, dstPoint); | 1288 this->copySurfaceAsCopyImage(dst, src, dstImage, srcImage, srcRect, dstPoint); |
1163 return true; | 1289 return true; |
1164 } | 1290 } |
1165 | 1291 |
| 1292 if (can_copy_as_blit(dst, src, dstImage, srcImage, this)) { |
| 1293 this->copySurfaceAsBlit(dst, src, dstImage, srcImage, srcRect, dstPoint); |
| 1294 return true; |
| 1295 } |
| 1296 |
1166 if (can_copy_as_draw(dst, src, this)) { | 1297 if (can_copy_as_draw(dst, src, this)) { |
1167 this->copySurfaceAsDraw(dst, src, srcRect, dstPoint); | 1298 this->copySurfaceAsDraw(dst, src, srcRect, dstPoint); |
1168 return true; | 1299 return true; |
1169 } | 1300 } |
1170 | 1301 |
1171 return false; | 1302 return false; |
1172 } | 1303 } |
1173 | 1304 |
1174 void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&, | 1305 void GrVkGpu::onGetMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings&, |
1175 int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) { | 1306 int* effectiveSampleCnt, SkAutoTDeleteArray<SkPoint>*) { |
(...skipping 257 matching lines...)
1433 aglSwapBuffers(aglGetCurrentContext()); | 1564 aglSwapBuffers(aglGetCurrentContext()); |
1434 int set_a_break_pt_here = 9; | 1565 int set_a_break_pt_here = 9; |
1435 aglSwapBuffers(aglGetCurrentContext()); | 1566 aglSwapBuffers(aglGetCurrentContext()); |
1436 #elif defined(SK_BUILD_FOR_WIN32) | 1567 #elif defined(SK_BUILD_FOR_WIN32) |
1437 SwapBuf(); | 1568 SwapBuf(); |
1438 int set_a_break_pt_here = 9; | 1569 int set_a_break_pt_here = 9; |
1439 SwapBuf(); | 1570 SwapBuf(); |
1440 #endif | 1571 #endif |
1441 #endif | 1572 #endif |
1442 } | 1573 } |