OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "GrVkMemory.h" | 8 #include "GrVkMemory.h" |
9 | 9 |
10 #include "GrVkGpu.h" | 10 #include "GrVkGpu.h" |
11 #include "GrVkUtil.h" | 11 #include "GrVkUtil.h" |
12 | 12 |
| 13 #ifdef SK_DEBUG |
| 14 // for simple tracking of how much we're using in each heap |
| 15 // last counter is for non-subheap allocations |
| 16 VkDeviceSize gHeapUsage[VK_MAX_MEMORY_HEAPS+1] = { 0 }; |
| 17 #endif |
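The debug-only gHeapUsage array introduced above accumulates the bytes this allocator has taken from each Vulkan memory heap, with one extra slot (index VK_MAX_MEMORY_HEAPS) for allocations made outside any subheap. A minimal sketch of how these counters could be read back, assuming a hypothetical dump_heap_usage helper (not part of this change) and a VkPhysicalDeviceMemoryProperties obtained from vkGetPhysicalDeviceMemoryProperties:

    #ifdef SK_DEBUG
    static void dump_heap_usage(const VkPhysicalDeviceMemoryProperties& props) {
        for (uint32_t i = 0; i < props.memoryHeapCount; ++i) {
            // bytes taken from heap i versus the heap's total capacity
            SkDebugf("heap %u: %llu / %llu bytes\n", i,
                     (unsigned long long)gHeapUsage[i],
                     (unsigned long long)props.memoryHeaps[i].size);
        }
        // the final counter covers direct (non-subheap) allocations
        SkDebugf("direct: %llu bytes\n",
                 (unsigned long long)gHeapUsage[VK_MAX_MEMORY_HEAPS]);
    }
    #endif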
| 18 |
13 static bool get_valid_memory_type_index(const VkPhysicalDeviceMemoryProperties& physDevMemProps, | 19 static bool get_valid_memory_type_index(const VkPhysicalDeviceMemoryProperties& physDevMemProps, |
14 uint32_t typeBits, | 20 uint32_t typeBits, |
15 VkMemoryPropertyFlags requestedMemFlags, | 21 VkMemoryPropertyFlags requestedMemFlags, |
16 uint32_t* typeIndex) { | 22 uint32_t* typeIndex, |
| 23 uint32_t* heapIndex) { |
17 for (uint32_t i = 0; i < physDevMemProps.memoryTypeCount; ++i) { | 24 for (uint32_t i = 0; i < physDevMemProps.memoryTypeCount; ++i) { |
18 if (typeBits & (1 << i)) { | 25 if (typeBits & (1 << i)) { |
19 uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags & | 26 uint32_t supportedFlags = physDevMemProps.memoryTypes[i].propertyFlags & |
20 requestedMemFlags; | 27 requestedMemFlags; |
21 if (supportedFlags == requestedMemFlags) { | 28 if (supportedFlags == requestedMemFlags) { |
22 *typeIndex = i; | 29 *typeIndex = i; |
| 30 *heapIndex = physDevMemProps.memoryTypes[i].heapIndex; |
23 return true; | 31 return true; |
24 } | 32 } |
25 } | 33 } |
26 } | 34 } |
27 return false; | 35 return false; |
28 } | 36 } |
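The selector walks the device's memory types in order, accepts the first one that typeBits allows and that supports every requested property flag, and, with this change, also reports which VkMemoryHeap that type draws from. A usage sketch, assuming physicalDevice and memReqs are placeholders for values the caller already holds:

    VkPhysicalDeviceMemoryProperties props;
    vkGetPhysicalDeviceMemoryProperties(physicalDevice, &props);

    uint32_t typeIndex = 0;
    uint32_t heapIndex = 0;
    if (get_valid_memory_type_index(props, memReqs.memoryTypeBits,
                                    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
                                    &typeIndex, &heapIndex)) {
        // typeIndex feeds VkMemoryAllocateInfo::memoryTypeIndex;
        // heapIndex identifies the VkMemoryHeap (and gHeapUsage slot) charged.
    }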
29 | 37 |
30 static GrVkGpu::Heap buffer_type_to_heap(GrVkBuffer::Type type) { | 38 static GrVkGpu::Heap buffer_type_to_heap(GrVkBuffer::Type type) { |
31 const GrVkGpu::Heap kBufferToHeap[]{ | 39 const GrVkGpu::Heap kBufferToHeap[]{ |
32 GrVkGpu::kVertexBuffer_Heap, | 40 GrVkGpu::kVertexBuffer_Heap, |
(...skipping 16 matching lines...) |
49 GrVkBuffer::Type type, | 57 GrVkBuffer::Type type, |
50 bool dynamic, | 58 bool dynamic, |
51 GrVkAlloc* alloc) { | 59 GrVkAlloc* alloc) { |
52 const GrVkInterface* iface = gpu->vkInterface(); | 60 const GrVkInterface* iface = gpu->vkInterface(); |
53 VkDevice device = gpu->device(); | 61 VkDevice device = gpu->device(); |
54 | 62 |
55 VkMemoryRequirements memReqs; | 63 VkMemoryRequirements memReqs; |
56 GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs)); | 64 GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs)); |
57 | 65 |
58 uint32_t typeIndex = 0; | 66 uint32_t typeIndex = 0; |
| 67 uint32_t heapIndex = 0; |
59 const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties(); | 68 const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties(); |
60 if (dynamic) { | 69 if (dynamic) { |
61 // try to get cached and ideally non-coherent memory first | 70 // try to get cached and ideally non-coherent memory first |
62 if (!get_valid_memory_type_index(phDevMemProps, | 71 if (!get_valid_memory_type_index(phDevMemProps, |
63 memReqs.memoryTypeBits, | 72 memReqs.memoryTypeBits, |
64 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | | 73 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | |
65 VK_MEMORY_PROPERTY_HOST_CACHED_BIT, | 74 VK_MEMORY_PROPERTY_HOST_CACHED_BIT, |
66 &typeIndex)) { | 75 &typeIndex, |
| 76 &heapIndex)) { |
67 // some sort of host-visible memory type should always be available for dynamic buffers | 77 // some sort of host-visible memory type should always be available for dynamic buffers |
68 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, | 78 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, |
69 memReqs.memoryTypeBits, | 79 memReqs.memoryTypeBits, |
70 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, | 80 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, |
71 &typeIndex)); | 81 &typeIndex, |
| 82 &heapIndex)); |
72 } | 83 } |
73 | 84 |
74 VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags; | 85 VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags; |
75 alloc->fFlags = mpf & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ? 0x0 | 86 alloc->fFlags = mpf & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ? 0x0 |
76 : GrVkAlloc::kNoncoherent_Flag; | 87 : GrVkAlloc::kNoncoherent_Flag; |
77 } else { | 88 } else { |
78 // device-local memory should always be available for static buffers | 89 // device-local memory should always be available for static buffers |
79 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, | 90 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, |
80 memReqs.memoryTypeBits, | 91 memReqs.memoryTypeBits, |
81 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, | 92 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, |
82 &typeIndex)); | 93 &typeIndex, |
| 94 &heapIndex)); |
83 alloc->fFlags = 0x0; | 95 alloc->fFlags = 0x0; |
84 } | 96 } |
85 | 97 |
86 GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type)); | 98 GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type)); |
87 | 99 |
88 if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) { | 100 if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) { |
89 SkDebugf("Failed to alloc buffer\n"); | 101 // if static, try to allocate from non-host-visible non-device-local memory instead |
90 return false; | 102 if (dynamic || |
| 103 !get_valid_memory_type_index(phDevMemProps, memReqs.memoryTypeBits, |
| 104 0, &typeIndex, &heapIndex) || |
| 105 !heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) { |
| 106 SkDebugf("Failed to alloc buffer\n"); |
| 107 return false; |
| 108 } |
91 } | 109 } |
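The new failure path gives static buffers a second chance: requesting property flags of 0 accepts any memory type permitted by typeBits, so the retry can reach memory types outside the preferred device-local set. Dynamic buffers skip the retry because they must stay host-visible for mapping. The same pattern restated as a hypothetical standalone helper (alloc_with_fallback is illustrative, not part of this change):

    static bool alloc_with_fallback(GrVkHeap* heap,
                                    const VkMemoryRequirements& memReqs,
                                    const VkPhysicalDeviceMemoryProperties& props,
                                    bool allowFallback,
                                    uint32_t typeIndex, uint32_t heapIndex,
                                    GrVkAlloc* alloc) {
        if (heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) {
            return true;
        }
        if (!allowFallback) {
            return false;  // dynamic buffers must remain host-visible
        }
        // flags of 0 match any memory type allowed by memoryTypeBits
        if (!get_valid_memory_type_index(props, memReqs.memoryTypeBits, 0,
                                         &typeIndex, &heapIndex)) {
            return false;
        }
        return heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc);
    }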
92 | 110 |
93 // Bind buffer | 111 // Bind buffer |
94 VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer, | 112 VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer, |
95 alloc->fMemory, alloc->fOffset)); | 113 alloc->fMemory, alloc->fOffset)); |
96 if (err) { | 114 if (err) { |
97 SkASSERT_RELEASE(heap->free(*alloc)); | 115 SkASSERT_RELEASE(heap->free(*alloc)); |
98 return false; | 116 return false; |
99 } | 117 } |
100 | 118 |
(...skipping 22 matching lines...) |
123 VkImage image, | 141 VkImage image, |
124 bool linearTiling, | 142 bool linearTiling, |
125 GrVkAlloc* alloc) { | 143 GrVkAlloc* alloc) { |
126 const GrVkInterface* iface = gpu->vkInterface(); | 144 const GrVkInterface* iface = gpu->vkInterface(); |
127 VkDevice device = gpu->device(); | 145 VkDevice device = gpu->device(); |
128 | 146 |
129 VkMemoryRequirements memReqs; | 147 VkMemoryRequirements memReqs; |
130 GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs)); | 148 GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs)); |
131 | 149 |
132 uint32_t typeIndex = 0; | 150 uint32_t typeIndex = 0; |
| 151 uint32_t heapIndex = 0; |
133 GrVkHeap* heap; | 152 GrVkHeap* heap; |
134 const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties(); | 153 const VkPhysicalDeviceMemoryProperties& phDevMemProps = gpu->physicalDeviceMemoryProperties(); |
135 if (linearTiling) { | 154 if (linearTiling) { |
136 VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | | 155 VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | |
137 VK_MEMORY_PROPERTY_HOST_CACHED_BIT; | 156 VK_MEMORY_PROPERTY_HOST_CACHED_BIT; |
138 if (!get_valid_memory_type_index(phDevMemProps, | 157 if (!get_valid_memory_type_index(phDevMemProps, |
139 memReqs.memoryTypeBits, | 158 memReqs.memoryTypeBits, |
140 desiredMemProps, | 159 desiredMemProps, |
141 &typeIndex)) { | 160 &typeIndex, |
| 161 &heapIndex)) { |
142 // some sort of host-visible memory type should always be available | 162 // some sort of host-visible memory type should always be available |
143 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, | 163 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, |
144 memReqs.memoryTypeBits, | 164 memReqs.memoryTypeBits, |
145 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, | 165 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, |
146 &typeIndex)); | 166 &typeIndex, |
| 167 &heapIndex)); |
147 } | 168 } |
148 heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap); | 169 heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap); |
149 VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags; | 170 VkMemoryPropertyFlags mpf = phDevMemProps.memoryTypes[typeIndex].propertyFlags; |
150 alloc->fFlags = mpf & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ? 0x0 | 171 alloc->fFlags = mpf & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ? 0x0 |
151 : GrVkAlloc::kNoncoherent_Flag; | 172 : GrVkAlloc::kNoncoherent_Flag; |
152 } else { | 173 } else { |
153 // this memory type should always be available | 174 // this memory type should always be available |
154 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, | 175 SkASSERT_RELEASE(get_valid_memory_type_index(phDevMemProps, |
155 memReqs.memoryTypeBits, | 176 memReqs.memoryTypeBits, |
156 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, | 177 VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, |
157 &typeIndex)); | 178 &typeIndex, |
| 179 &heapIndex)); |
158 if (memReqs.size <= kMaxSmallImageSize) { | 180 if (memReqs.size <= kMaxSmallImageSize) { |
159 heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap); | 181 heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap); |
160 } else { | 182 } else { |
161 heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap); | 183 heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap); |
162 } | 184 } |
163 alloc->fFlags = 0x0; | 185 alloc->fFlags = 0x0; |
164 } | 186 } |
165 | 187 |
166 if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) { | 188 if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) { |
167 SkDebugf("Failed to alloc image\n"); | 189 // if optimal, try to allocate from non-host-visible non-device-local memory instead |
168 return false; | 190 if (linearTiling || |
| 191 !get_valid_memory_type_index(phDevMemProps, memReqs.memoryTypeBits, |
| 192 0, &typeIndex, &heapIndex) || |
| 193 !heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, alloc)) { |
| 194 SkDebugf("Failed to alloc image\n"); |
| 195 return false; |
| 196 } |
169 } | 197 } |
170 | 198 |
171 // Bind image | 199 // Bind image |
172 VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image, | 200 VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image, |
173 alloc->fMemory, alloc->fOffset)); | 201 alloc->fMemory, alloc->fOffset)); |
174 if (err) { | 202 if (err) { |
175 SkASSERT_RELEASE(heap->free(*alloc)); | 203 SkASSERT_RELEASE(heap->free(*alloc)); |
176 return false; | 204 return false; |
177 } | 205 } |
178 | 206 |
(...skipping 245 matching lines...) |
424 Block* block = iter.get(); | 452 Block* block = iter.get(); |
425 if (largestSize < block->fSize) { | 453 if (largestSize < block->fSize) { |
426 largestSize = block->fSize; | 454 largestSize = block->fSize; |
427 } | 455 } |
428 iter.next(); | 456 iter.next(); |
429 } | 457 } |
430 SkASSERT(fLargestBlockSize == largestSize); | 458 SkASSERT(fLargestBlockSize == largestSize); |
431 #endif | 459 #endif |
432 } | 460 } |
433 | 461 |
434 GrVkSubHeap::GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex, | 462 GrVkSubHeap::GrVkSubHeap(const GrVkGpu* gpu, uint32_t memoryTypeIndex, uint32_t heapIndex, |
435 VkDeviceSize size, VkDeviceSize alignment) | 463 VkDeviceSize size, VkDeviceSize alignment) |
436 : INHERITED(size, alignment) | 464 : INHERITED(size, alignment) |
437 , fGpu(gpu) | 465 , fGpu(gpu) |
| 466 #ifdef SK_DEBUG |
| 467 , fHeapIndex(heapIndex) |
| 468 #endif |
438 , fMemoryTypeIndex(memoryTypeIndex) { | 469 , fMemoryTypeIndex(memoryTypeIndex) { |
439 | 470 |
440 VkMemoryAllocateInfo allocInfo = { | 471 VkMemoryAllocateInfo allocInfo = { |
441 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType | 472 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType |
442 NULL, // pNext | 473 NULL, // pNext |
443 size, // allocationSize | 474 size, // allocationSize |
444 memoryTypeIndex, // memoryTypeIndex | 475 memoryTypeIndex, // memoryTypeIndex |
445 }; | 476 }; |
446 | 477 |
447 VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(), | 478 VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateMemory(gpu->device(), |
448 &allocInfo, | 479 &allocInfo, |
449 nullptr, | 480 nullptr, |
450 &fAlloc)); | 481 &fAlloc)); |
451 if (VK_SUCCESS != err) { | 482 if (VK_SUCCESS != err) { |
452 this->reset(); | 483 this->reset(); |
| 484 } |
| 485 #ifdef SK_DEBUG |
| 486 else { |
| 487 gHeapUsage[heapIndex] += size; |
453 } | 488 } |
| 489 #endif |
454 } | 490 } |
455 | 491 |
456 GrVkSubHeap::~GrVkSubHeap() { | 492 GrVkSubHeap::~GrVkSubHeap() { |
457 const GrVkInterface* iface = fGpu->vkInterface(); | 493 const GrVkInterface* iface = fGpu->vkInterface(); |
458 GR_VK_CALL(iface, FreeMemory(fGpu->device(), fAlloc, nullptr)); | 494 GR_VK_CALL(iface, FreeMemory(fGpu->device(), fAlloc, nullptr)); |
| 495 #ifdef SK_DEBUG |
| 496 gHeapUsage[fHeapIndex] -= fSize; |
| 497 #endif |
459 } | 498 } |
460 | 499 |
461 bool GrVkSubHeap::alloc(VkDeviceSize size, GrVkAlloc* alloc) { | 500 bool GrVkSubHeap::alloc(VkDeviceSize size, GrVkAlloc* alloc) { |
462 alloc->fMemory = fAlloc; | 501 alloc->fMemory = fAlloc; |
463 return INHERITED::alloc(size, &alloc->fOffset, &alloc->fSize); | 502 return INHERITED::alloc(size, &alloc->fOffset, &alloc->fSize); |
464 } | 503 } |
465 | 504 |
466 void GrVkSubHeap::free(const GrVkAlloc& alloc) { | 505 void GrVkSubHeap::free(const GrVkAlloc& alloc) { |
467 SkASSERT(alloc.fMemory == fAlloc); | 506 SkASSERT(alloc.fMemory == fAlloc); |
468 | 507 |
469 INHERITED::free(alloc.fOffset, alloc.fSize); | 508 INHERITED::free(alloc.fOffset, alloc.fSize); |
470 } | 509 } |
471 | 510 |
472 bool GrVkHeap::subAlloc(VkDeviceSize size, VkDeviceSize alignment, | 511 bool GrVkHeap::subAlloc(VkDeviceSize size, VkDeviceSize alignment, |
473 uint32_t memoryTypeIndex, GrVkAlloc* alloc) { | 512 uint32_t memoryTypeIndex, uint32_t heapIndex, GrVkAlloc* alloc) { |
474 VkDeviceSize alignedSize = align_size(size, alignment); | 513 VkDeviceSize alignedSize = align_size(size, alignment); |
475 | 514 |
476 // if requested is larger than our subheap allocation, just alloc directly | 515 // if requested is larger than our subheap allocation, just alloc directly |
477 if (alignedSize > fSubHeapSize) { | 516 if (alignedSize > fSubHeapSize) { |
478 VkMemoryAllocateInfo allocInfo = { | 517 VkMemoryAllocateInfo allocInfo = { |
479 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType | 518 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType |
480 NULL, // pNext | 519 NULL, // pNext |
481 size, // allocationSize | 520 size, // allocationSize |
482 memoryTypeIndex, // memoryTypeIndex | 521 memoryTypeIndex, // memoryTypeIndex |
483 }; | 522 }; |
484 | 523 |
485 VkResult err = GR_VK_CALL(fGpu->vkInterface(), AllocateMemory(fGpu->device(), | 524 VkResult err = GR_VK_CALL(fGpu->vkInterface(), AllocateMemory(fGpu->device(), |
486 &allocInfo, | 525 &allocInfo, |
487 nullptr, | 526 nullptr, |
488 &alloc->fMemory)); | 527 &alloc->fMemory)); |
489 if (VK_SUCCESS != err) { | 528 if (VK_SUCCESS != err) { |
490 return false; | 529 return false; |
491 } | 530 } |
492 alloc->fOffset = 0; | 531 alloc->fOffset = 0; |
493 alloc->fSize = 0; // hint that this is not a subheap allocation | 532 alloc->fSize = 0; // hint that this is not a subheap allocation |
| 533 #ifdef SK_DEBUG |
| 534 gHeapUsage[VK_MAX_MEMORY_HEAPS] += alignedSize; |
| 535 #endif |
494 | 536 |
495 return true; | 537 return true; |
496 } | 538 } |
497 | 539 |
498 // first try to find a subheap that fits our allocation request | 540 // first try to find a subheap that fits our allocation request |
499 int bestFitIndex = -1; | 541 int bestFitIndex = -1; |
500 VkDeviceSize bestFitSize = 0x7FFFFFFF; | 542 VkDeviceSize bestFitSize = 0x7FFFFFFF; |
501 for (auto i = 0; i < fSubHeaps.count(); ++i) { | 543 for (auto i = 0; i < fSubHeaps.count(); ++i) { |
502 if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex && | 544 if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex && |
503 fSubHeaps[i]->alignment() == alignment) { | 545 fSubHeaps[i]->alignment() == alignment) { |
504 VkDeviceSize heapSize = fSubHeaps[i]->largestBlockSize(); | 546 VkDeviceSize heapSize = fSubHeaps[i]->largestBlockSize(); |
505 if (heapSize >= alignedSize && heapSize < bestFitSize) { | 547 if (heapSize >= alignedSize && heapSize < bestFitSize) { |
506 bestFitIndex = i; | 548 bestFitIndex = i; |
507 bestFitSize = heapSize; | 549 bestFitSize = heapSize; |
508 } | 550 } |
509 } | 551 } |
510 } | 552 } |
511 | 553 |
512 if (bestFitIndex >= 0) { | 554 if (bestFitIndex >= 0) { |
513 SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment); | 555 SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment); |
514 if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) { | 556 if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) { |
515 fUsedSize += alloc->fSize; | 557 fUsedSize += alloc->fSize; |
516 return true; | 558 return true; |
517 } | 559 } |
518 return false; | 560 return false; |
519 } | 561 } |
520 | 562 |
521 // need to allocate a new subheap | 563 // need to allocate a new subheap |
522 SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back(); | 564 SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back(); |
523 subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, fSubHeapSize, alignment)); | 565 subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, fSubHeapSize, alignment)); |
524 // try to recover from failed allocation by only allocating what we need | 566 // try to recover from failed allocation by only allocating what we need |
525 if (subHeap->size() == 0) { | 567 if (subHeap->size() == 0) { |
526 VkDeviceSize alignedSize = align_size(size, alignment); | 568 VkDeviceSize alignedSize = align_size(size, alignment); |
527 subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, alignedSize, alignment)); | 569 subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, alignedSize, alignment)); |
528 if (subHeap->size() == 0) { | 570 if (subHeap->size() == 0) { |
529 return false; | 571 return false; |
530 } | 572 } |
531 } | 573 } |
532 fAllocSize += fSubHeapSize; | 574 fAllocSize += fSubHeapSize; |
533 if (subHeap->alloc(size, alloc)) { | 575 if (subHeap->alloc(size, alloc)) { |
534 fUsedSize += alloc->fSize; | 576 fUsedSize += alloc->fSize; |
535 return true; | 577 return true; |
536 } | 578 } |
537 | 579 |
538 return false; | 580 return false; |
539 } | 581 } |
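subAlloc's search is a best fit over compatible subheaps: among those with the matching memory type and alignment, it picks the subheap whose largest free block is the smallest that still satisfies the request, which keeps big blocks available for big requests. A self-contained illustration of that choice, with assumed sizes:

    #include <cstdint>
    #include <vector>

    // best-fit selection as used above: the smallest largest-free-block that fits
    static int best_fit(const std::vector<uint64_t>& largestFreeBlock, uint64_t need) {
        int bestFitIndex = -1;
        uint64_t bestFitSize = UINT64_MAX;
        for (int i = 0; i < (int)largestFreeBlock.size(); ++i) {
            if (largestFreeBlock[i] >= need && largestFreeBlock[i] < bestFitSize) {
                bestFitIndex = i;
                bestFitSize = largestFreeBlock[i];
            }
        }
        // e.g. blocks of {16K, 64K, 32K} with need = 20K picks index 2 (the 32K block)
        return bestFitIndex;
    }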
540 | 582 |
541 bool GrVkHeap::singleAlloc(VkDeviceSize size, VkDeviceSize alignment, | 583 bool GrVkHeap::singleAlloc(VkDeviceSize size, VkDeviceSize alignment, |
542 uint32_t memoryTypeIndex, GrVkAlloc* alloc) { | 584 uint32_t memoryTypeIndex, uint32_t heapIndex, GrVkAlloc* alloc) { |
543 VkDeviceSize alignedSize = align_size(size, alignment); | 585 VkDeviceSize alignedSize = align_size(size, alignment); |
544 | 586 |
545 // first try to find an unallocated subheap that fits our allocation request | 587 // first try to find an unallocated subheap that fits our allocation request |
546 int bestFitIndex = -1; | 588 int bestFitIndex = -1; |
547 VkDeviceSize bestFitSize = 0x7FFFFFFF; | 589 VkDeviceSize bestFitSize = 0x7FFFFFFF; |
548 for (auto i = 0; i < fSubHeaps.count(); ++i) { | 590 for (auto i = 0; i < fSubHeaps.count(); ++i) { |
549 if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex && | 591 if (fSubHeaps[i]->memoryTypeIndex() == memoryTypeIndex && |
550 fSubHeaps[i]->alignment() == alignment && | 592 fSubHeaps[i]->alignment() == alignment && |
551 fSubHeaps[i]->unallocated()) { | 593 fSubHeaps[i]->unallocated()) { |
552 VkDeviceSize heapSize = fSubHeaps[i]->size(); | 594 VkDeviceSize heapSize = fSubHeaps[i]->size(); |
553 if (heapSize >= alignedSize && heapSize < bestFitSize) { | 595 if (heapSize >= alignedSize && heapSize < bestFitSize) { |
554 bestFitIndex = i; | 596 bestFitIndex = i; |
555 bestFitSize = heapSize; | 597 bestFitSize = heapSize; |
556 } | 598 } |
557 } | 599 } |
558 } | 600 } |
559 | 601 |
560 if (bestFitIndex >= 0) { | 602 if (bestFitIndex >= 0) { |
561 SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment); | 603 SkASSERT(fSubHeaps[bestFitIndex]->alignment() == alignment); |
562 if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) { | 604 if (fSubHeaps[bestFitIndex]->alloc(size, alloc)) { |
563 fUsedSize += alloc->fSize; | 605 fUsedSize += alloc->fSize; |
564 return true; | 606 return true; |
565 } | 607 } |
566 return false; | 608 return false; |
567 } | 609 } |
568 | 610 |
569 // need to allocate a new subheap | 611 // need to allocate a new subheap |
570 SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back(); | 612 SkAutoTDelete<GrVkSubHeap>& subHeap = fSubHeaps.push_back(); |
571 subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, alignedSize, alignment)); | 613 subHeap.reset(new GrVkSubHeap(fGpu, memoryTypeIndex, heapIndex, alignedSize, alignment)); |
572 fAllocSize += alignedSize; | 614 fAllocSize += alignedSize; |
573 if (subHeap->alloc(size, alloc)) { | 615 if (subHeap->alloc(size, alloc)) { |
574 fUsedSize += alloc->fSize; | 616 fUsedSize += alloc->fSize; |
575 return true; | 617 return true; |
576 } | 618 } |
577 | 619 |
578 return false; | 620 return false; |
579 } | 621 } |
580 | 622 |
581 bool GrVkHeap::free(const GrVkAlloc& alloc) { | 623 bool GrVkHeap::free(const GrVkAlloc& alloc) { |
582 // a size of 0 means we're using the system heap | 624 // a size of 0 means we're using the system heap |
583 if (0 == alloc.fSize) { | 625 if (0 == alloc.fSize) { |
584 const GrVkInterface* iface = fGpu->vkInterface(); | 626 const GrVkInterface* iface = fGpu->vkInterface(); |
585 GR_VK_CALL(iface, FreeMemory(fGpu->device(), alloc.fMemory, nullptr)); | 627 GR_VK_CALL(iface, FreeMemory(fGpu->device(), alloc.fMemory, nullptr)); |
586 return true; | 628 return true; |
587 } | 629 } |
588 | 630 |
589 for (auto i = 0; i < fSubHeaps.count(); ++i) { | 631 for (auto i = 0; i < fSubHeaps.count(); ++i) { |
590 if (fSubHeaps[i]->memory() == alloc.fMemory) { | 632 if (fSubHeaps[i]->memory() == alloc.fMemory) { |
591 fSubHeaps[i]->free(alloc); | 633 fSubHeaps[i]->free(alloc); |
592 fUsedSize -= alloc.fSize; | 634 fUsedSize -= alloc.fSize; |
593 return true; | 635 return true; |
594 } | 636 } |
595 } | 637 } |
596 | 638 |
597 return false; | 639 return false; |
598 } | 640 } |
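free() relies on the convention set in subAlloc: alloc.fSize == 0 marks a direct vkAllocateMemory allocation, which is returned straight to the driver, while any other size is handed back to the owning subheap, located by matching fMemory. A round-trip sketch, assuming heap, memReqs, typeIndex, and heapIndex carry over from the earlier steps:

    GrVkAlloc alloc;
    if (heap->alloc(memReqs.size, memReqs.alignment, typeIndex, heapIndex, &alloc)) {
        // ... bind alloc.fMemory at alloc.fOffset and use the resource ...

        // handles both cases: subheap block, or direct allocation (fSize == 0)
        SkASSERT_RELEASE(heap->free(alloc));
    }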
599 | 641 |
600 | 642 |