OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright 2015 Google Inc. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. |
| 6 */ |
| 7 |
| 8 // This is a GPU-backend specific test. It relies on static initializers to work |
| 9 |
| 10 #include "SkTypes.h" |
| 11 |
| 12 #if SK_SUPPORT_GPU && SK_ALLOW_STATIC_GLOBAL_INITIALIZERS && defined(SK_VULKAN) |
| 13 |
| 14 #include "GrContextFactory.h" |
| 15 #include "GrTest.h" |
| 16 #include "Test.h" |
| 17 #include "vk/GrVkGpu.h" |
| 18 |
| 19 using sk_gpu_test::GrContextFactory; |
| 20 |
| 21 void subheap_test(skiatest::Reporter* reporter, GrContext* context) { |
| 22 GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu()); |
| 23 |
| 24 // heap index doesn't matter, we're just testing the suballocation algorithm
so we'll use 0 |
| 25 GrVkSubHeap heap(gpu, 0, 64 * 1024, 32); |
| 26 GrVkAlloc alloc0, alloc1, alloc2, alloc3; |
| 27 // test full allocation and free |
| 28 REPORTER_ASSERT(reporter, heap.alloc(64 * 1024, &alloc0)); |
| 29 REPORTER_ASSERT(reporter, alloc0.fOffset == 0); |
| 30 REPORTER_ASSERT(reporter, alloc0.fSize == 64 * 1024); |
| 31 REPORTER_ASSERT(reporter, heap.freeSize() == 0 && heap.largestBlockSize() ==
0); |
| 32 heap.free(alloc0); |
| 33 REPORTER_ASSERT(reporter, heap.freeSize() == 64*1024 && heap.largestBlockSiz
e() == 64 * 1024); |
| 34 |
| 35 // now let's suballoc some memory |
| 36 REPORTER_ASSERT(reporter, heap.alloc(16 * 1024, &alloc0)); |
| 37 REPORTER_ASSERT(reporter, heap.alloc(23 * 1024, &alloc1)); |
| 38 REPORTER_ASSERT(reporter, heap.alloc(18 * 1024, &alloc2)); |
| 39 REPORTER_ASSERT(reporter, heap.freeSize() == 7 * 1024 && heap.largestBlockSi
ze() == 7 * 1024); |
| 40 // free lone block |
| 41 heap.free(alloc1); |
| 42 REPORTER_ASSERT(reporter, heap.freeSize() == 30 * 1024 && heap.largestBlockS
ize() == 23 * 1024); |
| 43 // allocate into smallest free block |
| 44 REPORTER_ASSERT(reporter, heap.alloc(6 * 1024, &alloc3)); |
| 45 REPORTER_ASSERT(reporter, heap.freeSize() == 24 * 1024 && heap.largestBlockS
ize() == 23 * 1024); |
| 46 // allocate into exact size free block |
| 47 REPORTER_ASSERT(reporter, heap.alloc(23 * 1024, &alloc1)); |
| 48 REPORTER_ASSERT(reporter, heap.freeSize() == 1 * 1024 && heap.largestBlockSi
ze() == 1 * 1024); |
| 49 // free lone block |
| 50 heap.free(alloc2); |
| 51 REPORTER_ASSERT(reporter, heap.freeSize() == 19 * 1024 && heap.largestBlockS
ize() == 18 * 1024); |
| 52 // free and merge with preceding block and following |
| 53 heap.free(alloc3); |
| 54 REPORTER_ASSERT(reporter, heap.freeSize() == 25 * 1024 && heap.largestBlockS
ize() == 25 * 1024); |
| 55 // free and merge with following block |
| 56 heap.free(alloc1); |
| 57 REPORTER_ASSERT(reporter, heap.freeSize() == 48 * 1024 && heap.largestBlockS
ize() == 48 * 1024); |
| 58 // free starting block and merge with following |
| 59 heap.free(alloc0); |
| 60 REPORTER_ASSERT(reporter, heap.freeSize() == 64 * 1024 && heap.largestBlockS
ize() == 64 * 1024); |
| 61 |
| 62 // realloc |
| 63 REPORTER_ASSERT(reporter, heap.alloc(4 * 1024, &alloc0)); |
| 64 REPORTER_ASSERT(reporter, heap.alloc(35 * 1024, &alloc1)); |
| 65 REPORTER_ASSERT(reporter, heap.alloc(10 * 1024, &alloc2)); |
| 66 REPORTER_ASSERT(reporter, heap.freeSize() == 15 * 1024 && heap.largestBlockS
ize() == 15 * 1024); |
| 67 // free starting block and merge with following |
| 68 heap.free(alloc0); |
| 69 REPORTER_ASSERT(reporter, heap.freeSize() == 19 * 1024 && heap.largestBlockS
ize() == 15 * 1024); |
| 70 // free block and merge with preceding |
| 71 heap.free(alloc1); |
| 72 REPORTER_ASSERT(reporter, heap.freeSize() == 54 * 1024 && heap.largestBlockS
ize() == 39 * 1024); |
| 73 // free block and merge with preceding and following |
| 74 heap.free(alloc2); |
| 75 REPORTER_ASSERT(reporter, heap.freeSize() == 64 * 1024 && heap.largestBlockS
ize() == 64 * 1024); |
| 76 |
| 77 // fragment |
| 78 REPORTER_ASSERT(reporter, heap.alloc(19 * 1024, &alloc0)); |
| 79 REPORTER_ASSERT(reporter, heap.alloc(5 * 1024, &alloc1)); |
| 80 REPORTER_ASSERT(reporter, heap.alloc(15 * 1024, &alloc2)); |
| 81 REPORTER_ASSERT(reporter, heap.alloc(3 * 1024, &alloc3)); |
| 82 REPORTER_ASSERT(reporter, heap.freeSize() == 22 * 1024 && heap.largestBlockS
ize() == 22 * 1024); |
| 83 heap.free(alloc0); |
| 84 REPORTER_ASSERT(reporter, heap.freeSize() == 41 * 1024 && heap.largestBlockS
ize() == 22 * 1024); |
| 85 heap.free(alloc2); |
| 86 REPORTER_ASSERT(reporter, heap.freeSize() == 56 * 1024 && heap.largestBlockS
ize() == 22 * 1024); |
| 87 REPORTER_ASSERT(reporter, !heap.alloc(40 * 1024, &alloc0)); |
| 88 heap.free(alloc3); |
| 89 REPORTER_ASSERT(reporter, heap.freeSize() == 59 * 1024 && heap.largestBlockS
ize() == 40 * 1024); |
| 90 REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, &alloc0)); |
| 91 REPORTER_ASSERT(reporter, heap.freeSize() == 19 * 1024 && heap.largestBlockS
ize() == 19 * 1024); |
| 92 heap.free(alloc1); |
| 93 REPORTER_ASSERT(reporter, heap.freeSize() == 24 * 1024 && heap.largestBlockS
ize() == 24 * 1024); |
| 94 heap.free(alloc0); |
| 95 REPORTER_ASSERT(reporter, heap.freeSize() == 64 * 1024 && heap.largestBlockS
ize() == 64 * 1024); |
| 96 |
| 97 // unaligned sizes |
| 98 REPORTER_ASSERT(reporter, heap.alloc(19 * 1024 - 31, &alloc0)); |
| 99 REPORTER_ASSERT(reporter, heap.alloc(5 * 1024 - 5, &alloc1)); |
| 100 REPORTER_ASSERT(reporter, heap.alloc(15 * 1024 - 19, &alloc2)); |
| 101 REPORTER_ASSERT(reporter, heap.alloc(3 * 1024 - 3, &alloc3)); |
| 102 REPORTER_ASSERT(reporter, heap.freeSize() == 22 * 1024 && heap.largestBlockS
ize() == 22 * 1024); |
| 103 heap.free(alloc0); |
| 104 REPORTER_ASSERT(reporter, heap.freeSize() == 41 * 1024 && heap.largestBlockS
ize() == 22 * 1024); |
| 105 heap.free(alloc2); |
| 106 REPORTER_ASSERT(reporter, heap.freeSize() == 56 * 1024 && heap.largestBlockS
ize() == 22 * 1024); |
| 107 REPORTER_ASSERT(reporter, !heap.alloc(40 * 1024, &alloc0)); |
| 108 heap.free(alloc3); |
| 109 REPORTER_ASSERT(reporter, heap.freeSize() == 59 * 1024 && heap.largestBlockS
ize() == 40 * 1024); |
| 110 REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, &alloc0)); |
| 111 REPORTER_ASSERT(reporter, heap.freeSize() == 19 * 1024 && heap.largestBlockS
ize() == 19 * 1024); |
| 112 heap.free(alloc1); |
| 113 REPORTER_ASSERT(reporter, heap.freeSize() == 24 * 1024 && heap.largestBlockS
ize() == 24 * 1024); |
| 114 heap.free(alloc0); |
| 115 REPORTER_ASSERT(reporter, heap.freeSize() == 64 * 1024 && heap.largestBlockS
ize() == 64 * 1024); |
| 116 } |
| 117 |
| 118 void suballoc_test(skiatest::Reporter* reporter, GrContext* context) { |
| 119 GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu()); |
| 120 |
| 121 // heap index doesn't matter, we're just testing the allocation algorithm so
we'll use 0 |
| 122 GrVkHeap heap(gpu, GrVkHeap::kSubAlloc_Strategy, 64 * 1024); |
| 123 GrVkAlloc alloc0, alloc1, alloc2, alloc3; |
| 124 const VkDeviceSize kAlignment = 16; |
| 125 const uint32_t kHeapIndex = 0; |
| 126 |
| 127 REPORTER_ASSERT(reporter, heap.allocSize() == 0 && heap.usedSize() == 0); |
| 128 |
| 129 // fragment allocations so we need to grow heap |
| 130 REPORTER_ASSERT(reporter, heap.alloc(19 * 1024 - 3, kAlignment, kHeapIndex,
&alloc0)); |
| 131 REPORTER_ASSERT(reporter, heap.alloc(5 * 1024 - 9, kAlignment, kHeapIndex, &
alloc1)); |
| 132 REPORTER_ASSERT(reporter, heap.alloc(15 * 1024 - 15, kAlignment, kHeapIndex,
&alloc2)); |
| 133 REPORTER_ASSERT(reporter, heap.alloc(3 * 1024 - 6, kAlignment, kHeapIndex, &
alloc3)); |
| 134 REPORTER_ASSERT(reporter, heap.allocSize() == 64 * 1024 && heap.usedSize() =
= 42 * 1024); |
| 135 heap.free(alloc0); |
| 136 REPORTER_ASSERT(reporter, heap.allocSize() == 64 * 1024 && heap.usedSize() =
= 23 * 1024); |
| 137 heap.free(alloc2); |
| 138 REPORTER_ASSERT(reporter, heap.allocSize() == 64 * 1024 && heap.usedSize() =
= 8 * 1024); |
| 139 // we expect the heap to grow here |
| 140 REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kHeapIndex, &all
oc0)); |
| 141 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize()
== 48 * 1024); |
| 142 heap.free(alloc3); |
| 143 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize()
== 45 * 1024); |
| 144 // heap should not grow here (first subheap has exactly enough room) |
| 145 REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kHeapIndex, &all
oc3)); |
| 146 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize()
== 85 * 1024); |
| 147 // heap should not grow here (second subheap has room) |
| 148 REPORTER_ASSERT(reporter, heap.alloc(22 * 1024, kAlignment, kHeapIndex, &all
oc2)); |
| 149 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize()
== 107 * 1024); |
| 150 heap.free(alloc1); |
| 151 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize()
== 102 * 1024); |
| 152 heap.free(alloc0); |
| 153 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize()
== 62 * 1024); |
| 154 heap.free(alloc2); |
| 155 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize()
== 40 * 1024); |
| 156 heap.free(alloc3); |
| 157 REPORTER_ASSERT(reporter, heap.allocSize() == 128 * 1024 && heap.usedSize()
== 0 * 1024); |
| 158 } |
| 159 |
| 160 void singlealloc_test(skiatest::Reporter* reporter, GrContext* context) { |
| 161 GrVkGpu* gpu = static_cast<GrVkGpu*>(context->getGpu()); |
| 162 |
| 163 // heap index doesn't matter, we're just testing the allocation algorithm so
we'll use 0 |
| 164 GrVkHeap heap(gpu, GrVkHeap::kSingleAlloc_Strategy, 64 * 1024); |
| 165 GrVkAlloc alloc0, alloc1, alloc2, alloc3; |
| 166 const VkDeviceSize kAlignment = 64; |
| 167 const uint32_t kHeapIndex = 0; |
| 168 |
| 169 REPORTER_ASSERT(reporter, heap.allocSize() == 0 && heap.usedSize() == 0); |
| 170 |
| 171 // make a few allocations |
| 172 REPORTER_ASSERT(reporter, heap.alloc(49 * 1024 - 3, kAlignment, kHeapIndex,
&alloc0)); |
| 173 REPORTER_ASSERT(reporter, heap.alloc(5 * 1024 - 37, kAlignment, kHeapIndex,
&alloc1)); |
| 174 REPORTER_ASSERT(reporter, heap.alloc(15 * 1024 - 11, kAlignment, kHeapIndex,
&alloc2)); |
| 175 REPORTER_ASSERT(reporter, heap.alloc(3 * 1024 - 29, kAlignment, kHeapIndex,
&alloc3)); |
| 176 REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() =
= 72 * 1024); |
| 177 heap.free(alloc0); |
| 178 REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() =
= 23 * 1024); |
| 179 heap.free(alloc2); |
| 180 REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() =
= 8 * 1024); |
| 181 // heap should not grow here (first subheap has room) |
| 182 REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kHeapIndex, &all
oc0)); |
| 183 REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() =
= 48 * 1024); |
| 184 heap.free(alloc3); |
| 185 REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() =
= 45 * 1024); |
| 186 // check for exact fit -- heap should not grow here (third subheap has room) |
| 187 REPORTER_ASSERT(reporter, heap.alloc(15 * 1024 - 63, kAlignment, kHeapIndex,
&alloc2)); |
| 188 REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() =
= 60 * 1024); |
| 189 heap.free(alloc2); |
| 190 REPORTER_ASSERT(reporter, heap.allocSize() == 72 * 1024 && heap.usedSize() =
= 45 * 1024); |
| 191 // heap should grow here (no subheap has room) |
| 192 REPORTER_ASSERT(reporter, heap.alloc(40 * 1024, kAlignment, kHeapIndex, &all
oc3)); |
| 193 REPORTER_ASSERT(reporter, heap.allocSize() == 112 * 1024 && heap.usedSize()
== 85 * 1024); |
| 194 heap.free(alloc1); |
| 195 REPORTER_ASSERT(reporter, heap.allocSize() == 112 * 1024 && heap.usedSize()
== 80 * 1024); |
| 196 heap.free(alloc0); |
| 197 REPORTER_ASSERT(reporter, heap.allocSize() == 112 * 1024 && heap.usedSize()
== 40 * 1024); |
| 198 heap.free(alloc3); |
| 199 REPORTER_ASSERT(reporter, heap.allocSize() == 112 * 1024 && heap.usedSize()
== 0 * 1024); |
| 200 } |
| 201 |
| 202 DEF_GPUTEST_FOR_VULKAN_CONTEXT(VkHeapTests, reporter, ctxInfo) { |
| 203 subheap_test(reporter, ctxInfo.grContext()); |
| 204 suballoc_test(reporter, ctxInfo.grContext()); |
| 205 singlealloc_test(reporter, ctxInfo.grContext()); |
| 206 } |
| 207 |
| 208 #endif |
OLD | NEW |