// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_memory_ashmem_allocator.h"

#include <sys/types.h>
#include <unistd.h>

#include <limits>

#include "base/memory/discardable_memory.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace base {
namespace internal {

const char kAllocatorName[] = "allocator-for-testing";

const size_t kAshmemRegionSizeForTesting = 32 * 1024 * 1024;
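// The tests below assume 4 KByte pages, which is what sysconf(_SC_PAGESIZE)
// reports on the Android devices this allocator targets.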
const size_t kPageSize = 4096;

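// Rounding a size up to the next page boundary computes something like
// (size + kPageSize - 1) & ~(kPageSize - 1); the exact expression inside the
// allocator is an assumption here. The intermediate sum stays within size_t
// only while size <= max - kPageSize + 1, which is therefore the largest
// request the allocator can accept without overflowing.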
const size_t kMaxAllowedAllocationSize =
    std::numeric_limits<size_t>::max() - kPageSize + 1;

class DiscardableMemoryAshmemAllocatorTest : public testing::Test {
 protected:
  DiscardableMemoryAshmemAllocatorTest()
      : allocator_(kAllocatorName, kAshmemRegionSizeForTesting) {
  }

  DiscardableMemoryAshmemAllocator allocator_;
};

void WriteToDiscardableAshmemChunk(DiscardableAshmemChunk* memory,
                                   size_t size) {
  // Write only to the first and last pages to avoid paging in up to
  // 64 MBytes.
  static_cast<char*>(memory->Memory())[0] = 'a';
  static_cast<char*>(memory->Memory())[size - 1] = 'a';
}

TEST_F(DiscardableMemoryAshmemAllocatorTest, Basic) {
  const size_t size = 128;
  scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(size));
  ASSERT_TRUE(memory);
  WriteToDiscardableAshmemChunk(memory.get(), size);
}

TEST_F(DiscardableMemoryAshmemAllocatorTest, ZeroAllocationIsNotSupported) {
  scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(0));
  ASSERT_FALSE(memory);
}

TEST_F(DiscardableMemoryAshmemAllocatorTest, TooLargeAllocationFails) {
  scoped_ptr<DiscardableAshmemChunk> memory(
      allocator_.Allocate(kMaxAllowedAllocationSize + 1));
  // If the input size were not validated, page-aligning it would overflow and
  // wrap around to a small allocation.
  ASSERT_FALSE(memory);
}

TEST_F(DiscardableMemoryAshmemAllocatorTest,
       AshmemRegionsAreNotSmallerThanRequestedSize) {
  // Creating the underlying ashmem region is expected to fail since there
  // should not be enough room in the address space. When ashmem creation
  // fails, the allocator retries by repeatedly halving the region size. That
  // size must never drop below the size the client requested, so the
  // allocation here should simply fail (rather than succeed with the minimum
  // ashmem region size).
  scoped_ptr<DiscardableAshmemChunk> memory(
      allocator_.Allocate(kMaxAllowedAllocationSize));
  ASSERT_FALSE(memory);
}

TEST_F(DiscardableMemoryAshmemAllocatorTest,
       AshmemRegionsAreAlwaysPageAligned) {
  // Use a separate allocator here so that we can override the ashmem region
  // size.
  DiscardableMemoryAshmemAllocator allocator(
      kAllocatorName, kMaxAllowedAllocationSize);
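  // Creating a region of kMaxAllowedAllocationSize should fail, so the
  // allocator presumably halves the region size until ashmem creation
  // succeeds (see the previous test); every intermediate size must remain
  // page-aligned.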
  scoped_ptr<DiscardableAshmemChunk> memory(allocator.Allocate(kPageSize));
  ASSERT_TRUE(memory);
  EXPECT_GT(kMaxAllowedAllocationSize, allocator.last_ashmem_region_size());
  ASSERT_EQ(0u, allocator.last_ashmem_region_size() % kPageSize);
}

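// The 64 MByte allocation below is twice kAshmemRegionSizeForTesting
// (32 MBytes), so it presumably cannot be carved out of a default-sized
// region and exercises the allocator's handling of oversized requests.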
TEST_F(DiscardableMemoryAshmemAllocatorTest, LargeAllocation) {
  const size_t size = 64 * 1024 * 1024;
  scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(size));
  ASSERT_TRUE(memory);
  WriteToDiscardableAshmemChunk(memory.get(), size);
}

TEST_F(DiscardableMemoryAshmemAllocatorTest, ChunksArePageAligned) {
  scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory);
  EXPECT_EQ(0U, reinterpret_cast<uintptr_t>(memory->Memory()) % kPageSize);
  WriteToDiscardableAshmemChunk(memory.get(), kPageSize);
}

TEST_F(DiscardableMemoryAshmemAllocatorTest, AllocateFreeAllocate) {
  scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(kPageSize));
  // Extra allocation that prevents the region from being deleted when
  // |memory| gets deleted.
  scoped_ptr<DiscardableAshmemChunk> memory_lock(
      allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory);
  void* const address = memory->Memory();
  memory->Unlock();  // Tests that the reused chunk is locked correctly.
  memory.reset();
  memory = allocator_.Allocate(kPageSize);
  ASSERT_TRUE(memory);
  // The previously freed chunk should be reused.
  EXPECT_EQ(address, memory->Memory());
  WriteToDiscardableAshmemChunk(memory.get(), kPageSize);
}

TEST_F(DiscardableMemoryAshmemAllocatorTest,
       FreeingWholeAshmemRegionClosesAshmem) {
  scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory);
  const int kMagic = 0xdeadbeef;
  *static_cast<int*>(memory->Memory()) = kMagic;
  memory.reset();
  // The previous ashmem region should have been closed, so its memory should
  // not be reused.
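  // A freshly created ashmem region is zero-filled, so reading kMagic back
  // below would mean the old region had been reused rather than closed.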
  memory = allocator_.Allocate(kPageSize);
  ASSERT_TRUE(memory);
  EXPECT_NE(kMagic, *static_cast<const int*>(memory->Memory()));
}

TEST_F(DiscardableMemoryAshmemAllocatorTest, AllocateUsesBestFitAlgorithm) {
  scoped_ptr<DiscardableAshmemChunk> memory1(
      allocator_.Allocate(3 * kPageSize));
  ASSERT_TRUE(memory1);
  scoped_ptr<DiscardableAshmemChunk> memory2(
      allocator_.Allocate(2 * kPageSize));
  ASSERT_TRUE(memory2);
  scoped_ptr<DiscardableAshmemChunk> memory3(
      allocator_.Allocate(1 * kPageSize));
  ASSERT_TRUE(memory3);
  void* const address_3 = memory3->Memory();
  memory1.reset();
  // Don't free |memory2| to avoid merging the 3 blocks together.
  memory3.reset();
  memory1 = allocator_.Allocate(1 * kPageSize);
  ASSERT_TRUE(memory1);
  // The chunk whose size is closest to the requested size should be reused.
  EXPECT_EQ(address_3, memory1->Memory());
  WriteToDiscardableAshmemChunk(memory1.get(), kPageSize);
}

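// The four single-page chunks below are laid out contiguously as
// |memory1| |memory2| |memory3| |memory4|. Freeing |memory1| and |memory3|
// leaves two one-page holes separated by |memory2|; freeing |memory2| should
// then coalesce all three pages into a single contiguous free chunk.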
TEST_F(DiscardableMemoryAshmemAllocatorTest, MergeFreeChunks) {
  scoped_ptr<DiscardableAshmemChunk> memory1(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory1);
  scoped_ptr<DiscardableAshmemChunk> memory2(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory2);
  scoped_ptr<DiscardableAshmemChunk> memory3(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory3);
  scoped_ptr<DiscardableAshmemChunk> memory4(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory4);
  void* const memory1_address = memory1->Memory();
  memory1.reset();
  memory3.reset();
  // Freeing |memory2| (located between |memory1| and |memory3|) should merge
  // the three free blocks together.
  memory2.reset();
  memory1 = allocator_.Allocate(3 * kPageSize);
  ASSERT_TRUE(memory1);
  EXPECT_EQ(memory1_address, memory1->Memory());
}

TEST_F(DiscardableMemoryAshmemAllocatorTest, MergeFreeChunksAdvanced) {
  scoped_ptr<DiscardableAshmemChunk> memory1(
      allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory1);
  scoped_ptr<DiscardableAshmemChunk> memory2(
      allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory2);
  void* const memory1_address = memory1->Memory();
  memory1.reset();
  memory1 = allocator_.Allocate(2 * kPageSize);
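  // The freed 4-page chunk is reused and split: 2 pages used, 2 pages free.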
  memory2.reset();
  // At this point, the region should be in this state:
  // 8 KBytes (used), 24 KBytes (free).
  memory2 = allocator_.Allocate(6 * kPageSize);
  ASSERT_TRUE(memory2);
  EXPECT_EQ(
      static_cast<const char*>(memory2->Memory()),
      static_cast<const char*>(memory1_address) + 2 * kPageSize);
}

TEST_F(DiscardableMemoryAshmemAllocatorTest, MergeFreeChunksAdvanced2) {
  scoped_ptr<DiscardableAshmemChunk> memory1(
      allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory1);
  scoped_ptr<DiscardableAshmemChunk> memory2(
      allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory2);
  void* const memory1_address = memory1->Memory();
  memory1.reset();
  memory1 = allocator_.Allocate(2 * kPageSize);
  scoped_ptr<DiscardableAshmemChunk> memory3(
      allocator_.Allocate(2 * kPageSize));
  ASSERT_TRUE(memory3);
  // At this point, the region should be in this state:
  // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used).
  memory3.reset();
  memory2.reset();
  // At this point, the region should be in this state:
  // 8 KBytes (used), 24 KBytes (free).
  memory2 = allocator_.Allocate(6 * kPageSize);
  ASSERT_TRUE(memory2);
  EXPECT_EQ(
      static_cast<const char*>(memory2->Memory()),
      static_cast<const char*>(memory1_address) + 2 * kPageSize);
}

TEST_F(DiscardableMemoryAshmemAllocatorTest,
       MergeFreeChunksAndDeleteAshmemRegion) {
  scoped_ptr<DiscardableAshmemChunk> memory1(
      allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory1);
  scoped_ptr<DiscardableAshmemChunk> memory2(
      allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory2);
  memory1.reset();
  memory1 = allocator_.Allocate(2 * kPageSize);
  scoped_ptr<DiscardableAshmemChunk> memory3(
      allocator_.Allocate(2 * kPageSize));
  ASSERT_TRUE(memory3);
  // At this point, the region should be in this state:
  // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used).
  memory1.reset();
  memory3.reset();
  // At this point, the two freed chunks are contiguous and should have been
  // merged, leaving the region in this state:
  // 16 KBytes (free), 16 KBytes (used).
  const int kMagic = 0xdeadbeef;
  *static_cast<int*>(memory2->Memory()) = kMagic;
  memory2.reset();
  // The whole region should have been deleted.
  memory2 = allocator_.Allocate(2 * kPageSize);
  ASSERT_TRUE(memory2);
  EXPECT_NE(kMagic, *static_cast<int*>(memory2->Memory()));
}

TEST_F(DiscardableMemoryAshmemAllocatorTest,
       TooLargeFreeChunksDontCauseTooMuchFragmentationWhenRecycled) {
  // Keep |memory_1| allocated throughout the test so that the ashmem region
  // doesn't get closed when |memory_2| is deleted.
  scoped_ptr<DiscardableAshmemChunk> memory_1(allocator_.Allocate(64 * 1024));
  ASSERT_TRUE(memory_1);
  scoped_ptr<DiscardableAshmemChunk> memory_2(allocator_.Allocate(32 * 1024));
  ASSERT_TRUE(memory_2);
  void* const address = memory_2->Memory();
  memory_2.reset();
  const size_t size = 16 * 1024;
  memory_2 = allocator_.Allocate(size);
  ASSERT_TRUE(memory_2);
  EXPECT_EQ(address, memory_2->Memory());
  WriteToDiscardableAshmemChunk(memory_2.get(), size);
  scoped_ptr<DiscardableAshmemChunk> memory_3(allocator_.Allocate(size));
  ASSERT_TRUE(memory_3);
  // The unused 16 KByte tail of the previously freed chunk should be reused.
  EXPECT_EQ(static_cast<char*>(address) + size, memory_3->Memory());
  WriteToDiscardableAshmemChunk(memory_3.get(), size);
}

TEST_F(DiscardableMemoryAshmemAllocatorTest, UseMultipleAshmemRegions) {
  // Leave one page untouched at the end of the ashmem region.
  const size_t size = kAshmemRegionSizeForTesting - kPageSize;
  scoped_ptr<DiscardableAshmemChunk> memory1(allocator_.Allocate(size));
  ASSERT_TRUE(memory1);
  WriteToDiscardableAshmemChunk(memory1.get(), size);

  scoped_ptr<DiscardableAshmemChunk> memory2(
      allocator_.Allocate(kAshmemRegionSizeForTesting));
  ASSERT_TRUE(memory2);
  WriteToDiscardableAshmemChunk(memory2.get(), kAshmemRegionSizeForTesting);
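  // |memory2| is as large as a whole region, so it cannot fit in the one-page
  // tail of the first region and presumably forces a second ashmem region to
  // be created.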
  // The last page of the first ashmem region should be used for this
  // allocation.
  scoped_ptr<DiscardableAshmemChunk> memory3(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory3);
  WriteToDiscardableAshmemChunk(memory3.get(), kPageSize);
  EXPECT_EQ(memory3->Memory(), static_cast<char*>(memory1->Memory()) + size);
}

TEST_F(DiscardableMemoryAshmemAllocatorTest,
       HighestAllocatedChunkPointerIsUpdatedWhenHighestChunkGetsSplit) {
  // Prevents the ashmem region from getting closed when |memory2| gets freed.
  scoped_ptr<DiscardableAshmemChunk> memory1(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory1);

  scoped_ptr<DiscardableAshmemChunk> memory2(
      allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory2);

  memory2.reset();
  memory2 = allocator_.Allocate(kPageSize);
  ASSERT_TRUE(memory2);
  // There should now be a free chunk of size 3 * |kPageSize| starting at
  // offset 2 * |kPageSize|, and the pointer to the highest allocated chunk
  // should also have been updated to |base_| + 2 * |kPageSize|. This pointer
  // is used to maintain the container mapping a chunk address to its previous
  // chunk, and that map is in turn used to merge a freed chunk with its
  // previous contiguous chunks.

  // Allocate more than 3 * |kPageSize| so that the free chunk of size
  // 3 * |kPageSize| is not reused and |highest_allocated_chunk_| gets used
  // instead.
  scoped_ptr<DiscardableAshmemChunk> memory3(
      allocator_.Allocate(4 * kPageSize));
  ASSERT_TRUE(memory3);

  // Deleting |memory3| (whose size is 4 * |kPageSize|) should result in a
  // merge with its previous chunk, which is the free chunk of size
  // 3 * |kPageSize|.
  memory3.reset();
  memory3 = allocator_.Allocate((3 + 4) * kPageSize);
  ASSERT_TRUE(memory3);
  EXPECT_EQ(memory3->Memory(),
            static_cast<const char*>(memory2->Memory()) + kPageSize);
}

}  // namespace internal
}  // namespace base