OLD | NEW |
(Empty) | |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
#include "base/memory/discardable_memory_allocator_android.h"

#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

#include "base/memory/discardable_memory.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
| 17 |
| 18 namespace base { |
| 19 namespace internal { |
| 20 |
// Name given to the allocator instance under test; used only to label the
// underlying ashmem regions for debugging.
const char kAllocatorName[] = "allocator-for-testing";

// Assumed page size for alignment/size math in these tests — presumably
// matches getpagesize() on supported Android targets; TODO confirm.
const size_t kPageSize = 4096;
// Minimum ashmem region size re-exported from the allocator so tests can
// allocate exactly one region's worth (or one page less/more).
const size_t kMinAshmemRegionSize =
    DiscardableMemoryAllocator::kMinAshmemRegionSize;
| 26 |
// Test fixture: provides each test with a fresh DiscardableMemoryAllocator
// constructed with a fixed, test-only name.
class DiscardableMemoryAllocatorTest : public testing::Test {
 protected:
  DiscardableMemoryAllocatorTest() : allocator_(kAllocatorName) {}

  // Allocator under test; a new instance per test keeps tests independent.
  DiscardableMemoryAllocator allocator_;
};
| 33 |
| 34 void WriteToDiscardableMemory(DiscardableMemory* memory) { |
| 35 // Write to the first and the last pages only to avoid paging in up to 64 |
| 36 // MBytes. |
| 37 static_cast<char*>(memory->Memory())[0] = 'a'; |
| 38 static_cast<char*>(memory->Memory())[memory->Size() - 1] = 'a'; |
| 39 } |
| 40 |
| 41 TEST_F(DiscardableMemoryAllocatorTest, Basic) { |
| 42 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(128)); |
| 43 ASSERT_TRUE(memory); |
| 44 WriteToDiscardableMemory(memory.get()); |
| 45 } |
| 46 |
| 47 TEST_F(DiscardableMemoryAllocatorTest, LargeAllocation) { |
| 48 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(64 * 1024 * 1024)); |
| 49 ASSERT_TRUE(memory); |
| 50 WriteToDiscardableMemory(memory.get()); |
| 51 } |
| 52 |
| 53 TEST_F(DiscardableMemoryAllocatorTest, ChunksArePageAligned) { |
| 54 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize)); |
| 55 ASSERT_TRUE(memory); |
| 56 EXPECT_EQ(0U, reinterpret_cast<uint64_t>(memory->Memory()) % kPageSize); |
| 57 WriteToDiscardableMemory(memory.get()); |
| 58 } |
| 59 |
| 60 TEST_F(DiscardableMemoryAllocatorTest, AllocateFreeAllocate) { |
| 61 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize)); |
| 62 ASSERT_TRUE(memory); |
| 63 void* const address = memory->Memory(); |
| 64 memory->Unlock(); // Tests that the recycled chunk is being locked correctly. |
| 65 memory.reset(); |
| 66 memory = allocator_.Allocate(kPageSize); |
| 67 ASSERT_TRUE(memory); |
| 68 // The previously freed chunk should be reused. |
| 69 EXPECT_EQ(address, memory->Memory()); |
| 70 WriteToDiscardableMemory(memory.get()); |
| 71 } |
| 72 |
| 73 TEST_F(DiscardableMemoryAllocatorTest, FreeingWholeAshmemRegionClosesAshmem) { |
| 74 scoped_ptr<DiscardableMemory> memory( |
| 75 allocator_.Allocate(kMinAshmemRegionSize)); |
| 76 ASSERT_TRUE(memory); |
| 77 const int kMagic = 0xdeadbeef; |
| 78 *static_cast<int*>(memory->Memory()) = kMagic; |
| 79 memory.reset(); |
| 80 // The previous ashmem region should have been closed thus it should not be |
| 81 // recycled. |
| 82 memory = allocator_.Allocate(kPageSize); |
| 83 ASSERT_TRUE(memory); |
| 84 EXPECT_NE(kMagic, *static_cast<const int*>(memory->Memory())); |
| 85 } |
| 86 |
| 87 TEST_F(DiscardableMemoryAllocatorTest, AllocateUsesBestFitAlgorithm) { |
| 88 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(3 * kPageSize)); |
| 89 ASSERT_TRUE(memory1); |
| 90 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(2 * kPageSize)); |
| 91 ASSERT_TRUE(memory2); |
| 92 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(1 * kPageSize)); |
| 93 ASSERT_TRUE(memory3); |
| 94 void* const address_3 = memory3->Memory(); |
| 95 memory1.reset(); |
| 96 // Don't free |memory2| to avoid merging the 3 blocks together. |
| 97 memory3.reset(); |
| 98 memory1 = allocator_.Allocate(1 * kPageSize); |
| 99 ASSERT_TRUE(memory1); |
| 100 // The chunk whose size is closest to the requested size should be recycled. |
| 101 EXPECT_EQ(address_3, memory1->Memory()); |
| 102 WriteToDiscardableMemory(memory1.get()); |
| 103 } |
| 104 |
| 105 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunks) { |
| 106 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kPageSize)); |
| 107 ASSERT_TRUE(memory1); |
| 108 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(kPageSize)); |
| 109 ASSERT_TRUE(memory2); |
| 110 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize)); |
| 111 ASSERT_TRUE(memory3); |
| 112 void* const memory1_address = memory1->Memory(); |
| 113 memory1.reset(); |
| 114 memory3.reset(); |
| 115 // Freeing |memory2| (located between memory1 and memory3) should merge the |
| 116 // three free blocks together. |
| 117 memory2.reset(); |
| 118 memory1.reset(allocator_.Allocate(3 * kPageSize).release()); |
| 119 EXPECT_EQ(memory1_address, memory1->Memory()); |
| 120 } |
| 121 |
| 122 TEST_F(DiscardableMemoryAllocatorTest, |
| 123 TooLargeFreeChunksDontCauseTooMuchFragmentationWhenRecycled) { |
| 124 // Keep |memory_1| below allocated so that the ashmem region doesn't get |
| 125 // closed when |memory_2| is deleted. |
| 126 scoped_ptr<DiscardableMemory> memory_1(allocator_.Allocate(64 * 1024)); |
| 127 ASSERT_TRUE(memory_1); |
| 128 scoped_ptr<DiscardableMemory> memory_2(allocator_.Allocate(32 * 1024)); |
| 129 ASSERT_TRUE(memory_2); |
| 130 void* const address = memory_2->Memory(); |
| 131 memory_2.reset(); |
| 132 memory_2 = allocator_.Allocate(16 * 1024); |
| 133 ASSERT_TRUE(memory_2); |
| 134 EXPECT_EQ(address, memory_2->Memory()); |
| 135 WriteToDiscardableMemory(memory_2.get()); |
| 136 scoped_ptr<DiscardableMemory> memory_3(allocator_.Allocate(16 * 1024)); |
| 137 // The unused tail (16 KBytes large) of the previously freed chunk should be |
| 138 // recycled. |
| 139 EXPECT_EQ(static_cast<char*>(address) + memory_2->Size(), memory_3->Memory()); |
| 140 WriteToDiscardableMemory(memory_3.get()); |
| 141 } |
| 142 |
| 143 TEST_F(DiscardableMemoryAllocatorTest, UseMultipleAshmemRegions) { |
| 144 // Leave one page untouched at the end of the ashmem region. |
| 145 scoped_ptr<DiscardableMemory> memory1( |
| 146 allocator_.Allocate(kMinAshmemRegionSize - kPageSize)); |
| 147 ASSERT_TRUE(memory1); |
| 148 WriteToDiscardableMemory(memory1.get()); |
| 149 |
| 150 scoped_ptr<DiscardableMemory> memory2( |
| 151 allocator_.Allocate(kMinAshmemRegionSize)); |
| 152 ASSERT_TRUE(memory2); |
| 153 WriteToDiscardableMemory(memory2.get()); |
| 154 // The last page of the first ashmem region should be used for this |
| 155 // allocation. |
| 156 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize)); |
| 157 ASSERT_TRUE(memory3); |
| 158 WriteToDiscardableMemory(memory3.get()); |
| 159 EXPECT_EQ(memory3->Memory(), |
| 160 static_cast<char*>(memory1->Memory()) + memory1->Size()); |
| 161 } |
| 162 |
| 163 } // namespace internal |
| 164 } // namespace base |
OLD | NEW |