OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
#include "base/memory/discardable_memory_ashmem_allocator.h"

#include <stddef.h>
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

#include <limits>

#include "base/memory/discardable_memory.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
17 | 17 |
18 namespace base { | 18 namespace base { |
19 namespace internal { | 19 namespace internal { |
20 | 20 |
// Name attached to the ashmem regions created by the allocator under test.
const char kAllocatorName[] = "allocator-for-testing";

// Size of the backing ashmem region used by the fixture's allocator.
const size_t kAshmemRegionSizeForTesting = 32 * 1024 * 1024;
// Page size assumed by these tests (4 KBytes).
// NOTE(review): hard-coded rather than queried via sysconf(_SC_PAGESIZE) —
// confirm this matches all targeted devices.
const size_t kPageSize = 4096;

// Largest request whose round-up to a page boundary does not overflow
// size_t: aligning (this value + 1) would wrap around to a tiny size, which
// is exactly what TooLargeAllocationFails checks the allocator guards
// against.
const size_t kMaxAllowedAllocationSize =
    std::numeric_limits<size_t>::max() - kPageSize + 1;
28 | 28 |
// Test fixture owning a DiscardableMemoryAshmemAllocator backed by 32 MByte
// ashmem regions. A fresh allocator is constructed for every test.
class DiscardableMemoryAshmemAllocatorTest : public testing::Test {
 protected:
  DiscardableMemoryAshmemAllocatorTest()
      : allocator_(kAllocatorName, kAshmemRegionSizeForTesting) {
  }

  // Allocator under test.
  DiscardableMemoryAshmemAllocator allocator_;
};
37 | 37 |
38 void WriteToDiscardableMemory(DiscardableMemory* memory, size_t size) { | 38 void WriteToDiscardableAshmemChunk(DiscardableAshmemChunk* memory, |
| 39 size_t size) { |
39 // Write to the first and the last pages only to avoid paging in up to 64 | 40 // Write to the first and the last pages only to avoid paging in up to 64 |
40 // MBytes. | 41 // MBytes. |
41 static_cast<char*>(memory->Memory())[0] = 'a'; | 42 static_cast<char*>(memory->Memory())[0] = 'a'; |
42 static_cast<char*>(memory->Memory())[size - 1] = 'a'; | 43 static_cast<char*>(memory->Memory())[size - 1] = 'a'; |
43 } | 44 } |
44 | 45 |
45 TEST_F(DiscardableMemoryAllocatorTest, Basic) { | 46 TEST_F(DiscardableMemoryAshmemAllocatorTest, Basic) { |
46 const size_t size = 128; | 47 const size_t size = 128; |
47 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(size)); | 48 scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(size)); |
48 ASSERT_TRUE(memory); | 49 ASSERT_TRUE(memory); |
49 WriteToDiscardableMemory(memory.get(), size); | 50 WriteToDiscardableAshmemChunk(memory.get(), size); |
50 } | 51 } |
51 | 52 |
52 TEST_F(DiscardableMemoryAllocatorTest, ZeroAllocationIsNotSupported) { | 53 TEST_F(DiscardableMemoryAshmemAllocatorTest, ZeroAllocationIsNotSupported) { |
53 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(0)); | 54 scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(0)); |
54 ASSERT_FALSE(memory); | 55 ASSERT_FALSE(memory); |
55 } | 56 } |
56 | 57 |
57 TEST_F(DiscardableMemoryAllocatorTest, TooLargeAllocationFails) { | 58 TEST_F(DiscardableMemoryAshmemAllocatorTest, TooLargeAllocationFails) { |
58 scoped_ptr<DiscardableMemory> memory( | 59 scoped_ptr<DiscardableAshmemChunk> memory( |
59 allocator_.Allocate(kMaxAllowedAllocationSize + 1)); | 60 allocator_.Allocate(kMaxAllowedAllocationSize + 1)); |
60 // Page-alignment would have caused an overflow resulting in a small | 61 // Page-alignment would have caused an overflow resulting in a small |
61 // allocation if the input size wasn't checked correctly. | 62 // allocation if the input size wasn't checked correctly. |
62 ASSERT_FALSE(memory); | 63 ASSERT_FALSE(memory); |
63 } | 64 } |
64 | 65 |
65 TEST_F(DiscardableMemoryAllocatorTest, | 66 TEST_F(DiscardableMemoryAshmemAllocatorTest, |
66 AshmemRegionsAreNotSmallerThanRequestedSize) { | 67 AshmemRegionsAreNotSmallerThanRequestedSize) { |
67 // The creation of the underlying ashmem region is expected to fail since | 68 // The creation of the underlying ashmem region is expected to fail since |
68 // there should not be enough room in the address space. When ashmem creation | 69 // there should not be enough room in the address space. When ashmem creation |
69 // fails, the allocator repetitively retries by dividing the size by 2. This | 70 // fails, the allocator repetitively retries by dividing the size by 2. This |
70 // size should not be smaller than the size the user requested so the | 71 // size should not be smaller than the size the user requested so the |
71 // allocation here should just fail (and not succeed with the minimum ashmem | 72 // allocation here should just fail (and not succeed with the minimum ashmem |
72 // region size). | 73 // region size). |
73 scoped_ptr<DiscardableMemory> memory( | 74 scoped_ptr<DiscardableAshmemChunk> memory( |
74 allocator_.Allocate(kMaxAllowedAllocationSize)); | 75 allocator_.Allocate(kMaxAllowedAllocationSize)); |
75 ASSERT_FALSE(memory); | 76 ASSERT_FALSE(memory); |
76 } | 77 } |
77 | 78 |
// The halved region sizes the allocator retries with after a failed ashmem
// creation must themselves stay page-aligned.
TEST_F(DiscardableMemoryAshmemAllocatorTest,
       AshmemRegionsAreAlwaysPageAligned) {
  // Use a separate allocator here so that we can override the ashmem region
  // size.
  DiscardableMemoryAshmemAllocator allocator(
      kAllocatorName, kMaxAllowedAllocationSize);
  scoped_ptr<DiscardableAshmemChunk> memory(allocator.Allocate(kPageSize));
  ASSERT_TRUE(memory);
  // The huge initial size cannot have succeeded as-is, so a smaller (retried)
  // region must have been created...
  EXPECT_GT(kMaxAllowedAllocationSize, allocator.last_ashmem_region_size());
  // ...and that retried size must be a whole number of pages.
  ASSERT_TRUE(allocator.last_ashmem_region_size() % kPageSize == 0);
}
88 | 90 |
89 TEST_F(DiscardableMemoryAllocatorTest, LargeAllocation) { | 91 TEST_F(DiscardableMemoryAshmemAllocatorTest, LargeAllocation) { |
90 // Note that large allocations should just use DiscardableMemoryAndroidSimple | |
91 // instead. | |
92 const size_t size = 64 * 1024 * 1024; | 92 const size_t size = 64 * 1024 * 1024; |
93 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(size)); | 93 scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(size)); |
94 ASSERT_TRUE(memory); | 94 ASSERT_TRUE(memory); |
95 WriteToDiscardableMemory(memory.get(), size); | 95 WriteToDiscardableAshmemChunk(memory.get(), size); |
96 } | 96 } |
97 | 97 |
98 TEST_F(DiscardableMemoryAllocatorTest, ChunksArePageAligned) { | 98 TEST_F(DiscardableMemoryAshmemAllocatorTest, ChunksArePageAligned) { |
99 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize)); | 99 scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(kPageSize)); |
100 ASSERT_TRUE(memory); | 100 ASSERT_TRUE(memory); |
101 EXPECT_EQ(0U, reinterpret_cast<uint64_t>(memory->Memory()) % kPageSize); | 101 EXPECT_EQ(0U, reinterpret_cast<uint64_t>(memory->Memory()) % kPageSize); |
102 WriteToDiscardableMemory(memory.get(), kPageSize); | 102 WriteToDiscardableAshmemChunk(memory.get(), kPageSize); |
103 } | 103 } |
104 | 104 |
// Freeing a chunk and re-requesting the same size must recycle the freed
// chunk (same address) rather than grow the region.
TEST_F(DiscardableMemoryAshmemAllocatorTest, AllocateFreeAllocate) {
  scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(kPageSize));
  // Extra allocation that prevents the region from being deleted when |memory|
  // gets deleted.
  scoped_ptr<DiscardableAshmemChunk> memory_lock(
      allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory);
  void* const address = memory->Memory();
  memory->Unlock();  // Tests that the reused chunk is being locked correctly.
  memory.reset();
  memory = allocator_.Allocate(kPageSize);
  ASSERT_TRUE(memory);
  // The previously freed chunk should be reused.
  EXPECT_EQ(address, memory->Memory());
  WriteToDiscardableAshmemChunk(memory.get(), kPageSize);
}
120 | 121 |
// Once the last live chunk of a region is freed the whole ashmem region must
// be closed, so a subsequent allocation gets a fresh region instead of the
// old bytes.
TEST_F(DiscardableMemoryAshmemAllocatorTest,
       FreeingWholeAshmemRegionClosesAshmem) {
  scoped_ptr<DiscardableAshmemChunk> memory(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory);
  // Marker value written into the chunk to detect whether the same backing
  // bytes come back after the region is released.
  const int kMagic = 0xdeadbeef;
  *static_cast<int*>(memory->Memory()) = kMagic;
  memory.reset();
  // The previous ashmem region should have been closed thus it should not be
  // reused.
  memory = allocator_.Allocate(kPageSize);
  ASSERT_TRUE(memory);
  EXPECT_NE(kMagic, *static_cast<const int*>(memory->Memory()));
}
133 | 135 |
// With free chunks of 3 pages and 1 page available, a 1-page request must be
// served from the 1-page chunk (best fit), not the larger one.
TEST_F(DiscardableMemoryAshmemAllocatorTest, AllocateUsesBestFitAlgorithm) {
  scoped_ptr<DiscardableAshmemChunk> memory1(
      allocator_.Allocate(3 * kPageSize));
  ASSERT_TRUE(memory1);
  scoped_ptr<DiscardableAshmemChunk> memory2(
      allocator_.Allocate(2 * kPageSize));
  ASSERT_TRUE(memory2);
  scoped_ptr<DiscardableAshmemChunk> memory3(
      allocator_.Allocate(1 * kPageSize));
  ASSERT_TRUE(memory3);
  void* const address_3 = memory3->Memory();
  memory1.reset();
  // Don't free |memory2| to avoid merging the 3 blocks together.
  memory3.reset();
  memory1 = allocator_.Allocate(1 * kPageSize);
  ASSERT_TRUE(memory1);
  // The chunk whose size is closest to the requested size should be reused.
  EXPECT_EQ(address_3, memory1->Memory());
  WriteToDiscardableAshmemChunk(memory1.get(), kPageSize);
}
151 | 156 |
152 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunks) { | 157 TEST_F(DiscardableMemoryAshmemAllocatorTest, MergeFreeChunks) { |
153 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kPageSize)); | 158 scoped_ptr<DiscardableAshmemChunk> memory1(allocator_.Allocate(kPageSize)); |
154 ASSERT_TRUE(memory1); | 159 ASSERT_TRUE(memory1); |
155 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(kPageSize)); | 160 scoped_ptr<DiscardableAshmemChunk> memory2(allocator_.Allocate(kPageSize)); |
156 ASSERT_TRUE(memory2); | 161 ASSERT_TRUE(memory2); |
157 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize)); | 162 scoped_ptr<DiscardableAshmemChunk> memory3(allocator_.Allocate(kPageSize)); |
158 ASSERT_TRUE(memory3); | 163 ASSERT_TRUE(memory3); |
159 scoped_ptr<DiscardableMemory> memory4(allocator_.Allocate(kPageSize)); | 164 scoped_ptr<DiscardableAshmemChunk> memory4(allocator_.Allocate(kPageSize)); |
160 ASSERT_TRUE(memory4); | 165 ASSERT_TRUE(memory4); |
161 void* const memory1_address = memory1->Memory(); | 166 void* const memory1_address = memory1->Memory(); |
162 memory1.reset(); | 167 memory1.reset(); |
163 memory3.reset(); | 168 memory3.reset(); |
164 // Freeing |memory2| (located between memory1 and memory3) should merge the | 169 // Freeing |memory2| (located between memory1 and memory3) should merge the |
165 // three free blocks together. | 170 // three free blocks together. |
166 memory2.reset(); | 171 memory2.reset(); |
167 memory1 = allocator_.Allocate(3 * kPageSize); | 172 memory1 = allocator_.Allocate(3 * kPageSize); |
168 EXPECT_EQ(memory1_address, memory1->Memory()); | 173 EXPECT_EQ(memory1_address, memory1->Memory()); |
169 } | 174 } |
170 | 175 |
171 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAdvanced) { | 176 TEST_F(DiscardableMemoryAshmemAllocatorTest, MergeFreeChunksAdvanced) { |
172 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize)); | 177 scoped_ptr<DiscardableAshmemChunk> memory1( |
| 178 allocator_.Allocate(4 * kPageSize)); |
173 ASSERT_TRUE(memory1); | 179 ASSERT_TRUE(memory1); |
174 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize)); | 180 scoped_ptr<DiscardableAshmemChunk> memory2( |
| 181 allocator_.Allocate(4 * kPageSize)); |
175 ASSERT_TRUE(memory2); | 182 ASSERT_TRUE(memory2); |
176 void* const memory1_address = memory1->Memory(); | 183 void* const memory1_address = memory1->Memory(); |
177 memory1.reset(); | 184 memory1.reset(); |
178 memory1 = allocator_.Allocate(2 * kPageSize); | 185 memory1 = allocator_.Allocate(2 * kPageSize); |
179 memory2.reset(); | 186 memory2.reset(); |
180 // At this point, the region should be in this state: | 187 // At this point, the region should be in this state: |
181 // 8 KBytes (used), 24 KBytes (free). | 188 // 8 KBytes (used), 24 KBytes (free). |
182 memory2 = allocator_.Allocate(6 * kPageSize); | 189 memory2 = allocator_.Allocate(6 * kPageSize); |
183 EXPECT_EQ( | 190 EXPECT_EQ( |
184 static_cast<const char*>(memory2->Memory()), | 191 static_cast<const char*>(memory2->Memory()), |
185 static_cast<const char*>(memory1_address) + 2 * kPageSize); | 192 static_cast<const char*>(memory1_address) + 2 * kPageSize); |
186 } | 193 } |
187 | 194 |
188 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAdvanced2) { | 195 TEST_F(DiscardableMemoryAshmemAllocatorTest, MergeFreeChunksAdvanced2) { |
189 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize)); | 196 scoped_ptr<DiscardableAshmemChunk> memory1( |
| 197 allocator_.Allocate(4 * kPageSize)); |
190 ASSERT_TRUE(memory1); | 198 ASSERT_TRUE(memory1); |
191 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize)); | 199 scoped_ptr<DiscardableAshmemChunk> memory2( |
| 200 allocator_.Allocate(4 * kPageSize)); |
192 ASSERT_TRUE(memory2); | 201 ASSERT_TRUE(memory2); |
193 void* const memory1_address = memory1->Memory(); | 202 void* const memory1_address = memory1->Memory(); |
194 memory1.reset(); | 203 memory1.reset(); |
195 memory1 = allocator_.Allocate(2 * kPageSize); | 204 memory1 = allocator_.Allocate(2 * kPageSize); |
196 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(2 * kPageSize)); | 205 scoped_ptr<DiscardableAshmemChunk> memory3( |
| 206 allocator_.Allocate(2 * kPageSize)); |
197 // At this point, the region should be in this state: | 207 // At this point, the region should be in this state: |
198 // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used). | 208 // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used). |
199 memory3.reset(); | 209 memory3.reset(); |
200 memory2.reset(); | 210 memory2.reset(); |
201 // At this point, the region should be in this state: | 211 // At this point, the region should be in this state: |
202 // 8 KBytes (used), 24 KBytes (free). | 212 // 8 KBytes (used), 24 KBytes (free). |
203 memory2 = allocator_.Allocate(6 * kPageSize); | 213 memory2 = allocator_.Allocate(6 * kPageSize); |
204 EXPECT_EQ( | 214 EXPECT_EQ( |
205 static_cast<const char*>(memory2->Memory()), | 215 static_cast<const char*>(memory2->Memory()), |
206 static_cast<const char*>(memory1_address) + 2 * kPageSize); | 216 static_cast<const char*>(memory1_address) + 2 * kPageSize); |
207 } | 217 } |
208 | 218 |
209 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAndDeleteAshmemRegion) { | 219 TEST_F(DiscardableMemoryAshmemAllocatorTest, |
210 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize)); | 220 MergeFreeChunksAndDeleteAshmemRegion) { |
| 221 scoped_ptr<DiscardableAshmemChunk> memory1( |
| 222 allocator_.Allocate(4 * kPageSize)); |
211 ASSERT_TRUE(memory1); | 223 ASSERT_TRUE(memory1); |
212 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize)); | 224 scoped_ptr<DiscardableAshmemChunk> memory2( |
| 225 allocator_.Allocate(4 * kPageSize)); |
213 ASSERT_TRUE(memory2); | 226 ASSERT_TRUE(memory2); |
214 memory1.reset(); | 227 memory1.reset(); |
215 memory1 = allocator_.Allocate(2 * kPageSize); | 228 memory1 = allocator_.Allocate(2 * kPageSize); |
216 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(2 * kPageSize)); | 229 scoped_ptr<DiscardableAshmemChunk> memory3( |
| 230 allocator_.Allocate(2 * kPageSize)); |
217 // At this point, the region should be in this state: | 231 // At this point, the region should be in this state: |
218 // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used). | 232 // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used). |
219 memory1.reset(); | 233 memory1.reset(); |
220 memory3.reset(); | 234 memory3.reset(); |
221 // At this point, the region should be in this state: | 235 // At this point, the region should be in this state: |
222 // 8 KBytes (free), 8 KBytes (used), 8 KBytes (free). | 236 // 8 KBytes (free), 8 KBytes (used), 8 KBytes (free). |
223 const int kMagic = 0xdeadbeef; | 237 const int kMagic = 0xdeadbeef; |
224 *static_cast<int*>(memory2->Memory()) = kMagic; | 238 *static_cast<int*>(memory2->Memory()) = kMagic; |
225 memory2.reset(); | 239 memory2.reset(); |
226 // The whole region should have been deleted. | 240 // The whole region should have been deleted. |
227 memory2 = allocator_.Allocate(2 * kPageSize); | 241 memory2 = allocator_.Allocate(2 * kPageSize); |
228 EXPECT_NE(kMagic, *static_cast<int*>(memory2->Memory())); | 242 EXPECT_NE(kMagic, *static_cast<int*>(memory2->Memory())); |
229 } | 243 } |
230 | 244 |
231 TEST_F(DiscardableMemoryAllocatorTest, | 245 TEST_F(DiscardableMemoryAshmemAllocatorTest, |
232 TooLargeFreeChunksDontCauseTooMuchFragmentationWhenRecycled) { | 246 TooLargeFreeChunksDontCauseTooMuchFragmentationWhenRecycled) { |
233 // Keep |memory_1| below allocated so that the ashmem region doesn't get | 247 // Keep |memory_1| below allocated so that the ashmem region doesn't get |
234 // closed when |memory_2| is deleted. | 248 // closed when |memory_2| is deleted. |
235 scoped_ptr<DiscardableMemory> memory_1(allocator_.Allocate(64 * 1024)); | 249 scoped_ptr<DiscardableAshmemChunk> memory_1(allocator_.Allocate(64 * 1024)); |
236 ASSERT_TRUE(memory_1); | 250 ASSERT_TRUE(memory_1); |
237 scoped_ptr<DiscardableMemory> memory_2(allocator_.Allocate(32 * 1024)); | 251 scoped_ptr<DiscardableAshmemChunk> memory_2(allocator_.Allocate(32 * 1024)); |
238 ASSERT_TRUE(memory_2); | 252 ASSERT_TRUE(memory_2); |
239 void* const address = memory_2->Memory(); | 253 void* const address = memory_2->Memory(); |
240 memory_2.reset(); | 254 memory_2.reset(); |
241 const size_t size = 16 * 1024; | 255 const size_t size = 16 * 1024; |
242 memory_2 = allocator_.Allocate(size); | 256 memory_2 = allocator_.Allocate(size); |
243 ASSERT_TRUE(memory_2); | 257 ASSERT_TRUE(memory_2); |
244 EXPECT_EQ(address, memory_2->Memory()); | 258 EXPECT_EQ(address, memory_2->Memory()); |
245 WriteToDiscardableMemory(memory_2.get(), size); | 259 WriteToDiscardableAshmemChunk(memory_2.get(), size); |
246 scoped_ptr<DiscardableMemory> memory_3(allocator_.Allocate(size)); | 260 scoped_ptr<DiscardableAshmemChunk> memory_3(allocator_.Allocate(size)); |
247 // The unused tail (16 KBytes large) of the previously freed chunk should be | 261 // The unused tail (16 KBytes large) of the previously freed chunk should be |
248 // reused. | 262 // reused. |
249 EXPECT_EQ(static_cast<char*>(address) + size, memory_3->Memory()); | 263 EXPECT_EQ(static_cast<char*>(address) + size, memory_3->Memory()); |
250 WriteToDiscardableMemory(memory_3.get(), size); | 264 WriteToDiscardableAshmemChunk(memory_3.get(), size); |
251 } | 265 } |
252 | 266 |
// When a request does not fit in the current region the allocator must open
// a second region, while leftover space in the first region stays usable.
TEST_F(DiscardableMemoryAshmemAllocatorTest, UseMultipleAshmemRegions) {
  // Leave one page untouched at the end of the ashmem region.
  const size_t size = kAshmemRegionSizeForTesting - kPageSize;
  scoped_ptr<DiscardableAshmemChunk> memory1(allocator_.Allocate(size));
  ASSERT_TRUE(memory1);
  WriteToDiscardableAshmemChunk(memory1.get(), size);

  // Does not fit in the one remaining page, so a second region is needed.
  scoped_ptr<DiscardableAshmemChunk> memory2(
      allocator_.Allocate(kAshmemRegionSizeForTesting));
  ASSERT_TRUE(memory2);
  WriteToDiscardableAshmemChunk(memory2.get(), kAshmemRegionSizeForTesting);
  // The last page of the first ashmem region should be used for this
  // allocation.
  scoped_ptr<DiscardableAshmemChunk> memory3(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory3);
  WriteToDiscardableAshmemChunk(memory3.get(), kPageSize);
  EXPECT_EQ(memory3->Memory(), static_cast<char*>(memory1->Memory()) + size);
}
271 | 285 |
272 TEST_F(DiscardableMemoryAllocatorTest, | 286 TEST_F(DiscardableMemoryAshmemAllocatorTest, |
273 HighestAllocatedChunkPointerIsUpdatedWhenHighestChunkGetsSplit) { | 287 HighestAllocatedChunkPointerIsUpdatedWhenHighestChunkGetsSplit) { |
274 // Prevents the ashmem region from getting closed when |memory2| gets freed. | 288 // Prevents the ashmem region from getting closed when |memory2| gets freed. |
275 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kPageSize)); | 289 scoped_ptr<DiscardableAshmemChunk> memory1(allocator_.Allocate(kPageSize)); |
276 ASSERT_TRUE(memory1); | 290 ASSERT_TRUE(memory1); |
277 | 291 |
278 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize)); | 292 scoped_ptr<DiscardableAshmemChunk> memory2( |
| 293 allocator_.Allocate(4 * kPageSize)); |
279 ASSERT_TRUE(memory2); | 294 ASSERT_TRUE(memory2); |
280 | 295 |
281 memory2.reset(); | 296 memory2.reset(); |
282 memory2 = allocator_.Allocate(kPageSize); | 297 memory2 = allocator_.Allocate(kPageSize); |
283 // There should now be a free chunk of size 3 * |kPageSize| starting at offset | 298 // There should now be a free chunk of size 3 * |kPageSize| starting at offset |
284 // 2 * |kPageSize| and the pointer to the highest allocated chunk should have | 299 // 2 * |kPageSize| and the pointer to the highest allocated chunk should have |
285 // also been updated to |base_| + 2 * |kPageSize|. This pointer is used to | 300 // also been updated to |base_| + 2 * |kPageSize|. This pointer is used to |
286 // maintain the container mapping a chunk address to its previous chunk and | 301 // maintain the container mapping a chunk address to its previous chunk and |
287 // this map is in turn used while merging previous contiguous chunks. | 302 // this map is in turn used while merging previous contiguous chunks. |
288 | 303 |
289 // Allocate more than 3 * |kPageSize| so that the free chunk of size 3 * | 304 // Allocate more than 3 * |kPageSize| so that the free chunk of size 3 * |
290 // |kPageSize| is not reused and |highest_allocated_chunk_| gets used instead. | 305 // |kPageSize| is not reused and |highest_allocated_chunk_| gets used instead. |
291 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(4 * kPageSize)); | 306 scoped_ptr<DiscardableAshmemChunk> memory3( |
| 307 allocator_.Allocate(4 * kPageSize)); |
292 ASSERT_TRUE(memory3); | 308 ASSERT_TRUE(memory3); |
293 | 309 |
294 // Deleting |memory3| (whose size is 4 * |kPageSize|) should result in a merge | 310 // Deleting |memory3| (whose size is 4 * |kPageSize|) should result in a merge |
295 // with its previous chunk which is the free chunk of size |3 * kPageSize|. | 311 // with its previous chunk which is the free chunk of size |3 * kPageSize|. |
296 memory3.reset(); | 312 memory3.reset(); |
297 memory3 = allocator_.Allocate((3 + 4) * kPageSize); | 313 memory3 = allocator_.Allocate((3 + 4) * kPageSize); |
298 EXPECT_EQ(memory3->Memory(), | 314 EXPECT_EQ(memory3->Memory(), |
299 static_cast<const char*>(memory2->Memory()) + kPageSize); | 315 static_cast<const char*>(memory2->Memory()) + kPageSize); |
300 } | 316 } |
301 | 317 |
302 } // namespace internal | 318 } // namespace internal |
303 } // namespace base | 319 } // namespace base |
OLD | NEW |