Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(423)

Side by Side Diff: base/memory/discardable_memory_allocator_android_unittest.cc

Issue 25293002: Add DiscardableMemoryAllocator to work around FD limit issue. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Fix Clang build Created 7 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
(Empty)
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/memory/discardable_memory_allocator_android.h"
6
7 #include <sys/types.h>
8 #include <unistd.h>
9
10 #include "base/memory/discardable_memory.h"
11 #include "base/memory/scoped_ptr.h"
12 #include "base/strings/string_number_conversions.h"
13 #include "base/strings/string_split.h"
14 #include "base/strings/stringprintf.h"
15 #include "build/build_config.h"
16 #include "testing/gtest/include/gtest/gtest.h"
17
18 namespace base {
19 namespace internal {
20
// Name used to identify the allocator (and its ashmem regions) in tests.
const char kAllocatorName[] = "allocator-for-testing";

// Page size assumed by these tests; allocator chunks are page-aligned.
const size_t kPageSize = 4096;
// Minimum size of an ashmem region created by the allocator, re-exported
// here for brevity in the tests below.
const size_t kMinAshmemRegionSize =
    DiscardableMemoryAllocator::kMinAshmemRegionSize;
26
// Test fixture creating a fresh DiscardableMemoryAllocator for each test.
class DiscardableMemoryAllocatorTest : public testing::Test {
 protected:
  DiscardableMemoryAllocatorTest() : allocator_(kAllocatorName) {}

  // Allocator under test, used directly by the TEST_F bodies below.
  DiscardableMemoryAllocator allocator_;
};
33
34 void WriteToDiscardableMemory(DiscardableMemory* memory, size_t size) {
35 // Write to the first and the last pages only to avoid paging in up to 64
36 // MBytes.
37 static_cast<char*>(memory->Memory())[0] = 'a';
38 static_cast<char*>(memory->Memory())[size - 1] = 'a';
39 }
40
41 TEST_F(DiscardableMemoryAllocatorTest, Basic) {
42 const size_t size = 128;
43 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(size));
44 ASSERT_TRUE(memory);
45 WriteToDiscardableMemory(memory.get(), size);
46 }
47
48 TEST_F(DiscardableMemoryAllocatorTest, LargeAllocation) {
49 // Note that large allocations should just use DiscardableMemoryAndroidSimple
50 // instead.
51 const size_t size = 64 * 1024 * 1024;
52 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(size));
53 ASSERT_TRUE(memory);
54 WriteToDiscardableMemory(memory.get(), size);
55 }
56
57 TEST_F(DiscardableMemoryAllocatorTest, ChunksArePageAligned) {
58 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize));
59 ASSERT_TRUE(memory);
60 EXPECT_EQ(0U, reinterpret_cast<uint64_t>(memory->Memory()) % kPageSize);
61 WriteToDiscardableMemory(memory.get(), kPageSize);
62 }
63
// Checks that a freed chunk is reused (and locked again) by a subsequent
// allocation of the same size.
TEST_F(DiscardableMemoryAllocatorTest, AllocateFreeAllocate) {
  scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize));
  // Extra allocation that prevents the region from being deleted when |memory|
  // gets deleted.
  scoped_ptr<DiscardableMemory> memory_lock(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory);
  void* const address = memory->Memory();
  memory->Unlock();  // Tests that the reused chunk is being locked correctly.
  memory.reset();
  memory = allocator_.Allocate(kPageSize);
  ASSERT_TRUE(memory);
  // The previously freed chunk should be reused.
  EXPECT_EQ(address, memory->Memory());
  WriteToDiscardableMemory(memory.get(), kPageSize);
}
79
// Checks that freeing the last chunk of an ashmem region closes the region,
// so that a later allocation gets fresh (zeroed) memory rather than reusing
// the old region's pages.
TEST_F(DiscardableMemoryAllocatorTest, FreeingWholeAshmemRegionClosesAshmem) {
  scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory);
  // Marker value used to detect whether the old region's memory is reused.
  const int kMagic = 0xdeadbeef;
  *static_cast<int*>(memory->Memory()) = kMagic;
  memory.reset();
  // The previous ashmem region should have been closed thus it should not be
  // reused.
  memory = allocator_.Allocate(kPageSize);
  ASSERT_TRUE(memory);
  EXPECT_NE(kMagic, *static_cast<const int*>(memory->Memory()));
}
92
// Checks that, among multiple free chunks, the allocator picks the one whose
// size is closest to (but not smaller than) the requested size.
TEST_F(DiscardableMemoryAllocatorTest, AllocateUsesBestFitAlgorithm) {
  // Lay out three adjacent chunks of decreasing size: 3, 2 and 1 pages.
  scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(3 * kPageSize));
  ASSERT_TRUE(memory1);
  scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(2 * kPageSize));
  ASSERT_TRUE(memory2);
  scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(1 * kPageSize));
  ASSERT_TRUE(memory3);
  void* const address_3 = memory3->Memory();
  memory1.reset();
  // Don't free |memory2| to avoid merging the 3 blocks together.
  memory3.reset();
  // Both the 3-page and the 1-page chunks are now free; a 1-page request
  // should pick the 1-page chunk, not the first (larger) one.
  memory1 = allocator_.Allocate(1 * kPageSize);
  ASSERT_TRUE(memory1);
  // The chunk whose size is closest to the requested size should be reused.
  EXPECT_EQ(address_3, memory1->Memory());
  WriteToDiscardableMemory(memory1.get(), kPageSize);
}
110
111 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunks) {
112 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kPageSize));
113 ASSERT_TRUE(memory1);
114 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(kPageSize));
115 ASSERT_TRUE(memory2);
116 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize));
117 ASSERT_TRUE(memory3);
118 scoped_ptr<DiscardableMemory> memory4(allocator_.Allocate(kPageSize));
119 ASSERT_TRUE(memory4);
120 void* const memory1_address = memory1->Memory();
121 memory1.reset();
122 memory3.reset();
123 // Freeing |memory2| (located between memory1 and memory3) should merge the
124 // three free blocks together.
125 memory2.reset();
126 memory1 = allocator_.Allocate(3 * kPageSize);
127 EXPECT_EQ(memory1_address, memory1->Memory());
128 }
129
130 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAdvanced) {
131 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize));
132 ASSERT_TRUE(memory1);
133 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize));
134 ASSERT_TRUE(memory2);
135 void* const memory1_address = memory1->Memory();
136 memory1.reset();
137 memory1 = allocator_.Allocate(2 * kPageSize);
138 memory2.reset();
139 // At this point, the region should be in this state:
140 // 8 KBytes (used), 24 KBytes (free).
141 memory2 = allocator_.Allocate(6 * kPageSize);
142 EXPECT_EQ(
143 static_cast<const char*>(memory2->Memory()),
144 static_cast<const char*>(memory1_address) + 2 * kPageSize);
145 }
146
147 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAdvanced2) {
148 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize));
149 ASSERT_TRUE(memory1);
150 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize));
151 ASSERT_TRUE(memory2);
152 void* const memory1_address = memory1->Memory();
153 memory1.reset();
154 memory1 = allocator_.Allocate(2 * kPageSize);
155 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(2 * kPageSize));
156 // At this point, the region should be in this state:
157 // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used).
158 memory3.reset();
159 memory2.reset();
160 // At this point, the region should be in this state:
161 // 8 KBytes (used), 24 KBytes (free).
162 memory2 = allocator_.Allocate(6 * kPageSize);
163 EXPECT_EQ(
164 static_cast<const char*>(memory2->Memory()),
165 static_cast<const char*>(memory1_address) + 2 * kPageSize);
166 }
167
168 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAndDeleteAshmemRegion) {
169 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(4 * kPageSize));
170 ASSERT_TRUE(memory1);
171 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(4 * kPageSize));
172 ASSERT_TRUE(memory2);
173 memory1.reset();
174 memory1 = allocator_.Allocate(2 * kPageSize);
175 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(2 * kPageSize));
176 // At this point, the region should be in this state:
177 // 8 KBytes (used), 8 KBytes (used), 16 KBytes (used).
178 memory1.reset();
179 memory3.reset();
180 // At this point, the region should be in this state:
181 // 8 KBytes (free), 8 KBytes (used), 8 KBytes (free).
182 const int kMagic = 0xdeadbeef;
183 *static_cast<int*>(memory2->Memory()) = kMagic;
184 memory2.reset();
185 // The whole region should have been deleted.
186 memory2 = allocator_.Allocate(2 * kPageSize);
187 EXPECT_NE(kMagic, *static_cast<int*>(memory2->Memory()));
188 }
189
190 TEST_F(DiscardableMemoryAllocatorTest,
191 TooLargeFreeChunksDontCauseTooMuchFragmentationWhenRecycled) {
192 // Keep |memory_1| below allocated so that the ashmem region doesn't get
193 // closed when |memory_2| is deleted.
194 scoped_ptr<DiscardableMemory> memory_1(allocator_.Allocate(64 * 1024));
195 ASSERT_TRUE(memory_1);
196 scoped_ptr<DiscardableMemory> memory_2(allocator_.Allocate(32 * 1024));
197 ASSERT_TRUE(memory_2);
198 void* const address = memory_2->Memory();
199 memory_2.reset();
200 const size_t size = 16 * 1024;
201 memory_2 = allocator_.Allocate(size);
202 ASSERT_TRUE(memory_2);
203 EXPECT_EQ(address, memory_2->Memory());
204 WriteToDiscardableMemory(memory_2.get(), size);
205 scoped_ptr<DiscardableMemory> memory_3(allocator_.Allocate(size));
206 // The unused tail (16 KBytes large) of the previously freed chunk should be
207 // reused.
208 EXPECT_EQ(static_cast<char*>(address) + size, memory_3->Memory());
209 WriteToDiscardableMemory(memory_3.get(), size);
210 }
211
// Checks that the allocator creates additional ashmem regions when a request
// doesn't fit in the existing ones, and that remaining space in an earlier
// region is still usable afterwards.
TEST_F(DiscardableMemoryAllocatorTest, UseMultipleAshmemRegions) {
  // Leave one page untouched at the end of the ashmem region.
  const size_t size = kMinAshmemRegionSize - kPageSize;
  scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(size));
  ASSERT_TRUE(memory1);
  WriteToDiscardableMemory(memory1.get(), size);

  // This request can't fit in the one-page tail of the first region, so a
  // second region must be created.
  scoped_ptr<DiscardableMemory> memory2(
      allocator_.Allocate(kMinAshmemRegionSize));
  ASSERT_TRUE(memory2);
  WriteToDiscardableMemory(memory2.get(), kMinAshmemRegionSize);
  // The last page of the first ashmem region should be used for this
  // allocation.
  scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize));
  ASSERT_TRUE(memory3);
  WriteToDiscardableMemory(memory3.get(), kPageSize);
  EXPECT_EQ(memory3->Memory(), static_cast<char*>(memory1->Memory()) + size);
}
230
231 } // namespace internal
232 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698