Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(66)

Side by Side Diff: base/memory/discardable_memory_allocator_unittest.cc

Issue 25293002: Add DiscardableMemoryAllocator to work around FD limit issue. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Address reviewers' comments Created 7 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
(Empty)
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/memory/discardable_memory_allocator.h"
6
7 #include "base/logging.h"
8 #include "base/memory/discardable_memory.h"
9 #include "base/memory/scoped_ptr.h"
10 #include "build/build_config.h"
11 #include "testing/gtest/include/gtest/gtest.h"
12
13 #if defined(OS_ANDROID)
14 #include <sys/types.h>
15 #include <unistd.h>
16
17 #include <fstream>
18 #include <iostream>
19 #include <string>
20 #include <vector>
21
22 #include "base/strings/string_number_conversions.h"
23 #include "base/strings/string_split.h"
24 #include "base/strings/stringprintf.h"
25 #endif
26
27 namespace base {
28
// Test-only factory, defined in the allocator's implementation file, that
// lets each test create its own allocator instance tagged with |name|.
scoped_ptr<DiscardableMemoryAllocator>
CreateDiscardableMemoryAllocatorForTesting(const std::string& name);

namespace {

// Tag applied to the allocator's memory regions; GetAllocatorDirtyMemoryInKBytes
// below greps /proc/<pid>/smaps for this name.
const char kAllocatorName[] = "allocator-for-testing";
// NOTE(review): assumes a 4 KBytes system page size — confirm on targets
// where getpagesize() differs.
const size_t kPageSize = 4096;
36
// Convenience wrapper creating a fresh allocator tagged with |kAllocatorName|.
scoped_ptr<DiscardableMemoryAllocator> CreateAllocator() {
  return CreateDiscardableMemoryAllocatorForTesting(kAllocatorName);
}
40
41 void WriteToDiscardableMemory(DiscardableMemory* memory) {
42 DCHECK_EQ(0U, memory->Size() % sizeof(int));
43 // Write to the first and the last pages only to avoid paging in up to 64
44 // MBytes.
45 static_cast<int*>(memory->Memory())[0] = 1;
46 static_cast<int*>(memory->Memory())[memory->Size() / sizeof(int) - 1] = 1;
47 }
48
49 TEST(DiscardableMemoryAllocatorTest, Basic) {
50 const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
51 ASSERT_TRUE(allocator);
52 scoped_ptr<DiscardableMemory> memory(allocator->Allocate(128 * sizeof(int)));
53 ASSERT_TRUE(memory);
54 WriteToDiscardableMemory(memory.get());
55 }
56
57 TEST(DiscardableMemoryAllocatorTest, LargeAllocation) {
58 const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
59 ASSERT_TRUE(allocator);
60 scoped_ptr<DiscardableMemory> memory(allocator->Allocate(64 * 1024 * 1024));
61 ASSERT_TRUE(memory);
62 WriteToDiscardableMemory(memory.get());
63 }
64
65 TEST(DiscardableMemoryAllocatorTest, ChunksArePageAligned) {
66 const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
67 ASSERT_TRUE(allocator);
68 scoped_ptr<DiscardableMemory> memory(allocator->Allocate(kPageSize));
69 ASSERT_TRUE(memory);
70 EXPECT_EQ(0U, reinterpret_cast<uint64_t>(memory->Memory()) % kPageSize);
71 WriteToDiscardableMemory(memory.get());
72 }
73
74 #if defined(OS_ANDROID)
75
76 const size_t kAshmemRegionSize = 32 * 1024 * 1024;
77
// Freeing a chunk and immediately re-requesting the same size must recycle
// the chunk that was just freed (same address).
TEST(DiscardableMemoryAllocatorTest, AllocateFreeAllocate) {
  const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
  ASSERT_TRUE(allocator);
  scoped_ptr<DiscardableMemory> memory(allocator->Allocate(kPageSize));
  ASSERT_TRUE(memory);
  void* const address = memory->Memory();
  memory->Unlock();  // Tests that the recycled chunk is being locked correctly.
  memory.reset();
  memory = allocator->Allocate(kPageSize);
  ASSERT_TRUE(memory);
  // The previously freed chunk should be reused.
  EXPECT_EQ(address, memory->Memory());
  // Writing verifies the recycled chunk was handed back locked (usable).
  WriteToDiscardableMemory(memory.get());
}
92
// Freeing an allocation spanning a whole ashmem region should close the
// region rather than cache it for recycling.
TEST(DiscardableMemoryAllocatorTest, FreeingWholeAshmemRegionClosesAshmem) {
  const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
  ASSERT_TRUE(allocator);
  scoped_ptr<DiscardableMemory> memory(allocator->Allocate(kAshmemRegionSize));
  ASSERT_TRUE(memory);
  // Plant a marker so a recycled (not freshly mapped) chunk can be detected.
  const int kMagic = 0xdeadbeef;
  *static_cast<int*>(memory->Memory()) = kMagic;
  memory.reset();
  // The previous ashmem region should have been closed thus it should not be
  // recycled.
  memory = allocator->Allocate(kPageSize);
  ASSERT_TRUE(memory);
  // A fresh mapping must not still contain the marker written above.
  EXPECT_NE(kMagic, *static_cast<const int*>(memory->Memory()));
}
107
// With two free chunks of different sizes available (3 pages and 1 page), a
// 1-page request must be served from the smaller (best-fit) chunk.
TEST(DiscardableMemoryAllocatorTest, AllocateUsesBestFitAlgorithm) {
  const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
  ASSERT_TRUE(allocator);
  scoped_ptr<DiscardableMemory> memory1(allocator->Allocate(3 * kPageSize));
  ASSERT_TRUE(memory1);
  scoped_ptr<DiscardableMemory> memory2(allocator->Allocate(2 * kPageSize));
  ASSERT_TRUE(memory2);
  scoped_ptr<DiscardableMemory> memory3(allocator->Allocate(1 * kPageSize));
  ASSERT_TRUE(memory3);
  void* const address_3 = memory3->Memory();
  memory1.reset();
  // Don't free |memory2| to avoid merging the 3 blocks together.
  memory3.reset();
  memory1 = allocator->Allocate(1 * kPageSize);
  ASSERT_TRUE(memory1);
  // The chunk whose size is closest to the requested size should be recycled.
  EXPECT_EQ(address_3, memory1->Memory());
  WriteToDiscardableMemory(memory1.get());
}
127
128 TEST(DiscardableMemoryAllocatorTest, MergeFreeChunks) {
129 const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
130 ASSERT_TRUE(allocator);
131 scoped_ptr<DiscardableMemory> memory1(allocator->Allocate(kPageSize));
132 ASSERT_TRUE(memory1);
133 scoped_ptr<DiscardableMemory> memory2(allocator->Allocate(kPageSize));
134 ASSERT_TRUE(memory2);
135 scoped_ptr<DiscardableMemory> memory3(allocator->Allocate(kPageSize));
136 ASSERT_TRUE(memory3);
137 void* const memory1_address = memory1->Memory();
138 memory1.reset();
139 memory3.reset();
140 // Freeing |memory2| (located between memory1 and memory3) should merge the
141 // three free blocks together.
142 memory2.reset();
143 memory1.reset(allocator->Allocate(3 * kPageSize).release());
144 EXPECT_EQ(memory1_address, memory1->Memory());
145 }
146
147 TEST(DiscardableMemoryAllocatorTest,
148 TooLargeFreeChunksDontCauseTooMuchFragmentationWhenRecycled) {
149 const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
150 ASSERT_TRUE(allocator);
151 // Keep |memory_1| below allocated so that the ashmem region doesn't get
152 // closed when |memory_2| is deleted.
153 scoped_ptr<DiscardableMemory> memory_1(allocator->Allocate(64 * 1024));
154 ASSERT_TRUE(memory_1);
155 scoped_ptr<DiscardableMemory> memory_2(allocator->Allocate(32 * 1024));
156 ASSERT_TRUE(memory_2);
157 void* const address = memory_2->Memory();
158 memory_2.reset();
159 memory_2 = allocator->Allocate(16 * 1024);
160 ASSERT_TRUE(memory_2);
161 EXPECT_EQ(address, memory_2->Memory());
162 WriteToDiscardableMemory(memory_2.get());
163 scoped_ptr<DiscardableMemory> memory_3(allocator->Allocate(16 * 1024));
164 // The unused tail (16 KBytes large) of the previously freed chunk should be
165 // recycled.
166 EXPECT_EQ(static_cast<char*>(address) + memory_2->Size(), memory_3->Memory());
167 WriteToDiscardableMemory(memory_3.get());
168 }
169
170 TEST(DiscardableMemoryAllocatorTest, UseMultipleAshmemRegions) {
171 const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
172 ASSERT_TRUE(allocator);
173 const size_t kAshmemRegionSize = 32 * 1024 * 1024;
174 // Leave one page untouched at the end of the ashmem region.
175 scoped_ptr<DiscardableMemory> memory1(
176 allocator->Allocate(kAshmemRegionSize - kPageSize));
177 ASSERT_TRUE(memory1);
178 WriteToDiscardableMemory(memory1.get());
179
180 scoped_ptr<DiscardableMemory> memory2(
181 allocator->Allocate(kAshmemRegionSize));
182 ASSERT_TRUE(memory2);
183 WriteToDiscardableMemory(memory2.get());
184 // The last page of the first ashmem region should be used for this
185 // allocation.
186 scoped_ptr<DiscardableMemory> memory3(
187 allocator->Allocate(kPageSize));
188 ASSERT_TRUE(memory3);
189 WriteToDiscardableMemory(memory3.get());
190 EXPECT_EQ(memory3->Memory(),
191 static_cast<char*>(memory1->Memory()) + memory1->Size());
192 }
193
194 // Returns -1 if an error happened during parsing.
195 int GetAllocatorDirtyMemoryInKBytes() {
196 const std::string smaps_path = StringPrintf("/proc/%d/smaps", getpid());
197 std::ifstream stream(smaps_path.c_str());
198 if (!stream.good())
199 return -1;
200 std::vector<std::string> lines;
201 for (std::string line; std::getline(stream, line); lines.push_back(line));
202 int dirty_kbytes = 0;
203 for (std::vector<std::string>::const_iterator it = lines.begin();
204 it != lines.end(); ++it) {
205 if (it->find(kAllocatorName) == std::string::npos)
206 continue;
207 const std::string& private_dirty_line = it[7];
208 if (private_dirty_line.find("Private_Dirty") == std::string::npos)
209 return -1;
210 std::vector<std::string> tokens;
211 SplitString(private_dirty_line, ' ', &tokens);
212 if (tokens.size() < 3U)
213 return -1;
214 int size;
215 if (!base::StringToInt(tokens[tokens.size() - 2], &size))
216 return -1;
217 dirty_kbytes += size;
218 it += 6;
219 }
220 return dirty_kbytes;
221 }
222
223 TEST(DiscardableMemoryAllocatorTest, PagesAreCommittedLazily) {
224 const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
225 ASSERT_TRUE(allocator);
226 scoped_ptr<DiscardableMemory> memory(allocator->Allocate(2 * 4096));
227 ASSERT_TRUE(memory);
228 EXPECT_EQ(0, GetAllocatorDirtyMemoryInKBytes());
229 static_cast<char*>(memory->Memory())[0] = 'a';
230 EXPECT_EQ(4, GetAllocatorDirtyMemoryInKBytes());
231 // Write to the second page.
232 static_cast<char*>(memory->Memory())[kPageSize] = 'b';
233 EXPECT_EQ(8, GetAllocatorDirtyMemoryInKBytes());
234 }
235
236 #endif // OS_ANDROID
237
238 } // namespace
239 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698