Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1263)

Side by Side Diff: base/memory/discardable_memory_allocator_android_unittest.cc

Issue 25293002: Add DiscardableMemoryAllocator to work around FD limit issue. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: s/actual_size/recycled_chunk_size Created 7 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
(Empty)
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
#include "base/memory/discardable_memory_allocator_android.h"

#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

#include <fstream>
#include <iostream>
#include <string>
#include <vector>

#include "base/logging.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
23
24 namespace base {
25 namespace internal {
26
// Name given to the allocator under test. It is used below to locate the
// allocator's ashmem mappings when parsing /proc/<pid>/smaps.
const char kAllocatorName[] = "allocator-for-testing";

// NOTE(review): assumes a 4 KByte page size on the test devices — confirm
// against getpagesize() if this test ever runs on other configurations.
const size_t kPageSize = 4096;
const size_t kMinAshmemRegionSize =
    DiscardableMemoryAllocator::kMinAshmemRegionSize;
32
// Test fixture creating a fresh allocator (with a well-known name) for each
// test case.
class DiscardableMemoryAllocatorTest : public testing::Test {
 protected:
  DiscardableMemoryAllocatorTest() : allocator_(kAllocatorName) {}

  DiscardableMemoryAllocator allocator_;
};
39
40 void WriteToDiscardableMemory(DiscardableMemory* memory) {
41 DCHECK_EQ(0U, memory->Size() % sizeof(int));
42 // Write to the first and the last pages only to avoid paging in up to 64
43 // MBytes.
44 static_cast<int*>(memory->Memory())[0] = 1;
45 static_cast<int*>(memory->Memory())[memory->Size() / sizeof(int) - 1] = 1;
pasko 2013/10/25 15:21:13 if you wrote 'char', you would not have had to div
Philippe 2013/10/28 09:44:43 Yeah good point. I'm also a big supporter of sizeo
46 }
47
48 TEST_F(DiscardableMemoryAllocatorTest, Basic) {
49 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(128 * sizeof(int)));
50 ASSERT_TRUE(memory);
51 WriteToDiscardableMemory(memory.get());
52 }
53
54 TEST_F(DiscardableMemoryAllocatorTest, LargeAllocation) {
55 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(64 * 1024 * 1024));
56 ASSERT_TRUE(memory);
57 WriteToDiscardableMemory(memory.get());
58 }
59
60 TEST_F(DiscardableMemoryAllocatorTest, ChunksArePageAligned) {
61 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize));
62 ASSERT_TRUE(memory);
63 EXPECT_EQ(0U, reinterpret_cast<uint64_t>(memory->Memory()) % kPageSize);
64 WriteToDiscardableMemory(memory.get());
65 }
66
67 TEST_F(DiscardableMemoryAllocatorTest, AllocateFreeAllocate) {
68 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize));
69 ASSERT_TRUE(memory);
70 void* const address = memory->Memory();
71 memory->Unlock(); // Tests that the recycled chunk is being locked correctly.
72 memory.reset();
73 memory = allocator_.Allocate(kPageSize);
74 ASSERT_TRUE(memory);
75 // The previously freed chunk should be reused.
76 EXPECT_EQ(address, memory->Memory());
77 WriteToDiscardableMemory(memory.get());
78 }
79
80 TEST_F(DiscardableMemoryAllocatorTest, FreeingWholeAshmemRegionClosesAshmem) {
81 scoped_ptr<DiscardableMemory> memory(
82 allocator_.Allocate(kMinAshmemRegionSize));
83 ASSERT_TRUE(memory);
84 const int kMagic = 0xdeadbeef;
85 *static_cast<int*>(memory->Memory()) = kMagic;
86 memory.reset();
87 // The previous ashmem region should have been closed thus it should not be
pasko 2013/10/25 15:21:13 Does this rely on the region having all chunks unl
Philippe 2013/10/28 09:44:43 As we discussed offline GTest is full of (nice) su
88 // recycled.
89 memory = allocator_.Allocate(kPageSize);
90 ASSERT_TRUE(memory);
91 EXPECT_NE(kMagic, *static_cast<const int*>(memory->Memory()));
92 }
93
94 TEST_F(DiscardableMemoryAllocatorTest, AllocateUsesBestFitAlgorithm) {
95 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(3 * kPageSize));
96 ASSERT_TRUE(memory1);
97 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(2 * kPageSize));
98 ASSERT_TRUE(memory2);
99 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(1 * kPageSize));
100 ASSERT_TRUE(memory3);
101 void* const address_3 = memory3->Memory();
102 memory1.reset();
103 // Don't free |memory2| to avoid merging the 3 blocks together.
104 memory3.reset();
105 memory1 = allocator_.Allocate(1 * kPageSize);
106 ASSERT_TRUE(memory1);
107 // The chunk whose size is closest to the requested size should be recycled.
108 EXPECT_EQ(address_3, memory1->Memory());
109 WriteToDiscardableMemory(memory1.get());
110 }
111
112 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunks) {
113 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kPageSize));
114 ASSERT_TRUE(memory1);
115 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(kPageSize));
116 ASSERT_TRUE(memory2);
117 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize));
118 ASSERT_TRUE(memory3);
119 void* const memory1_address = memory1->Memory();
120 memory1.reset();
121 memory3.reset();
122 // Freeing |memory2| (located between memory1 and memory3) should merge the
123 // three free blocks together.
124 memory2.reset();
125 memory1.reset(allocator_.Allocate(3 * kPageSize).release());
126 EXPECT_EQ(memory1_address, memory1->Memory());
127 }
128
129 TEST_F(DiscardableMemoryAllocatorTest,
130 TooLargeFreeChunksDontCauseTooMuchFragmentationWhenRecycled) {
131 // Keep |memory_1| below allocated so that the ashmem region doesn't get
132 // closed when |memory_2| is deleted.
133 scoped_ptr<DiscardableMemory> memory_1(allocator_.Allocate(64 * 1024));
134 ASSERT_TRUE(memory_1);
135 scoped_ptr<DiscardableMemory> memory_2(allocator_.Allocate(32 * 1024));
136 ASSERT_TRUE(memory_2);
137 void* const address = memory_2->Memory();
138 memory_2.reset();
139 memory_2 = allocator_.Allocate(16 * 1024);
140 ASSERT_TRUE(memory_2);
141 EXPECT_EQ(address, memory_2->Memory());
142 WriteToDiscardableMemory(memory_2.get());
143 scoped_ptr<DiscardableMemory> memory_3(allocator_.Allocate(16 * 1024));
144 // The unused tail (16 KBytes large) of the previously freed chunk should be
145 // recycled.
146 EXPECT_EQ(static_cast<char*>(address) + memory_2->Size(), memory_3->Memory());
147 WriteToDiscardableMemory(memory_3.get());
148 }
149
150 TEST_F(DiscardableMemoryAllocatorTest, UseMultipleAshmemRegions) {
151 // Leave one page untouched at the end of the ashmem region.
152 scoped_ptr<DiscardableMemory> memory1(
153 allocator_.Allocate(kMinAshmemRegionSize - kPageSize));
154 ASSERT_TRUE(memory1);
155 WriteToDiscardableMemory(memory1.get());
156
157 scoped_ptr<DiscardableMemory> memory2(
158 allocator_.Allocate(kMinAshmemRegionSize));
159 ASSERT_TRUE(memory2);
160 WriteToDiscardableMemory(memory2.get());
161 // The last page of the first ashmem region should be used for this
162 // allocation.
pasko 2013/10/25 15:21:13 does ashmem guarantee the ranges of virtual addres
Philippe 2013/10/28 09:44:43 I'm not sure I see what you mean here? :) The firs
pasko 2013/10/28 14:40:06 sorry, please disregard, I misread the test
163 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize));
164 ASSERT_TRUE(memory3);
165 WriteToDiscardableMemory(memory3.get());
166 EXPECT_EQ(memory3->Memory(),
167 static_cast<char*>(memory1->Memory()) + memory1->Size());
168 }
169
170 // Returns -1 if an error happened during parsing.
171 int GetAllocatorDirtyMemoryInKBytes() {
pasko 2013/10/25 15:21:13 there is a similar function in disk_cache_memory_t
Philippe 2013/10/28 09:44:43 Yeah, I think I will just remove this function and
pasko 2013/10/28 14:40:06 np! I am glad this ended up with less code :)
172 const std::string smaps_path = StringPrintf("/proc/%d/smaps", getpid());
173 std::ifstream stream(smaps_path.c_str());
174 if (!stream.good())
175 return -1;
176 std::vector<std::string> lines;
177 for (std::string line; std::getline(stream, line); lines.push_back(line));
178 int dirty_kbytes = 0;
179 for (std::vector<std::string>::const_iterator it = lines.begin();
180 it != lines.end(); ++it) {
181 if (it->find(kAllocatorName) == std::string::npos)
182 continue;
183 const std::string& private_dirty_line = it[7];
184 if (private_dirty_line.find("Private_Dirty") == std::string::npos)
185 return -1;
186 std::vector<std::string> tokens;
187 SplitString(private_dirty_line, ' ', &tokens);
188 if (tokens.size() < 3U)
189 return -1;
190 int size;
191 if (!base::StringToInt(tokens[tokens.size() - 2], &size))
192 return -1;
193 dirty_kbytes += size;
194 it += 6;
pasko 2013/10/25 15:21:13 creating a vector of lines just to be able to skip
195 }
196 return dirty_kbytes;
197 }
198
199 TEST_F(DiscardableMemoryAllocatorTest, PagesAreCommittedLazily) {
200 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(2 * 4096));
201 ASSERT_TRUE(memory);
202 EXPECT_EQ(0, GetAllocatorDirtyMemoryInKBytes());
203 static_cast<char*>(memory->Memory())[0] = 'a';
204 EXPECT_EQ(4, GetAllocatorDirtyMemoryInKBytes());
pasko 2013/10/25 15:21:13 isn't this a subject to race conditions with /proc
Philippe 2013/10/28 09:44:43 It's possible indeed if /proc reporting works by r
205 // Write to the second page.
206 static_cast<char*>(memory->Memory())[kPageSize] = 'b';
207 EXPECT_EQ(8, GetAllocatorDirtyMemoryInKBytes());
208 }
209
210 } // namespace internal
211 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698