Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(74)

Side by Side Diff: base/memory/discardable_memory_allocator_android_unittest.cc

Issue 25293002: Add DiscardableMemoryAllocator to work around FD limit issue. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Fix comment Created 7 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
(Empty)
1 // Copyright 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
#include "base/memory/discardable_memory_allocator_android.h"

#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

#include "base/memory/discardable_memory.h"
#include "base/memory/scoped_ptr.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
17
18 namespace base {
19 namespace internal {
20
21 const char kAllocatorName[] = "allocator-for-testing";
22
23 const size_t kPageSize = 4096;
24 const size_t kMinAshmemRegionSize =
25 DiscardableMemoryAllocator::kMinAshmemRegionSize;
26
27 class DiscardableMemoryAllocatorTest : public testing::Test {
28 protected:
29 DiscardableMemoryAllocatorTest() : allocator_(kAllocatorName) {}
30
31 DiscardableMemoryAllocator allocator_;
32 };
33
34 void WriteToDiscardableMemory(DiscardableMemory* memory, size_t size) {
35 // Write to the first and the last pages only to avoid paging in up to 64
36 // MBytes.
37 static_cast<char*>(memory->Memory())[0] = 'a';
38 static_cast<char*>(memory->Memory())[size - 1] = 'a';
39 }
40
41 TEST_F(DiscardableMemoryAllocatorTest, Basic) {
42 const size_t size = 128;
43 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(size));
44 ASSERT_TRUE(memory);
45 WriteToDiscardableMemory(memory.get(), size);
46 }
47
48 TEST_F(DiscardableMemoryAllocatorTest, LargeAllocation) {
49 const size_t size = 64 * 1024 * 1024;
50 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(size));
51 ASSERT_TRUE(memory);
52 WriteToDiscardableMemory(memory.get(), size);
53 }
54
55 TEST_F(DiscardableMemoryAllocatorTest, ChunksArePageAligned) {
56 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize));
57 ASSERT_TRUE(memory);
58 EXPECT_EQ(0U, reinterpret_cast<uint64_t>(memory->Memory()) % kPageSize);
59 WriteToDiscardableMemory(memory.get(), kPageSize);
60 }
61
62 TEST_F(DiscardableMemoryAllocatorTest, AllocateFreeAllocate) {
63 scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize));
64 ASSERT_TRUE(memory);
65 void* const address = memory->Memory();
66 memory->Unlock(); // Tests that the recycled chunk is being locked correctly.
67 memory.reset();
68 memory = allocator_.Allocate(kPageSize);
69 ASSERT_TRUE(memory);
70 // The previously freed chunk should be reused.
71 EXPECT_EQ(address, memory->Memory());
72 WriteToDiscardableMemory(memory.get(), kPageSize);
73 }
74
75 TEST_F(DiscardableMemoryAllocatorTest, FreeingWholeAshmemRegionClosesAshmem) {
76 scoped_ptr<DiscardableMemory> memory(
77 allocator_.Allocate(kMinAshmemRegionSize));
78 ASSERT_TRUE(memory);
79 const int kMagic = 0xdeadbeef;
80 *static_cast<int*>(memory->Memory()) = kMagic;
81 memory.reset();
82 // The previous ashmem region should have been closed thus it should not be
83 // recycled.
84 memory = allocator_.Allocate(kPageSize);
85 ASSERT_TRUE(memory);
86 EXPECT_NE(kMagic, *static_cast<const int*>(memory->Memory()));
87 }
88
89 TEST_F(DiscardableMemoryAllocatorTest, AllocateUsesBestFitAlgorithm) {
90 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(3 * kPageSize));
91 ASSERT_TRUE(memory1);
92 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(2 * kPageSize));
93 ASSERT_TRUE(memory2);
94 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(1 * kPageSize));
95 ASSERT_TRUE(memory3);
96 void* const address_3 = memory3->Memory();
97 memory1.reset();
98 // Don't free |memory2| to avoid merging the 3 blocks together.
99 memory3.reset();
100 memory1 = allocator_.Allocate(1 * kPageSize);
101 ASSERT_TRUE(memory1);
102 // The chunk whose size is closest to the requested size should be recycled.
103 EXPECT_EQ(address_3, memory1->Memory());
104 WriteToDiscardableMemory(memory1.get(), kPageSize);
105 }
106
107 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunks) {
108 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kPageSize));
109 ASSERT_TRUE(memory1);
110 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(kPageSize));
111 ASSERT_TRUE(memory2);
112 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize));
113 ASSERT_TRUE(memory3);
114 void* const memory1_address = memory1->Memory();
115 memory1.reset();
116 memory3.reset();
117 // Freeing |memory2| (located between memory1 and memory3) should merge the
118 // three free blocks together.
119 memory2.reset();
120 memory1.reset(allocator_.Allocate(3 * kPageSize).release());
121 EXPECT_EQ(memory1_address, memory1->Memory());
122 }
123
124 TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunksAdvanced) {
125 const size_t kHalfRegionSize = kMinAshmemRegionSize / 2;
126 const size_t kQuarterRegionSize = kMinAshmemRegionSize / 4;
127 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kHalfRegionSize));
128 ASSERT_TRUE(memory1);
129 scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(kHalfRegionSize));
130 ASSERT_TRUE(memory2);
131 void* const memory1_address = memory1->Memory();
132 memory1.reset();
133 memory1 = allocator_.Allocate(kQuarterRegionSize);
134 memory2.reset();
135 // At this point, the region should be in this state:
136 // 8 MBytes (used), 24 MBytes (free).
137 memory2 = allocator_.Allocate(kMinAshmemRegionSize - kQuarterRegionSize);
138 EXPECT_EQ(
139 static_cast<const char*>(memory2->Memory()),
140 static_cast<const char*>(memory1_address) + kQuarterRegionSize);
141 }
142
143 TEST_F(DiscardableMemoryAllocatorTest,
144 TooLargeFreeChunksDontCauseTooMuchFragmentationWhenRecycled) {
145 // Keep |memory_1| below allocated so that the ashmem region doesn't get
146 // closed when |memory_2| is deleted.
147 scoped_ptr<DiscardableMemory> memory_1(allocator_.Allocate(64 * 1024));
148 ASSERT_TRUE(memory_1);
149 scoped_ptr<DiscardableMemory> memory_2(allocator_.Allocate(32 * 1024));
150 ASSERT_TRUE(memory_2);
151 void* const address = memory_2->Memory();
152 memory_2.reset();
153 const size_t size = 16 * 1024;
154 memory_2 = allocator_.Allocate(size);
155 ASSERT_TRUE(memory_2);
156 EXPECT_EQ(address, memory_2->Memory());
157 WriteToDiscardableMemory(memory_2.get(), size);
158 scoped_ptr<DiscardableMemory> memory_3(allocator_.Allocate(size));
159 // The unused tail (16 KBytes large) of the previously freed chunk should be
160 // recycled.
161 EXPECT_EQ(static_cast<char*>(address) + size, memory_3->Memory());
162 WriteToDiscardableMemory(memory_3.get(), size);
163 }
164
165 TEST_F(DiscardableMemoryAllocatorTest, UseMultipleAshmemRegions) {
166 // Leave one page untouched at the end of the ashmem region.
167 const size_t size = kMinAshmemRegionSize - kPageSize;
168 scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(size));
169 ASSERT_TRUE(memory1);
170 WriteToDiscardableMemory(memory1.get(), size);
171
172 scoped_ptr<DiscardableMemory> memory2(
173 allocator_.Allocate(kMinAshmemRegionSize));
174 ASSERT_TRUE(memory2);
175 WriteToDiscardableMemory(memory2.get(), kMinAshmemRegionSize);
176 // The last page of the first ashmem region should be used for this
177 // allocation.
178 scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize));
179 ASSERT_TRUE(memory3);
180 WriteToDiscardableMemory(memory3.get(), kPageSize);
181 EXPECT_EQ(memory3->Memory(), static_cast<char*>(memory1->Memory()) + size);
182 }
183
184 } // namespace internal
185 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698