Index: base/memory/discardable_memory_allocator_android_unittest.cc
diff --git a/base/memory/discardable_memory_allocator_android_unittest.cc b/base/memory/discardable_memory_allocator_android_unittest.cc
new file mode 100644
index 0000000000000000000000000000000000000000..4143d9c9283f8a4b5e9ca35cd578ad3a0fa11520
--- /dev/null
+++ b/base/memory/discardable_memory_allocator_android_unittest.cc
@@ -0,0 +1,211 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/discardable_memory_allocator_android.h"
+
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <fstream>
+#include <string>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/memory/discardable_memory.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+const char kAllocatorName[] = "allocator-for-testing";
+
+const size_t kPageSize = 4096;
+const size_t kMinAshmemRegionSize =
+    DiscardableMemoryAllocator::kMinAshmemRegionSize;
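Note: kPageSize is hardcoded to 4 KB here, which holds on the Android targets this test runs on. A hypothetical, more portable spelling (not part of this CL) would query the kernel via the already-included <unistd.h>:

    // Sketch only: derive the page size at runtime rather than assuming 4096.
    const size_t kPageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));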
+
+class DiscardableMemoryAllocatorTest : public testing::Test {
+ protected:
+  DiscardableMemoryAllocatorTest() : allocator_(kAllocatorName) {}
+
+  DiscardableMemoryAllocator allocator_;
+};
+
+void WriteToDiscardableMemory(DiscardableMemory* memory) {
+  DCHECK_EQ(0U, memory->Size() % sizeof(int));
+  // Write to the first and the last pages only to avoid paging in up to 64
+  // MBytes.
+  static_cast<int*>(memory->Memory())[0] = 1;
+  static_cast<int*>(memory->Memory())[memory->Size() / sizeof(int) - 1] = 1;
pasko
2013/10/25 15:21:13
if you wrote 'char', you would not have had to divide ...
Philippe
2013/10/28 09:44:43
Yeah, good point. I'm also a big supporter of sizeof ...
+}
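A minimal sketch of the char-based variant pasko suggests in the thread above (hypothetical, not part of this CL): since sizeof(char) is 1, the last-byte index needs no division:

    void WriteToDiscardableMemoryChar(DiscardableMemory* memory) {
      // Touch only the first and last bytes to avoid paging in the whole
      // region; with char, the last index is simply Size() - 1.
      static_cast<char*>(memory->Memory())[0] = 1;
      static_cast<char*>(memory->Memory())[memory->Size() - 1] = 1;
    }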
+
+TEST_F(DiscardableMemoryAllocatorTest, Basic) {
+  scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(128 * sizeof(int)));
+  ASSERT_TRUE(memory);
+  WriteToDiscardableMemory(memory.get());
+}
+
+TEST_F(DiscardableMemoryAllocatorTest, LargeAllocation) {
+  scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(64 * 1024 * 1024));
+  ASSERT_TRUE(memory);
+  WriteToDiscardableMemory(memory.get());
+}
+
+TEST_F(DiscardableMemoryAllocatorTest, ChunksArePageAligned) {
+  scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize));
+  ASSERT_TRUE(memory);
+  EXPECT_EQ(0U, reinterpret_cast<uintptr_t>(memory->Memory()) % kPageSize);
+  WriteToDiscardableMemory(memory.get());
+}
+
+TEST_F(DiscardableMemoryAllocatorTest, AllocateFreeAllocate) {
+  scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(kPageSize));
+  ASSERT_TRUE(memory);
+  void* const address = memory->Memory();
+  memory->Unlock();  // Tests that the recycled chunk is being locked correctly.
+  memory.reset();
+  memory = allocator_.Allocate(kPageSize);
+  ASSERT_TRUE(memory);
+  // The previously freed chunk should be reused.
+  EXPECT_EQ(address, memory->Memory());
+  WriteToDiscardableMemory(memory.get());
+}
+
+TEST_F(DiscardableMemoryAllocatorTest, FreeingWholeAshmemRegionClosesAshmem) {
+  scoped_ptr<DiscardableMemory> memory(
+      allocator_.Allocate(kMinAshmemRegionSize));
+  ASSERT_TRUE(memory);
+  const int kMagic = 0xdeadbeef;
+  *static_cast<int*>(memory->Memory()) = kMagic;
+  memory.reset();
+  // The previous ashmem region should have been closed, thus it should not be
pasko
2013/10/25 15:21:13
Does this rely on the region having all chunks unlocked ...
Philippe
2013/10/28 09:44:43
As we discussed offline, GTest is full of (nice) su...
+  // recycled.
+  memory = allocator_.Allocate(kPageSize);
+  ASSERT_TRUE(memory);
+  EXPECT_NE(kMagic, *static_cast<const int*>(memory->Memory()));
+}
+
+TEST_F(DiscardableMemoryAllocatorTest, AllocateUsesBestFitAlgorithm) {
+  scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(3 * kPageSize));
+  ASSERT_TRUE(memory1);
+  scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(2 * kPageSize));
+  ASSERT_TRUE(memory2);
+  scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(1 * kPageSize));
+  ASSERT_TRUE(memory3);
+  void* const address_3 = memory3->Memory();
+  memory1.reset();
+  // Don't free |memory2| to avoid merging the 3 blocks together.
+  memory3.reset();
+  memory1 = allocator_.Allocate(1 * kPageSize);
+  ASSERT_TRUE(memory1);
+  // The chunk whose size is closest to the requested size should be recycled.
+  EXPECT_EQ(address_3, memory1->Memory());
+  WriteToDiscardableMemory(memory1.get());
+}
+
+TEST_F(DiscardableMemoryAllocatorTest, MergeFreeChunks) {
+  scoped_ptr<DiscardableMemory> memory1(allocator_.Allocate(kPageSize));
+  ASSERT_TRUE(memory1);
+  scoped_ptr<DiscardableMemory> memory2(allocator_.Allocate(kPageSize));
+  ASSERT_TRUE(memory2);
+  scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize));
+  ASSERT_TRUE(memory3);
+  void* const memory1_address = memory1->Memory();
+  memory1.reset();
+  memory3.reset();
+  // Freeing |memory2| (located between memory1 and memory3) should merge the
+  // three free blocks together.
+  memory2.reset();
+  memory1 = allocator_.Allocate(3 * kPageSize);
+  ASSERT_TRUE(memory1);
+  EXPECT_EQ(memory1_address, memory1->Memory());
+}
+
+TEST_F(DiscardableMemoryAllocatorTest,
+       TooLargeFreeChunksDontCauseTooMuchFragmentationWhenRecycled) {
+  // Keep |memory_1| allocated below so that the ashmem region doesn't get
+  // closed when |memory_2| is deleted.
+  scoped_ptr<DiscardableMemory> memory_1(allocator_.Allocate(64 * 1024));
+  ASSERT_TRUE(memory_1);
+  scoped_ptr<DiscardableMemory> memory_2(allocator_.Allocate(32 * 1024));
+  ASSERT_TRUE(memory_2);
+  void* const address = memory_2->Memory();
+  memory_2.reset();
+  memory_2 = allocator_.Allocate(16 * 1024);
+  ASSERT_TRUE(memory_2);
+  EXPECT_EQ(address, memory_2->Memory());
+  WriteToDiscardableMemory(memory_2.get());
+  scoped_ptr<DiscardableMemory> memory_3(allocator_.Allocate(16 * 1024));
+  ASSERT_TRUE(memory_3);
+  // The unused tail (16 KBytes large) of the previously freed chunk should be
+  // recycled.
+  EXPECT_EQ(static_cast<char*>(address) + memory_2->Size(), memory_3->Memory());
+  WriteToDiscardableMemory(memory_3.get());
+}
+
+TEST_F(DiscardableMemoryAllocatorTest, UseMultipleAshmemRegions) {
+  // Leave one page untouched at the end of the ashmem region.
+  scoped_ptr<DiscardableMemory> memory1(
+      allocator_.Allocate(kMinAshmemRegionSize - kPageSize));
+  ASSERT_TRUE(memory1);
+  WriteToDiscardableMemory(memory1.get());
+
+  scoped_ptr<DiscardableMemory> memory2(
+      allocator_.Allocate(kMinAshmemRegionSize));
+  ASSERT_TRUE(memory2);
+  WriteToDiscardableMemory(memory2.get());
+  // The last page of the first ashmem region should be used for this
+  // allocation.
pasko
2013/10/25 15:21:13
does ashmem guarantee the ranges of virtual addresses ...
Philippe
2013/10/28 09:44:43
I'm not sure I see what you mean here? :) The firs...
pasko
2013/10/28 14:40:06
sorry, please disregard, I misread the test
+  scoped_ptr<DiscardableMemory> memory3(allocator_.Allocate(kPageSize));
+  ASSERT_TRUE(memory3);
+  WriteToDiscardableMemory(memory3.get());
+  EXPECT_EQ(memory3->Memory(),
+            static_cast<char*>(memory1->Memory()) + memory1->Size());
+}
+
+// Returns -1 if an error happened during parsing.
+int GetAllocatorDirtyMemoryInKBytes() {
pasko
2013/10/25 15:21:13
there is a similar function in disk_cache_memory_t...
Philippe
2013/10/28 09:44:43
Yeah, I think I will just remove this function and ...
pasko
2013/10/28 14:40:06
np! I am glad this ended up with less code :)
+  const std::string smaps_path = StringPrintf("/proc/%d/smaps", getpid());
+  std::ifstream stream(smaps_path.c_str());
+  if (!stream.good())
+    return -1;
+  std::vector<std::string> lines;
+  for (std::string line; std::getline(stream, line); lines.push_back(line));
+  int dirty_kbytes = 0;
+  for (std::vector<std::string>::const_iterator it = lines.begin();
+       it != lines.end(); ++it) {
+    if (it->find(kAllocatorName) == std::string::npos)
+      continue;
+    // Private_Dirty is the seventh field line after the mapping header; bail
+    // out rather than index past the end of |lines|.
+    if (lines.end() - it < 8)
+      return -1;
+    const std::string& private_dirty_line = it[7];
+    if (private_dirty_line.find("Private_Dirty") == std::string::npos)
+      return -1;
+    std::vector<std::string> tokens;
+    SplitString(private_dirty_line, ' ', &tokens);
+    if (tokens.size() < 3U)
+      return -1;
+    int size;
+    if (!base::StringToInt(tokens[tokens.size() - 2], &size))
+      return -1;
+    dirty_kbytes += size;
+    it += 6;  // Together with the loop's ++it, this skips to Private_Dirty.
pasko
2013/10/25 15:21:13
creating a vector of lines just to be able to skip ...
+  }
+  return dirty_kbytes;
+}
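A streaming variant along the lines pasko suggests in the comment above (hypothetical sketch, assuming the same smaps layout, i.e. Private_Dirty appearing seven lines after the mapping header):

    int GetAllocatorDirtyMemoryInKBytesStreaming() {
      // Same contract as above: returns -1 on any parsing error.
      std::ifstream stream(
          StringPrintf("/proc/%d/smaps", getpid()).c_str());
      if (!stream.good())
        return -1;
      int dirty_kbytes = 0;
      for (std::string line; std::getline(stream, line);) {
        if (line.find(kAllocatorName) == std::string::npos)
          continue;
        // Skip Size, Rss, Pss, Shared_Clean, Shared_Dirty and Private_Clean.
        for (int i = 0; i < 6; ++i) {
          if (!std::getline(stream, line))
            return -1;
        }
        if (!std::getline(stream, line) ||
            line.find("Private_Dirty") == std::string::npos)
          return -1;
        std::vector<std::string> tokens;
        SplitString(line, ' ', &tokens);
        if (tokens.size() < 3U)
          return -1;
        int size;
        if (!base::StringToInt(tokens[tokens.size() - 2], &size))
          return -1;
        dirty_kbytes += size;
      }
      return dirty_kbytes;
    }

This avoids buffering the whole file just to be able to index forward from the matching line.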
+
+TEST_F(DiscardableMemoryAllocatorTest, PagesAreCommittedLazily) {
+  scoped_ptr<DiscardableMemory> memory(allocator_.Allocate(2 * kPageSize));
+  ASSERT_TRUE(memory);
+  EXPECT_EQ(0, GetAllocatorDirtyMemoryInKBytes());
+  static_cast<char*>(memory->Memory())[0] = 'a';
+  EXPECT_EQ(4, GetAllocatorDirtyMemoryInKBytes());
pasko
2013/10/25 15:21:13
isn't this subject to race conditions with /proc ...
Philippe
2013/10/28 09:44:43
It's possible indeed if /proc reporting works by r...
+  // Write to the second page.
+  static_cast<char*>(memory->Memory())[kPageSize] = 'b';
+  EXPECT_EQ(8, GetAllocatorDirtyMemoryInKBytes());
+}
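If the /proc race pasko raises above ever makes this flaky, one hypothetical mitigation (not part of this CL) is to poll until two consecutive smaps reads agree before asserting:

    int GetStableAllocatorDirtyMemoryInKBytes() {
      // Re-read smaps until two consecutive samples match, tolerating any lag
      // between the page fault and /proc accounting; gives up after a fixed
      // number of attempts and returns the last sample.
      int previous = GetAllocatorDirtyMemoryInKBytes();
      for (int attempt = 0; attempt < 10; ++attempt) {
        const int current = GetAllocatorDirtyMemoryInKBytes();
        if (current == previous)
          return current;
        previous = current;
      }
      return previous;
    }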
+
+}  // namespace internal
+}  // namespace base