| Index: base/memory/discardable_memory_allocator_unittest.cc
|
| diff --git a/base/memory/discardable_memory_allocator_unittest.cc b/base/memory/discardable_memory_allocator_unittest.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..5c4344037d6f33305c9d122b0c773148c8153695
|
| --- /dev/null
|
| +++ b/base/memory/discardable_memory_allocator_unittest.cc
|
| @@ -0,0 +1,239 @@
|
| +// Copyright 2013 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
#include "base/memory/discardable_memory_allocator.h"

#include <stdint.h>

#include "base/logging.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/scoped_ptr.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
|
| +
|
| +#if defined(OS_ANDROID)
|
| +#include <sys/types.h>
|
| +#include <unistd.h>
|
| +
|
| +#include <fstream>
|
| +#include <iostream>
|
| +#include <string>
|
| +#include <vector>
|
| +
|
| +#include "base/strings/string_number_conversions.h"
|
| +#include "base/strings/string_split.h"
|
| +#include "base/strings/stringprintf.h"
|
| +#endif
|
| +
|
| +namespace base {
|
| +
|
| +scoped_ptr<DiscardableMemoryAllocator>
|
| +CreateDiscardableMemoryAllocatorForTesting(const std::string& name);
|
| +
|
| +namespace {
|
| +
|
| +const char kAllocatorName[] = "allocator-for-testing";
|
| +const size_t kPageSize = 4096;
|
| +
|
| +scoped_ptr<DiscardableMemoryAllocator> CreateAllocator() {
|
| + return CreateDiscardableMemoryAllocatorForTesting(kAllocatorName);
|
| +}
|
| +
|
| +void WriteToDiscardableMemory(DiscardableMemory* memory) {
|
| + DCHECK_EQ(0U, memory->Size() % sizeof(int));
|
| + // Write to the first and the last pages only to avoid paging in up to 64
|
| + // MBytes.
|
| + static_cast<int*>(memory->Memory())[0] = 1;
|
| + static_cast<int*>(memory->Memory())[memory->Size() / sizeof(int) - 1] = 1;
|
| +}
|
| +
|
| +TEST(DiscardableMemoryAllocatorTest, Basic) {
|
| + const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
|
| + ASSERT_TRUE(allocator);
|
| + scoped_ptr<DiscardableMemory> memory(allocator->Allocate(128 * sizeof(int)));
|
| + ASSERT_TRUE(memory);
|
| + WriteToDiscardableMemory(memory.get());
|
| +}
|
| +
|
| +TEST(DiscardableMemoryAllocatorTest, LargeAllocation) {
|
| + const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
|
| + ASSERT_TRUE(allocator);
|
| + scoped_ptr<DiscardableMemory> memory(allocator->Allocate(64 * 1024 * 1024));
|
| + ASSERT_TRUE(memory);
|
| + WriteToDiscardableMemory(memory.get());
|
| +}
|
| +
|
| +TEST(DiscardableMemoryAllocatorTest, ChunksArePageAligned) {
|
| + const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
|
| + ASSERT_TRUE(allocator);
|
| + scoped_ptr<DiscardableMemory> memory(allocator->Allocate(kPageSize));
|
| + ASSERT_TRUE(memory);
|
| + EXPECT_EQ(0U, reinterpret_cast<uint64_t>(memory->Memory()) % kPageSize);
|
| + WriteToDiscardableMemory(memory.get());
|
| +}
|
| +
|
| +#if defined(OS_ANDROID)
|
| +
|
| +const size_t kAshmemRegionSize = 32 * 1024 * 1024;
|
| +
|
// Checks that a freed chunk is recycled by the next same-sized allocation and
// that the recycled chunk is handed back locked even if it was unlocked when
// freed. The statement order (allocate, unlock, free, allocate) is what the
// test is about — do not reorder.
TEST(DiscardableMemoryAllocatorTest, AllocateFreeAllocate) {
  const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
  ASSERT_TRUE(allocator);
  scoped_ptr<DiscardableMemory> memory(allocator->Allocate(kPageSize));
  ASSERT_TRUE(memory);
  void* const address = memory->Memory();
  memory->Unlock();  // Tests that the recycled chunk is being locked correctly.
  memory.reset();
  memory = allocator->Allocate(kPageSize);
  ASSERT_TRUE(memory);
  // The previously freed chunk should be reused.
  EXPECT_EQ(address, memory->Memory());
  WriteToDiscardableMemory(memory.get());
}
|
| +
|
// Checks that freeing an entire ashmem region closes it rather than recycling
// it: memory obtained afterwards must come from a fresh region and therefore
// must not contain the magic value written before the free.
TEST(DiscardableMemoryAllocatorTest, FreeingWholeAshmemRegionClosesAshmem) {
  const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
  ASSERT_TRUE(allocator);
  scoped_ptr<DiscardableMemory> memory(allocator->Allocate(kAshmemRegionSize));
  ASSERT_TRUE(memory);
  // Tag the region so that (incorrectly) recycled memory can be detected.
  const int kMagic = 0xdeadbeef;
  *static_cast<int*>(memory->Memory()) = kMagic;
  memory.reset();
  // The previous ashmem region should have been closed thus it should not be
  // recycled.
  memory = allocator->Allocate(kPageSize);
  ASSERT_TRUE(memory);
  EXPECT_NE(kMagic, *static_cast<const int*>(memory->Memory()));
}
|
| +
|
// Checks that among multiple free chunks, the allocator recycles the one whose
// size is closest to the request (best fit). The exact allocate/free order
// below is what creates the two separate free chunks — do not reorder.
TEST(DiscardableMemoryAllocatorTest, AllocateUsesBestFitAlgorithm) {
  const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
  ASSERT_TRUE(allocator);
  scoped_ptr<DiscardableMemory> memory1(allocator->Allocate(3 * kPageSize));
  ASSERT_TRUE(memory1);
  scoped_ptr<DiscardableMemory> memory2(allocator->Allocate(2 * kPageSize));
  ASSERT_TRUE(memory2);
  scoped_ptr<DiscardableMemory> memory3(allocator->Allocate(1 * kPageSize));
  ASSERT_TRUE(memory3);
  void* const address_3 = memory3->Memory();
  memory1.reset();
  // Don't free |memory2| to avoid merging the 3 blocks together. This leaves
  // two distinct free chunks: 3 pages and (after the reset below) 1 page.
  memory3.reset();
  memory1 = allocator->Allocate(1 * kPageSize);
  ASSERT_TRUE(memory1);
  // The chunk whose size is closest to the requested size should be recycled.
  EXPECT_EQ(address_3, memory1->Memory());
  WriteToDiscardableMemory(memory1.get());
}
|
| +
|
| +TEST(DiscardableMemoryAllocatorTest, MergeFreeChunks) {
|
| + const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
|
| + ASSERT_TRUE(allocator);
|
| + scoped_ptr<DiscardableMemory> memory1(allocator->Allocate(kPageSize));
|
| + ASSERT_TRUE(memory1);
|
| + scoped_ptr<DiscardableMemory> memory2(allocator->Allocate(kPageSize));
|
| + ASSERT_TRUE(memory2);
|
| + scoped_ptr<DiscardableMemory> memory3(allocator->Allocate(kPageSize));
|
| + ASSERT_TRUE(memory3);
|
| + void* const memory1_address = memory1->Memory();
|
| + memory1.reset();
|
| + memory3.reset();
|
| + // Freeing |memory2| (located between memory1 and memory3) should merge the
|
| + // three free blocks together.
|
| + memory2.reset();
|
| + memory1.reset(allocator->Allocate(3 * kPageSize).release());
|
| + EXPECT_EQ(memory1_address, memory1->Memory());
|
| +}
|
| +
|
| +TEST(DiscardableMemoryAllocatorTest,
|
| + TooLargeFreeChunksDontCauseTooMuchFragmentationWhenRecycled) {
|
| + const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
|
| + ASSERT_TRUE(allocator);
|
| + // Keep |memory_1| below allocated so that the ashmem region doesn't get
|
| + // closed when |memory_2| is deleted.
|
| + scoped_ptr<DiscardableMemory> memory_1(allocator->Allocate(64 * 1024));
|
| + ASSERT_TRUE(memory_1);
|
| + scoped_ptr<DiscardableMemory> memory_2(allocator->Allocate(32 * 1024));
|
| + ASSERT_TRUE(memory_2);
|
| + void* const address = memory_2->Memory();
|
| + memory_2.reset();
|
| + memory_2 = allocator->Allocate(16 * 1024);
|
| + ASSERT_TRUE(memory_2);
|
| + EXPECT_EQ(address, memory_2->Memory());
|
| + WriteToDiscardableMemory(memory_2.get());
|
| + scoped_ptr<DiscardableMemory> memory_3(allocator->Allocate(16 * 1024));
|
| + // The unused tail (16 KBytes large) of the previously freed chunk should be
|
| + // recycled.
|
| + EXPECT_EQ(static_cast<char*>(address) + memory_2->Size(), memory_3->Memory());
|
| + WriteToDiscardableMemory(memory_3.get());
|
| +}
|
| +
|
| +TEST(DiscardableMemoryAllocatorTest, UseMultipleAshmemRegions) {
|
| + const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
|
| + ASSERT_TRUE(allocator);
|
| + const size_t kAshmemRegionSize = 32 * 1024 * 1024;
|
| + // Leave one page untouched at the end of the ashmem region.
|
| + scoped_ptr<DiscardableMemory> memory1(
|
| + allocator->Allocate(kAshmemRegionSize - kPageSize));
|
| + ASSERT_TRUE(memory1);
|
| + WriteToDiscardableMemory(memory1.get());
|
| +
|
| + scoped_ptr<DiscardableMemory> memory2(
|
| + allocator->Allocate(kAshmemRegionSize));
|
| + ASSERT_TRUE(memory2);
|
| + WriteToDiscardableMemory(memory2.get());
|
| + // The last page of the first ashmem region should be used for this
|
| + // allocation.
|
| + scoped_ptr<DiscardableMemory> memory3(
|
| + allocator->Allocate(kPageSize));
|
| + ASSERT_TRUE(memory3);
|
| + WriteToDiscardableMemory(memory3.get());
|
| + EXPECT_EQ(memory3->Memory(),
|
| + static_cast<char*>(memory1->Memory()) + memory1->Size());
|
| +}
|
| +
|
| +// Returns -1 if an error happened during parsing.
|
| +int GetAllocatorDirtyMemoryInKBytes() {
|
| + const std::string smaps_path = StringPrintf("/proc/%d/smaps", getpid());
|
| + std::ifstream stream(smaps_path.c_str());
|
| + if (!stream.good())
|
| + return -1;
|
| + std::vector<std::string> lines;
|
| + for (std::string line; std::getline(stream, line); lines.push_back(line));
|
| + int dirty_kbytes = 0;
|
| + for (std::vector<std::string>::const_iterator it = lines.begin();
|
| + it != lines.end(); ++it) {
|
| + if (it->find(kAllocatorName) == std::string::npos)
|
| + continue;
|
| + const std::string& private_dirty_line = it[7];
|
| + if (private_dirty_line.find("Private_Dirty") == std::string::npos)
|
| + return -1;
|
| + std::vector<std::string> tokens;
|
| + SplitString(private_dirty_line, ' ', &tokens);
|
| + if (tokens.size() < 3U)
|
| + return -1;
|
| + int size;
|
| + if (!base::StringToInt(tokens[tokens.size() - 2], &size))
|
| + return -1;
|
| + dirty_kbytes += size;
|
| + it += 6;
|
| + }
|
| + return dirty_kbytes;
|
| +}
|
| +
|
| +TEST(DiscardableMemoryAllocatorTest, PagesAreCommittedLazily) {
|
| + const scoped_ptr<DiscardableMemoryAllocator> allocator(CreateAllocator());
|
| + ASSERT_TRUE(allocator);
|
| + scoped_ptr<DiscardableMemory> memory(allocator->Allocate(2 * 4096));
|
| + ASSERT_TRUE(memory);
|
| + EXPECT_EQ(0, GetAllocatorDirtyMemoryInKBytes());
|
| + static_cast<char*>(memory->Memory())[0] = 'a';
|
| + EXPECT_EQ(4, GetAllocatorDirtyMemoryInKBytes());
|
| + // Write to the second page.
|
| + static_cast<char*>(memory->Memory())[kPageSize] = 'b';
|
| + EXPECT_EQ(8, GetAllocatorDirtyMemoryInKBytes());
|
| +}
|
| +
|
| +#endif // OS_ANDROID
|
| +
|
| +} // namespace
|
| +} // namespace base
|
|
|