Chromium Code Reviews

Unified Diff: gpu/command_buffer/client/mapped_memory_unittest.cc

Issue 116863003: gpu: Reuse transfer buffers more aggressively (Closed) Base URL: http://git.chromium.org/chromium/src.git@master
Patch Set: Added AsyncUploadSync test, FencedAllocator test, addressed review issues. Created 6 years, 10 months ago.
Index: gpu/command_buffer/client/mapped_memory_unittest.cc
diff --git a/gpu/command_buffer/client/mapped_memory_unittest.cc b/gpu/command_buffer/client/mapped_memory_unittest.cc
index 90d1ce7666b8215377fc1facb915879b1291d193..53667f8685e1d1f35dc8730bf5c3488c0a32c79f 100644
--- a/gpu/command_buffer/client/mapped_memory_unittest.cc
+++ b/gpu/command_buffer/client/mapped_memory_unittest.cc
@@ -4,6 +4,7 @@
#include "gpu/command_buffer/client/mapped_memory.h"
+#include <list>
#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
@@ -85,6 +86,11 @@ class MappedMemoryTestBase : public testing::Test {
const unsigned int MappedMemoryTestBase::kBufferSize;
#endif
+namespace {
+void EmptyPoll() {
+}
+}
+
// Test fixture for MemoryChunk test - Creates a MemoryChunk, using a
// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
// it directly, not through the RPC mechanism), making sure Noops are ignored
@@ -98,7 +104,10 @@ class MemoryChunkTest : public MappedMemoryTestBase {
gpu::Buffer buf;
buf.size = kBufferSize;
buf.ptr = buffer_.get();
- chunk_.reset(new MemoryChunk(kShmId, buf, helper_.get()));
+ chunk_.reset(new MemoryChunk(kShmId,
+ buf,
+ helper_.get(),
+ base::Bind(&EmptyPoll)));
}
virtual void TearDown() {
@@ -146,11 +155,16 @@ TEST_F(MemoryChunkTest, Basic) {
}
class MappedMemoryManagerTest : public MappedMemoryTestBase {
+ public:
+ MappedMemoryManager* manager() const {
+ return manager_.get();
+ }
+
protected:
virtual void SetUp() {
MappedMemoryTestBase::SetUp();
manager_.reset(new MappedMemoryManager(
- helper_.get(), MappedMemoryManager::kNoLimit));
+ helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
}
virtual void TearDown() {
@@ -310,7 +324,8 @@ TEST_F(MappedMemoryManagerTest, ChunkSizeMultiple) {
TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
const unsigned int kChunkSize = 2048;
// Reset the manager with a memory limit.
- manager_.reset(new MappedMemoryManager(helper_.get(), kChunkSize));
+ manager_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), kChunkSize));
manager_->set_chunk_size_multiple(kChunkSize);
// Allocate one chunk worth of memory.
@@ -338,7 +353,8 @@ TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
const unsigned int kSize = 1024;
// Reset the manager with a memory limit.
- manager_.reset(new MappedMemoryManager(helper_.get(), kSize));
+ manager_.reset(new MappedMemoryManager(
+ helper_.get(), base::Bind(&EmptyPoll), kSize));
const unsigned int kChunkSize = 2 * 1024;
manager_->set_chunk_size_multiple(kChunkSize);
@@ -384,4 +400,55 @@ TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory());
}
+namespace {
+void Poll(MappedMemoryManagerTest* test, std::list<void*>* list) {
+ std::list<void*>::iterator it = list->begin();
+ while (it != list->end()) {
+ void* address = *it;
+ test->manager()->Free(address);
+ it = list->erase(it);
+ }
+}
+}
+
+TEST_F(MappedMemoryManagerTest, Poll) {
+ std::list<void*> unmanaged_memory_list;
+
+ const unsigned int kSize = 1024;
+ // Reset the manager with a memory limit.
+ manager_.reset(new MappedMemoryManager(
+ helper_.get(),
+ base::Bind(&Poll, this, &unmanaged_memory_list),
+ kSize));
+
+ // Allocate kSize bytes. Don't add the address to
+ // the unmanaged memory list, so that it won't be freed just yet.
+ int32 id1;
+ unsigned int offset1;
+ void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
+ EXPECT_EQ(manager_->bytes_in_use(), kSize);
+
+ // Allocate kSize more bytes, and make sure we grew.
+ int32 id2;
+ unsigned int offset2;
+ void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
+ EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
+
+ // Make the unmanaged buffer be released next time FreeUnused() is called
+ // in MappedMemoryManager/FencedAllocator. This happens for example when
+ // allocating new memory.
+ unmanaged_memory_list.push_back(mem1);
+
+ // Allocate kSize more bytes. This should poll unmanaged memory, which now
+ // should free the previously allocated unmanaged memory.
+ int32 id3;
+ unsigned int offset3;
+ void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
+ EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
+
+ manager_->Free(mem2);
+ manager_->Free(mem3);
+ EXPECT_EQ(manager_->bytes_in_use(), static_cast<size_t>(0));
+}
+
} // namespace gpu
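
Below is a minimal sketch (not part of this patch) of how a client might wire a poll callback into MappedMemoryManager, following the constructor call sites used in the tests above. The class and method names here (PendingReleaseQueue, DeferFree, ReleaseAll) are illustrative only; the actual constructor declaration lives in mapped_memory.h and is not shown in this diff.

// Minimal sketch, assuming the constructor argument order implied by the
// call sites in this patch: (helper, poll callback, unused-memory limit).
#include <list>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/memory/scoped_ptr.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/client/mapped_memory.h"

class PendingReleaseQueue {
 public:
  explicit PendingReleaseQueue(gpu::CommandBufferHelper* helper)
      : manager_(new gpu::MappedMemoryManager(
            helper,
            base::Bind(&PendingReleaseQueue::ReleaseAll,
                       base::Unretained(this)),
            gpu::MappedMemoryManager::kNoLimit)) {}

  // Queues an address to be freed the next time the manager polls,
  // e.g. while it looks for unused memory during a new allocation.
  void DeferFree(void* address) { pending_.push_back(address); }

  gpu::MappedMemoryManager* manager() { return manager_.get(); }

 private:
  // Invoked through the poll callback; frees everything queued so far.
  void ReleaseAll() {
    while (!pending_.empty()) {
      manager_->Free(pending_.front());
      pending_.pop_front();
    }
  }

  std::list<void*> pending_;
  scoped_ptr<gpu::MappedMemoryManager> manager_;
};

This mirrors the pattern in the Poll test, where the callback bound at construction time frees addresses collected in an external list the next time the manager checks for unused memory.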
