Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(868)

Side by Side Diff: gpu/command_buffer/client/mapped_memory_unittest.cc

Issue 116863003: gpu: Reuse transfer buffers more aggressively (Closed) Base URL: http://git.chromium.org/chromium/src.git@master
Patch Set: Added AsyncUploadSync test, FencedAllocator test, addressed review issues Created 6 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "gpu/command_buffer/client/mapped_memory.h" 5 #include "gpu/command_buffer/client/mapped_memory.h"
6 6
7 #include <list>
7 #include "base/bind.h" 8 #include "base/bind.h"
8 #include "base/memory/scoped_ptr.h" 9 #include "base/memory/scoped_ptr.h"
9 #include "base/message_loop/message_loop.h" 10 #include "base/message_loop/message_loop.h"
10 #include "gpu/command_buffer/client/cmd_buffer_helper.h" 11 #include "gpu/command_buffer/client/cmd_buffer_helper.h"
11 #include "gpu/command_buffer/service/command_buffer_service.h" 12 #include "gpu/command_buffer/service/command_buffer_service.h"
12 #include "gpu/command_buffer/service/gpu_scheduler.h" 13 #include "gpu/command_buffer/service/gpu_scheduler.h"
13 #include "gpu/command_buffer/service/mocks.h" 14 #include "gpu/command_buffer/service/mocks.h"
14 #include "gpu/command_buffer/service/transfer_buffer_manager.h" 15 #include "gpu/command_buffer/service/transfer_buffer_manager.h"
15 #include "testing/gtest/include/gtest/gtest.h" 16 #include "testing/gtest/include/gtest/gtest.h"
16 17
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after
78 scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_; 79 scoped_ptr<TransferBufferManagerInterface> transfer_buffer_manager_;
79 scoped_ptr<CommandBufferService> command_buffer_; 80 scoped_ptr<CommandBufferService> command_buffer_;
80 scoped_ptr<GpuScheduler> gpu_scheduler_; 81 scoped_ptr<GpuScheduler> gpu_scheduler_;
81 scoped_ptr<CommandBufferHelper> helper_; 82 scoped_ptr<CommandBufferHelper> helper_;
82 }; 83 };
83 84
84 #ifndef _MSC_VER 85 #ifndef _MSC_VER
85 const unsigned int MappedMemoryTestBase::kBufferSize; 86 const unsigned int MappedMemoryTestBase::kBufferSize;
86 #endif 87 #endif
87 88
namespace {

// No-op poll callback for tests that need to satisfy the poll-closure
// parameter of MemoryChunk/MappedMemoryManager but have nothing to poll.
void EmptyPoll() {}

}  // namespace
88 // Test fixture for MemoryChunk test - Creates a MemoryChunk, using a 94 // Test fixture for MemoryChunk test - Creates a MemoryChunk, using a
89 // CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling 95 // CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
90 // it directly, not through the RPC mechanism), making sure Noops are ignored 96 // it directly, not through the RPC mechanism), making sure Noops are ignored
91 // and SetToken are properly forwarded to the engine. 97 // and SetToken are properly forwarded to the engine.
92 class MemoryChunkTest : public MappedMemoryTestBase { 98 class MemoryChunkTest : public MappedMemoryTestBase {
93 protected: 99 protected:
94 static const int32 kShmId = 123; 100 static const int32 kShmId = 123;
95 virtual void SetUp() { 101 virtual void SetUp() {
96 MappedMemoryTestBase::SetUp(); 102 MappedMemoryTestBase::SetUp();
97 buffer_.reset(new uint8[kBufferSize]); 103 buffer_.reset(new uint8[kBufferSize]);
98 gpu::Buffer buf; 104 gpu::Buffer buf;
99 buf.size = kBufferSize; 105 buf.size = kBufferSize;
100 buf.ptr = buffer_.get(); 106 buf.ptr = buffer_.get();
101 chunk_.reset(new MemoryChunk(kShmId, buf, helper_.get())); 107 chunk_.reset(new MemoryChunk(kShmId,
108 buf,
109 helper_.get(),
110 base::Bind(&EmptyPoll)));
102 } 111 }
103 112
104 virtual void TearDown() { 113 virtual void TearDown() {
105 // If the GpuScheduler posts any tasks, this forces them to run. 114 // If the GpuScheduler posts any tasks, this forces them to run.
106 base::MessageLoop::current()->RunUntilIdle(); 115 base::MessageLoop::current()->RunUntilIdle();
107 116
108 MappedMemoryTestBase::TearDown(); 117 MappedMemoryTestBase::TearDown();
109 } 118 }
110 119
111 scoped_ptr<MemoryChunk> chunk_; 120 scoped_ptr<MemoryChunk> chunk_;
(...skipping 27 matching lines...) Expand all
139 EXPECT_LE(buffer_.get(), pointer_char); 148 EXPECT_LE(buffer_.get(), pointer_char);
140 EXPECT_GE(buffer_.get() + kBufferSize, pointer_char + kSize); 149 EXPECT_GE(buffer_.get() + kBufferSize, pointer_char + kSize);
141 EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting()); 150 EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
142 EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting()); 151 EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
143 chunk_->Free(pointer_char); 152 chunk_->Free(pointer_char);
144 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting()); 153 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
145 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting()); 154 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
146 } 155 }
147 156
148 class MappedMemoryManagerTest : public MappedMemoryTestBase { 157 class MappedMemoryManagerTest : public MappedMemoryTestBase {
158 public:
159 MappedMemoryManager* manager() const {
160 return manager_.get();
161 }
162
149 protected: 163 protected:
150 virtual void SetUp() { 164 virtual void SetUp() {
151 MappedMemoryTestBase::SetUp(); 165 MappedMemoryTestBase::SetUp();
152 manager_.reset(new MappedMemoryManager( 166 manager_.reset(new MappedMemoryManager(
153 helper_.get(), MappedMemoryManager::kNoLimit)); 167 helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
154 } 168 }
155 169
156 virtual void TearDown() { 170 virtual void TearDown() {
157 // If the GpuScheduler posts any tasks, this forces them to run. 171 // If the GpuScheduler posts any tasks, this forces them to run.
158 base::MessageLoop::current()->RunUntilIdle(); 172 base::MessageLoop::current()->RunUntilIdle();
159 manager_.reset(); 173 manager_.reset();
160 MappedMemoryTestBase::TearDown(); 174 MappedMemoryTestBase::TearDown();
161 } 175 }
162 176
163 scoped_ptr<MappedMemoryManager> manager_; 177 scoped_ptr<MappedMemoryManager> manager_;
(...skipping 139 matching lines...) Expand 10 before | Expand all | Expand 10 after
303 EXPECT_EQ(id1, id2); 317 EXPECT_EQ(id1, id2);
304 EXPECT_NE(id2, id3); 318 EXPECT_NE(id2, id3);
305 EXPECT_EQ(0u, offset1); 319 EXPECT_EQ(0u, offset1);
306 EXPECT_EQ(kSize, offset2); 320 EXPECT_EQ(kSize, offset2);
307 EXPECT_EQ(0u, offset3); 321 EXPECT_EQ(0u, offset3);
308 } 322 }
309 323
310 TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) { 324 TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
311 const unsigned int kChunkSize = 2048; 325 const unsigned int kChunkSize = 2048;
312 // Reset the manager with a memory limit. 326 // Reset the manager with a memory limit.
313 manager_.reset(new MappedMemoryManager(helper_.get(), kChunkSize)); 327 manager_.reset(new MappedMemoryManager(
328 helper_.get(), base::Bind(&EmptyPoll), kChunkSize));
314 manager_->set_chunk_size_multiple(kChunkSize); 329 manager_->set_chunk_size_multiple(kChunkSize);
315 330
316 // Allocate one chunk worth of memory. 331 // Allocate one chunk worth of memory.
317 int32 id1 = -1; 332 int32 id1 = -1;
318 unsigned int offset1 = 0xFFFFFFFFU; 333 unsigned int offset1 = 0xFFFFFFFFU;
319 void* mem1 = manager_->Alloc(kChunkSize, &id1, &offset1); 334 void* mem1 = manager_->Alloc(kChunkSize, &id1, &offset1);
320 ASSERT_TRUE(mem1); 335 ASSERT_TRUE(mem1);
321 EXPECT_NE(-1, id1); 336 EXPECT_NE(-1, id1);
322 EXPECT_EQ(0u, offset1); 337 EXPECT_EQ(0u, offset1);
323 338
324 // Allocate half a chunk worth of memory again. 339 // Allocate half a chunk worth of memory again.
325 // The same chunk will be used. 340 // The same chunk will be used.
326 int32 id2 = -1; 341 int32 id2 = -1;
327 unsigned int offset2 = 0xFFFFFFFFU; 342 unsigned int offset2 = 0xFFFFFFFFU;
328 void* mem2 = manager_->Alloc(kChunkSize, &id2, &offset2); 343 void* mem2 = manager_->Alloc(kChunkSize, &id2, &offset2);
329 ASSERT_TRUE(mem2); 344 ASSERT_TRUE(mem2);
330 EXPECT_NE(-1, id2); 345 EXPECT_NE(-1, id2);
331 EXPECT_EQ(0u, offset2); 346 EXPECT_EQ(0u, offset2);
332 347
333 // Expect two chunks to be allocated, exceeding the limit, 348 // Expect two chunks to be allocated, exceeding the limit,
334 // since all memory is in use. 349 // since all memory is in use.
335 EXPECT_EQ(2 * kChunkSize, manager_->allocated_memory()); 350 EXPECT_EQ(2 * kChunkSize, manager_->allocated_memory());
336 } 351 }
337 352
338 TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) { 353 TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
339 const unsigned int kSize = 1024; 354 const unsigned int kSize = 1024;
340 // Reset the manager with a memory limit. 355 // Reset the manager with a memory limit.
341 manager_.reset(new MappedMemoryManager(helper_.get(), kSize)); 356 manager_.reset(new MappedMemoryManager(
357 helper_.get(), base::Bind(&EmptyPoll), kSize));
342 const unsigned int kChunkSize = 2 * 1024; 358 const unsigned int kChunkSize = 2 * 1024;
343 manager_->set_chunk_size_multiple(kChunkSize); 359 manager_->set_chunk_size_multiple(kChunkSize);
344 360
345 // Allocate half a chunk worth of memory. 361 // Allocate half a chunk worth of memory.
346 int32 id1 = -1; 362 int32 id1 = -1;
347 unsigned int offset1 = 0xFFFFFFFFU; 363 unsigned int offset1 = 0xFFFFFFFFU;
348 void* mem1 = manager_->Alloc(kSize, &id1, &offset1); 364 void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
349 ASSERT_TRUE(mem1); 365 ASSERT_TRUE(mem1);
350 EXPECT_NE(-1, id1); 366 EXPECT_NE(-1, id1);
351 EXPECT_EQ(0u, offset1); 367 EXPECT_EQ(0u, offset1);
(...skipping 25 matching lines...) Expand all
377 void* mem3 = manager_->Alloc(kSize, &id3, &offset3); 393 void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
378 ASSERT_TRUE(mem3); 394 ASSERT_TRUE(mem3);
379 EXPECT_NE(-1, id3); 395 EXPECT_NE(-1, id3);
380 // It will reuse the space from the second allocation just freed. 396 // It will reuse the space from the second allocation just freed.
381 EXPECT_EQ(kSize, offset3); 397 EXPECT_EQ(kSize, offset3);
382 398
383 // Expect one chunk to be allocated 399 // Expect one chunk to be allocated
384 EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory()); 400 EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory());
385 } 401 }
386 402
403 namespace {
404 void Poll(MappedMemoryManagerTest *test, std::list<void*>* list) {
405 std::list<void*>::iterator it = list->begin();
406 while (it != list->end()) {
407 void* address = *it;
408 test->manager()->Free(address);
409 it = list->erase(it);
410 }
411 }
412 }
413
414 TEST_F(MappedMemoryManagerTest, Poll) {
415 std::list<void*> unmanaged_memory_list;
416
417 const unsigned int kSize = 1024;
418 // Reset the manager with a memory limit.
419 manager_.reset(new MappedMemoryManager(
420 helper_.get(),
421 base::Bind(&Poll, this, &unmanaged_memory_list),
422 kSize));
423
424 // Allocate kSize bytes. Don't add the address to
425 // the unmanaged memory list, so that it won't be free:ed just yet.
426 int32 id1;
427 unsigned int offset1;
428 void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
429 EXPECT_EQ(manager_->bytes_in_use(), kSize);
430
431 // Allocate kSize more bytes, and make sure we grew.
432 int32 id2;
433 unsigned int offset2;
434 void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
435 EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
436
437 // Make the unmanaged buffer be released next time FreeUnused() is called
438 // in MappedMemoryManager/FencedAllocator. This happens for example when
439 // allocating new memory.
440 unmanaged_memory_list.push_back(mem1);
441
442 // Allocate kSize more bytes. This should poll unmanaged memory, which now
443 // should free the previously allocated unmanaged memory.
444 int32 id3;
445 unsigned int offset3;
446 void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
447 EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
448
449 manager_->Free(mem2);
450 manager_->Free(mem3);
451 EXPECT_EQ(manager_->bytes_in_use(), static_cast<size_t>(0));
452 }
453
387 } // namespace gpu 454 } // namespace gpu
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698