Chromium Code Reviews

Unified Diff: gpu/command_buffer/client/mapped_memory_unittest.cc

Issue 1186393004: gpu: Remove async texture uploads. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: rebase Created 5 years, 3 months ago
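The hunks below drop the poll-callback argument from the MemoryChunk and MappedMemoryManager constructors and delete the Poll test that exercised it. For orientation, here is a sketch of the change as seen from a caller, inferred only from the call sites updated in this test; parameter names and exact types are illustrative, and the authoritative declarations live in gpu/command_buffer/client/mapped_memory.h.

    // Sketch, inferred from the call sites in this test (not the actual header).
    // Before this patch: both constructors took a base::Closure that the
    // allocator invoked so the client could reclaim memory it tracked itself.
    MemoryChunk(int32 shm_id,
                scoped_refptr<Buffer> shm,
                CommandBufferHelper* helper,
                const base::Closure& poll_callback);
    MappedMemoryManager(CommandBufferHelper* helper,
                        const base::Closure& poll_callback,
                        size_t unused_memory_reclaim_limit);

    // After this patch: with async texture uploads gone, the callback
    // parameter is removed from both constructors.
    MemoryChunk(int32 shm_id,
                scoped_refptr<Buffer> shm,
                CommandBufferHelper* helper);
    MappedMemoryManager(CommandBufferHelper* helper,
                        size_t unused_memory_reclaim_limit);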
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "gpu/command_buffer/client/mapped_memory.h"
 
 #include <list>
 #include "base/bind.h"
 #include "base/memory/scoped_ptr.h"
 #include "base/message_loop/message_loop.h"
(...skipping 60 matching lines...)
   scoped_refptr<TransferBufferManagerInterface> transfer_buffer_manager_;
   scoped_ptr<CommandBufferService> command_buffer_;
   scoped_ptr<GpuScheduler> gpu_scheduler_;
   scoped_ptr<CommandBufferHelper> helper_;
 };
 
 #ifndef _MSC_VER
 const unsigned int MappedMemoryTestBase::kBufferSize;
 #endif
 
-namespace {
-void EmptyPoll() {
-}
-}
-
 // Test fixture for MemoryChunk test - Creates a MemoryChunk, using a
 // CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
 // it directly, not through the RPC mechanism), making sure Noops are ignored
 // and SetToken are properly forwarded to the engine.
 class MemoryChunkTest : public MappedMemoryTestBase {
  protected:
   static const int32 kShmId = 123;
   void SetUp() override {
     MappedMemoryTestBase::SetUp();
     scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory());
     shared_memory->CreateAndMapAnonymous(kBufferSize);
     buffer_ = MakeBufferFromSharedMemory(shared_memory.Pass(), kBufferSize);
-    chunk_.reset(new MemoryChunk(kShmId,
-                                 buffer_,
-                                 helper_.get(),
-                                 base::Bind(&EmptyPoll)));
+    chunk_.reset(new MemoryChunk(kShmId, buffer_, helper_.get()));
   }
 
   void TearDown() override {
     // If the GpuScheduler posts any tasks, this forces them to run.
     base::MessageLoop::current()->RunUntilIdle();
 
     MappedMemoryTestBase::TearDown();
   }
 
   uint8* buffer_memory() { return static_cast<uint8*>(buffer_->memory()); }
(...skipping 38 matching lines...)
 
 class MappedMemoryManagerTest : public MappedMemoryTestBase {
  public:
   MappedMemoryManager* manager() const {
     return manager_.get();
   }
 
  protected:
   void SetUp() override {
     MappedMemoryTestBase::SetUp();
-    manager_.reset(new MappedMemoryManager(
-        helper_.get(), base::Bind(&EmptyPoll), MappedMemoryManager::kNoLimit));
+    manager_.reset(
+        new MappedMemoryManager(helper_.get(), MappedMemoryManager::kNoLimit));
   }
 
   void TearDown() override {
     // If the GpuScheduler posts any tasks, this forces them to run.
     base::MessageLoop::current()->RunUntilIdle();
     manager_.reset();
     MappedMemoryTestBase::TearDown();
   }
 
   scoped_ptr<MappedMemoryManager> manager_;
(...skipping 133 matching lines...)
   EXPECT_EQ(0u, offset3);
 
   manager_->Free(mem1);
   manager_->Free(mem2);
   manager_->Free(mem3);
 }
 
 TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) {
   const unsigned int kChunkSize = 2048;
   // Reset the manager with a memory limit.
-  manager_.reset(new MappedMemoryManager(
-      helper_.get(), base::Bind(&EmptyPoll), kChunkSize));
+  manager_.reset(new MappedMemoryManager(helper_.get(), kChunkSize));
   manager_->set_chunk_size_multiple(kChunkSize);
 
   // Allocate one chunk worth of memory.
   int32 id1 = -1;
   unsigned int offset1 = 0xFFFFFFFFU;
   void* mem1 = manager_->Alloc(kChunkSize, &id1, &offset1);
   ASSERT_TRUE(mem1);
   EXPECT_NE(-1, id1);
   EXPECT_EQ(0u, offset1);
 
(...skipping 10 matching lines...)
   // since all memory is in use.
   EXPECT_EQ(2 * kChunkSize, manager_->allocated_memory());
 
   manager_->Free(mem1);
   manager_->Free(mem2);
 }
 
 TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) {
   const unsigned int kSize = 1024;
   // Reset the manager with a memory limit.
-  manager_.reset(new MappedMemoryManager(
-      helper_.get(), base::Bind(&EmptyPoll), kSize));
+  manager_.reset(new MappedMemoryManager(helper_.get(), kSize));
   const unsigned int kChunkSize = 2 * 1024;
   manager_->set_chunk_size_multiple(kChunkSize);
 
   // Allocate half a chunk worth of memory.
   int32 id1 = -1;
   unsigned int offset1 = 0xFFFFFFFFU;
   void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
   ASSERT_TRUE(mem1);
   EXPECT_NE(-1, id1);
   EXPECT_EQ(0u, offset1);
(...skipping 31 matching lines...)
   // Expect one chunk to be allocated
   EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory());
 
   manager_->Free(mem1);
   manager_->Free(mem3);
 }
 
 TEST_F(MappedMemoryManagerTest, MaxAllocationTest) {
   const unsigned int kSize = 1024;
   // Reset the manager with a memory limit.
-  manager_.reset(new MappedMemoryManager(
-      helper_.get(), base::Bind(&EmptyPoll), kSize));
+  manager_.reset(new MappedMemoryManager(helper_.get(), kSize));
 
   const size_t kLimit = 512;
   manager_->set_chunk_size_multiple(kLimit);
 
   // Allocate twice the limit worth of memory (currently unbounded).
   int32 id1 = -1;
   unsigned int offset1 = 0xFFFFFFFFU;
   void* mem1 = manager_->Alloc(kLimit, &id1, &offset1);
   ASSERT_TRUE(mem1);
   EXPECT_NE(-1, id1);
(...skipping 23 matching lines...)
   unsigned int offset4 = 0xFFFFFFFFU;
   void* mem4 = manager_->Alloc(kLimit, &id4, &offset4);
   ASSERT_TRUE(mem4);
   EXPECT_EQ(id2, id4);
   EXPECT_EQ(offset2, offset4);
 
   manager_->Free(mem1);
   manager_->Free(mem4);
 }
 
-namespace {
-void Poll(MappedMemoryManagerTest *test, std::list<void*>* list) {
-  std::list<void*>::iterator it = list->begin();
-  while (it != list->end()) {
-    void* address = *it;
-    test->manager()->Free(address);
-    it = list->erase(it);
-  }
-}
-}
-
-TEST_F(MappedMemoryManagerTest, Poll) {
-  std::list<void*> unmanaged_memory_list;
-
-  const unsigned int kSize = 1024;
-  // Reset the manager with a memory limit.
-  manager_.reset(new MappedMemoryManager(
-      helper_.get(),
-      base::Bind(&Poll, this, &unmanaged_memory_list),
-      kSize));
-
-  // Allocate kSize bytes. Don't add the address to
-  // the unmanaged memory list, so that it won't be free:ed just yet.
-  int32 id1;
-  unsigned int offset1;
-  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
-  EXPECT_EQ(manager_->bytes_in_use(), kSize);
-
-  // Allocate kSize more bytes, and make sure we grew.
-  int32 id2;
-  unsigned int offset2;
-  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
-  EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
-
-  // Make the unmanaged buffer be released next time FreeUnused() is called
-  // in MappedMemoryManager/FencedAllocator. This happens for example when
-  // allocating new memory.
-  unmanaged_memory_list.push_back(mem1);
-
-  // Allocate kSize more bytes. This should poll unmanaged memory, which now
-  // should free the previously allocated unmanaged memory.
-  int32 id3;
-  unsigned int offset3;
-  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
-  EXPECT_EQ(manager_->bytes_in_use(), kSize * 2);
-
-  manager_->Free(mem2);
-  manager_->Free(mem3);
-  EXPECT_EQ(manager_->bytes_in_use(), static_cast<size_t>(0));
-}
-
 }  // namespace gpu
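The deleted MappedMemoryManagerTest.Poll test above exercised the poll-callback path that this patch removes. As a compact recap, here is a sketch reconstructed from the deleted lines, using the pre-patch constructor (this is not part of the new API):

    // The test queued addresses it owned; the manager invoked the supplied
    // closure during a later Alloc, giving the test a chance to Free() them.
    std::list<void*> unmanaged_memory_list;
    manager_.reset(new MappedMemoryManager(
        helper_.get(),
        base::Bind(&Poll, this, &unmanaged_memory_list),  // Poll drains the list
        kSize));
    void* mem1 = manager_->Alloc(kSize, &id1, &offset1);  // nothing reclaimed yet
    unmanaged_memory_list.push_back(mem1);                // mark mem1 for release
    manager_->Alloc(kSize, &id2, &offset2);               // invokes Poll, frees mem1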