| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "gpu/command_buffer/client/mapped_memory.h" | 5 #include "gpu/command_buffer/client/mapped_memory.h" |
| 6 | 6 |
| 7 #include <stddef.h> |
| 8 #include <stdint.h> |
| 9 |
| 7 #include <list> | 10 #include <list> |
| 8 #include "base/bind.h" | 11 #include "base/bind.h" |
| 9 #include "base/memory/scoped_ptr.h" | 12 #include "base/memory/scoped_ptr.h" |
| 10 #include "base/message_loop/message_loop.h" | 13 #include "base/message_loop/message_loop.h" |
| 11 #include "gpu/command_buffer/client/cmd_buffer_helper.h" | 14 #include "gpu/command_buffer/client/cmd_buffer_helper.h" |
| 12 #include "gpu/command_buffer/service/command_buffer_service.h" | 15 #include "gpu/command_buffer/service/command_buffer_service.h" |
| 13 #include "gpu/command_buffer/service/gpu_scheduler.h" | 16 #include "gpu/command_buffer/service/gpu_scheduler.h" |
| 14 #include "gpu/command_buffer/service/mocks.h" | 17 #include "gpu/command_buffer/service/mocks.h" |
| 15 #include "gpu/command_buffer/service/transfer_buffer_manager.h" | 18 #include "gpu/command_buffer/service/transfer_buffer_manager.h" |
| 16 #include "testing/gtest/include/gtest/gtest.h" | 19 #include "testing/gtest/include/gtest/gtest.h" |
| (...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 56 &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get()))); | 59 &GpuScheduler::PutChanged, base::Unretained(gpu_scheduler_.get()))); |
| 57 command_buffer_->SetGetBufferChangeCallback(base::Bind( | 60 command_buffer_->SetGetBufferChangeCallback(base::Bind( |
| 58 &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get()))); | 61 &GpuScheduler::SetGetBuffer, base::Unretained(gpu_scheduler_.get()))); |
| 59 | 62 |
| 60 api_mock_->set_engine(gpu_scheduler_.get()); | 63 api_mock_->set_engine(gpu_scheduler_.get()); |
| 61 | 64 |
| 62 helper_.reset(new CommandBufferHelper(command_buffer_.get())); | 65 helper_.reset(new CommandBufferHelper(command_buffer_.get())); |
| 63 helper_->Initialize(kBufferSize); | 66 helper_->Initialize(kBufferSize); |
| 64 } | 67 } |
| 65 | 68 |
| 66 int32 GetToken() { | 69 int32_t GetToken() { return command_buffer_->GetLastState().token; } |
| 67 return command_buffer_->GetLastState().token; | |
| 68 } | |
| 69 | 70 |
| 70 scoped_ptr<AsyncAPIMock> api_mock_; | 71 scoped_ptr<AsyncAPIMock> api_mock_; |
| 71 scoped_refptr<TransferBufferManagerInterface> transfer_buffer_manager_; | 72 scoped_refptr<TransferBufferManagerInterface> transfer_buffer_manager_; |
| 72 scoped_ptr<CommandBufferService> command_buffer_; | 73 scoped_ptr<CommandBufferService> command_buffer_; |
| 73 scoped_ptr<GpuScheduler> gpu_scheduler_; | 74 scoped_ptr<GpuScheduler> gpu_scheduler_; |
| 74 scoped_ptr<CommandBufferHelper> helper_; | 75 scoped_ptr<CommandBufferHelper> helper_; |
| 75 base::MessageLoop message_loop_; | 76 base::MessageLoop message_loop_; |
| 76 }; | 77 }; |
| 77 | 78 |
| 78 #ifndef _MSC_VER | 79 #ifndef _MSC_VER |
| 79 const unsigned int MappedMemoryTestBase::kBufferSize; | 80 const unsigned int MappedMemoryTestBase::kBufferSize; |
| 80 #endif | 81 #endif |
| 81 | 82 |
| 82 // Test fixture for MemoryChunk test - Creates a MemoryChunk, using a | 83 // Test fixture for MemoryChunk test - Creates a MemoryChunk, using a |
| 83 // CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling | 84 // CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling |
| 84 // it directly, not through the RPC mechanism), making sure Noops are ignored | 85 // it directly, not through the RPC mechanism), making sure Noops are ignored |
| 85 // and SetToken are properly forwarded to the engine. | 86 // and SetToken are properly forwarded to the engine. |
| 86 class MemoryChunkTest : public MappedMemoryTestBase { | 87 class MemoryChunkTest : public MappedMemoryTestBase { |
| 87 protected: | 88 protected: |
| 88 static const int32 kShmId = 123; | 89 static const int32_t kShmId = 123; |
| 89 void SetUp() override { | 90 void SetUp() override { |
| 90 MappedMemoryTestBase::SetUp(); | 91 MappedMemoryTestBase::SetUp(); |
| 91 scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory()); | 92 scoped_ptr<base::SharedMemory> shared_memory(new base::SharedMemory()); |
| 92 shared_memory->CreateAndMapAnonymous(kBufferSize); | 93 shared_memory->CreateAndMapAnonymous(kBufferSize); |
| 93 buffer_ = MakeBufferFromSharedMemory(std::move(shared_memory), kBufferSize); | 94 buffer_ = MakeBufferFromSharedMemory(std::move(shared_memory), kBufferSize); |
| 94 chunk_.reset(new MemoryChunk(kShmId, buffer_, helper_.get())); | 95 chunk_.reset(new MemoryChunk(kShmId, buffer_, helper_.get())); |
| 95 } | 96 } |
| 96 | 97 |
| 97 void TearDown() override { | 98 void TearDown() override { |
| 98 // If the GpuScheduler posts any tasks, this forces them to run. | 99 // If the GpuScheduler posts any tasks, this forces them to run. |
| 99 base::MessageLoop::current()->RunUntilIdle(); | 100 base::MessageLoop::current()->RunUntilIdle(); |
| 100 | 101 |
| 101 MappedMemoryTestBase::TearDown(); | 102 MappedMemoryTestBase::TearDown(); |
| 102 } | 103 } |
| 103 | 104 |
| 104 uint8* buffer_memory() { return static_cast<uint8*>(buffer_->memory()); } | 105 uint8_t* buffer_memory() { return static_cast<uint8_t*>(buffer_->memory()); } |
| 105 | 106 |
| 106 scoped_ptr<MemoryChunk> chunk_; | 107 scoped_ptr<MemoryChunk> chunk_; |
| 107 scoped_refptr<gpu::Buffer> buffer_; | 108 scoped_refptr<gpu::Buffer> buffer_; |
| 108 }; | 109 }; |
| 109 | 110 |
| 110 #ifndef _MSC_VER | 111 #ifndef _MSC_VER |
| 111 const int32 MemoryChunkTest::kShmId; | 112 const int32_t MemoryChunkTest::kShmId; |
| 112 #endif | 113 #endif |
| 113 | 114 |
| 114 TEST_F(MemoryChunkTest, Basic) { | 115 TEST_F(MemoryChunkTest, Basic) { |
| 115 const unsigned int kSize = 16; | 116 const unsigned int kSize = 16; |
| 116 EXPECT_EQ(kShmId, chunk_->shm_id()); | 117 EXPECT_EQ(kShmId, chunk_->shm_id()); |
| 117 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting()); | 118 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting()); |
| 118 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting()); | 119 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting()); |
| 119 EXPECT_EQ(kBufferSize, chunk_->GetSize()); | 120 EXPECT_EQ(kBufferSize, chunk_->GetSize()); |
| 120 void *pointer = chunk_->Alloc(kSize); | 121 void *pointer = chunk_->Alloc(kSize); |
| 121 ASSERT_TRUE(pointer); | 122 ASSERT_TRUE(pointer); |
| 122 EXPECT_LE(buffer_->memory(), static_cast<uint8*>(pointer)); | 123 EXPECT_LE(buffer_->memory(), static_cast<uint8_t*>(pointer)); |
| 123 EXPECT_GE(kBufferSize, | 124 EXPECT_GE(kBufferSize, |
| 124 static_cast<uint8*>(pointer) - buffer_memory() + kSize); | 125 static_cast<uint8_t*>(pointer) - buffer_memory() + kSize); |
| 125 EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting()); | 126 EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting()); |
| 126 EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting()); | 127 EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting()); |
| 127 EXPECT_EQ(kBufferSize, chunk_->GetSize()); | 128 EXPECT_EQ(kBufferSize, chunk_->GetSize()); |
| 128 | 129 |
| 129 chunk_->Free(pointer); | 130 chunk_->Free(pointer); |
| 130 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting()); | 131 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting()); |
| 131 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting()); | 132 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting()); |
| 132 | 133 |
| 133 uint8 *pointer_char = static_cast<uint8*>(chunk_->Alloc(kSize)); | 134 uint8_t* pointer_char = static_cast<uint8_t*>(chunk_->Alloc(kSize)); |
| 134 ASSERT_TRUE(pointer_char); | 135 ASSERT_TRUE(pointer_char); |
| 135 EXPECT_LE(buffer_memory(), pointer_char); | 136 EXPECT_LE(buffer_memory(), pointer_char); |
| 136 EXPECT_GE(buffer_memory() + kBufferSize, pointer_char + kSize); | 137 EXPECT_GE(buffer_memory() + kBufferSize, pointer_char + kSize); |
| 137 EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting()); | 138 EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting()); |
| 138 EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting()); | 139 EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting()); |
| 139 chunk_->Free(pointer_char); | 140 chunk_->Free(pointer_char); |
| 140 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting()); | 141 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting()); |
| 141 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting()); | 142 EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting()); |
| 142 } | 143 } |
| 143 | 144 |
| (...skipping 16 matching lines...) Expand all Loading... |
| 160 manager_.reset(); | 161 manager_.reset(); |
| 161 MappedMemoryTestBase::TearDown(); | 162 MappedMemoryTestBase::TearDown(); |
| 162 } | 163 } |
| 163 | 164 |
| 164 scoped_ptr<MappedMemoryManager> manager_; | 165 scoped_ptr<MappedMemoryManager> manager_; |
| 165 }; | 166 }; |
| 166 | 167 |
| 167 TEST_F(MappedMemoryManagerTest, Basic) { | 168 TEST_F(MappedMemoryManagerTest, Basic) { |
| 168 const unsigned int kSize = 1024; | 169 const unsigned int kSize = 1024; |
| 169 // Check we can alloc. | 170 // Check we can alloc. |
| 170 int32 id1 = -1; | 171 int32_t id1 = -1; |
| 171 unsigned int offset1 = 0xFFFFFFFFU; | 172 unsigned int offset1 = 0xFFFFFFFFU; |
| 172 void* mem1 = manager_->Alloc(kSize, &id1, &offset1); | 173 void* mem1 = manager_->Alloc(kSize, &id1, &offset1); |
| 173 ASSERT_TRUE(mem1); | 174 ASSERT_TRUE(mem1); |
| 174 EXPECT_NE(-1, id1); | 175 EXPECT_NE(-1, id1); |
| 175 EXPECT_EQ(0u, offset1); | 176 EXPECT_EQ(0u, offset1); |
| 176 // Check if we free and realloc the same size we get the same memory | 177 // Check if we free and realloc the same size we get the same memory |
| 177 int32 id2 = -1; | 178 int32_t id2 = -1; |
| 178 unsigned int offset2 = 0xFFFFFFFFU; | 179 unsigned int offset2 = 0xFFFFFFFFU; |
| 179 manager_->Free(mem1); | 180 manager_->Free(mem1); |
| 180 void* mem2 = manager_->Alloc(kSize, &id2, &offset2); | 181 void* mem2 = manager_->Alloc(kSize, &id2, &offset2); |
| 181 EXPECT_EQ(mem1, mem2); | 182 EXPECT_EQ(mem1, mem2); |
| 182 EXPECT_EQ(id1, id2); | 183 EXPECT_EQ(id1, id2); |
| 183 EXPECT_EQ(offset1, offset2); | 184 EXPECT_EQ(offset1, offset2); |
| 184 // Check if we allocate again we get different shared memory | 185 // Check if we allocate again we get different shared memory |
| 185 int32 id3 = -1; | 186 int32_t id3 = -1; |
| 186 unsigned int offset3 = 0xFFFFFFFFU; | 187 unsigned int offset3 = 0xFFFFFFFFU; |
| 187 void* mem3 = manager_->Alloc(kSize, &id3, &offset3); | 188 void* mem3 = manager_->Alloc(kSize, &id3, &offset3); |
| 188 ASSERT_TRUE(mem3 != NULL); | 189 ASSERT_TRUE(mem3 != NULL); |
| 189 EXPECT_NE(mem2, mem3); | 190 EXPECT_NE(mem2, mem3); |
| 190 EXPECT_NE(id2, id3); | 191 EXPECT_NE(id2, id3); |
| 191 EXPECT_EQ(0u, offset3); | 192 EXPECT_EQ(0u, offset3); |
| 192 // Free 3 and allocate 2 half size blocks. | 193 // Free 3 and allocate 2 half size blocks. |
| 193 manager_->Free(mem3); | 194 manager_->Free(mem3); |
| 194 int32 id4 = -1; | 195 int32_t id4 = -1; |
| 195 int32 id5 = -1; | 196 int32_t id5 = -1; |
| 196 unsigned int offset4 = 0xFFFFFFFFU; | 197 unsigned int offset4 = 0xFFFFFFFFU; |
| 197 unsigned int offset5 = 0xFFFFFFFFU; | 198 unsigned int offset5 = 0xFFFFFFFFU; |
| 198 void* mem4 = manager_->Alloc(kSize / 2, &id4, &offset4); | 199 void* mem4 = manager_->Alloc(kSize / 2, &id4, &offset4); |
| 199 void* mem5 = manager_->Alloc(kSize / 2, &id5, &offset5); | 200 void* mem5 = manager_->Alloc(kSize / 2, &id5, &offset5); |
| 200 ASSERT_TRUE(mem4 != NULL); | 201 ASSERT_TRUE(mem4 != NULL); |
| 201 ASSERT_TRUE(mem5 != NULL); | 202 ASSERT_TRUE(mem5 != NULL); |
| 202 EXPECT_EQ(id3, id4); | 203 EXPECT_EQ(id3, id4); |
| 203 EXPECT_EQ(id4, id5); | 204 EXPECT_EQ(id4, id5); |
| 204 EXPECT_EQ(0u, offset4); | 205 EXPECT_EQ(0u, offset4); |
| 205 EXPECT_EQ(kSize / 2u, offset5); | 206 EXPECT_EQ(kSize / 2u, offset5); |
| 206 manager_->Free(mem4); | 207 manager_->Free(mem4); |
| 207 manager_->Free(mem2); | 208 manager_->Free(mem2); |
| 208 manager_->Free(mem5); | 209 manager_->Free(mem5); |
| 209 } | 210 } |
| 210 | 211 |
| 211 TEST_F(MappedMemoryManagerTest, FreePendingToken) { | 212 TEST_F(MappedMemoryManagerTest, FreePendingToken) { |
| 212 const unsigned int kSize = 128; | 213 const unsigned int kSize = 128; |
| 213 const unsigned int kAllocCount = (kBufferSize / kSize) * 2; | 214 const unsigned int kAllocCount = (kBufferSize / kSize) * 2; |
| 214 CHECK(kAllocCount * kSize == kBufferSize * 2); | 215 CHECK(kAllocCount * kSize == kBufferSize * 2); |
| 215 | 216 |
| 216 // Allocate several buffers across multiple chunks. | 217 // Allocate several buffers across multiple chunks. |
| 217 void *pointers[kAllocCount]; | 218 void *pointers[kAllocCount]; |
| 218 for (unsigned int i = 0; i < kAllocCount; ++i) { | 219 for (unsigned int i = 0; i < kAllocCount; ++i) { |
| 219 int32 id = -1; | 220 int32_t id = -1; |
| 220 unsigned int offset = 0xFFFFFFFFu; | 221 unsigned int offset = 0xFFFFFFFFu; |
| 221 pointers[i] = manager_->Alloc(kSize, &id, &offset); | 222 pointers[i] = manager_->Alloc(kSize, &id, &offset); |
| 222 EXPECT_TRUE(pointers[i]); | 223 EXPECT_TRUE(pointers[i]); |
| 223 EXPECT_NE(id, -1); | 224 EXPECT_NE(id, -1); |
| 224 EXPECT_NE(offset, 0xFFFFFFFFu); | 225 EXPECT_NE(offset, 0xFFFFFFFFu); |
| 225 } | 226 } |
| 226 | 227 |
| 227 // Free one successful allocation, pending fence. | 228 // Free one successful allocation, pending fence. |
| 228 int32 token = helper_.get()->InsertToken(); | 229 int32_t token = helper_.get()->InsertToken(); |
| 229 manager_->FreePendingToken(pointers[0], token); | 230 manager_->FreePendingToken(pointers[0], token); |
| 230 | 231 |
| 231 // The way we hooked up the helper and engine, it won't process commands | 232 // The way we hooked up the helper and engine, it won't process commands |
| 232 // until it has to wait for something. Which means the token shouldn't have | 233 // until it has to wait for something. Which means the token shouldn't have |
| 233 // passed yet at this point. | 234 // passed yet at this point. |
| 234 EXPECT_GT(token, GetToken()); | 235 EXPECT_GT(token, GetToken()); |
| 235 // Force it to read up to the token | 236 // Force it to read up to the token |
| 236 helper_->Finish(); | 237 helper_->Finish(); |
| 237 // Check that the token has indeed passed. | 238 // Check that the token has indeed passed. |
| 238 EXPECT_LE(token, GetToken()); | 239 EXPECT_LE(token, GetToken()); |
| 239 | 240 |
| 240 // This allocation should use the spot just freed above. | 241 // This allocation should use the spot just freed above. |
| 241 int32 new_id = -1; | 242 int32_t new_id = -1; |
| 242 unsigned int new_offset = 0xFFFFFFFFu; | 243 unsigned int new_offset = 0xFFFFFFFFu; |
| 243 void* new_ptr = manager_->Alloc(kSize, &new_id, &new_offset); | 244 void* new_ptr = manager_->Alloc(kSize, &new_id, &new_offset); |
| 244 EXPECT_TRUE(new_ptr); | 245 EXPECT_TRUE(new_ptr); |
| 245 EXPECT_EQ(new_ptr, pointers[0]); | 246 EXPECT_EQ(new_ptr, pointers[0]); |
| 246 EXPECT_NE(new_id, -1); | 247 EXPECT_NE(new_id, -1); |
| 247 EXPECT_NE(new_offset, 0xFFFFFFFFu); | 248 EXPECT_NE(new_offset, 0xFFFFFFFFu); |
| 248 | 249 |
| 249 // Free up everything. | 250 // Free up everything. |
| 250 manager_->Free(new_ptr); | 251 manager_->Free(new_ptr); |
| 251 for (unsigned int i = 1; i < kAllocCount; ++i) { | 252 for (unsigned int i = 1; i < kAllocCount; ++i) { |
| 252 manager_->Free(pointers[i]); | 253 manager_->Free(pointers[i]); |
| 253 } | 254 } |
| 254 } | 255 } |
| 255 | 256 |
| 256 TEST_F(MappedMemoryManagerTest, FreeUnused) { | 257 TEST_F(MappedMemoryManagerTest, FreeUnused) { |
| 257 int32 id = -1; | 258 int32_t id = -1; |
| 258 unsigned int offset = 0xFFFFFFFFU; | 259 unsigned int offset = 0xFFFFFFFFU; |
| 259 void* m1 = manager_->Alloc(kBufferSize, &id, &offset); | 260 void* m1 = manager_->Alloc(kBufferSize, &id, &offset); |
| 260 void* m2 = manager_->Alloc(kBufferSize, &id, &offset); | 261 void* m2 = manager_->Alloc(kBufferSize, &id, &offset); |
| 261 ASSERT_TRUE(m1 != NULL); | 262 ASSERT_TRUE(m1 != NULL); |
| 262 ASSERT_TRUE(m2 != NULL); | 263 ASSERT_TRUE(m2 != NULL); |
| 263 EXPECT_EQ(2u, manager_->num_chunks()); | 264 EXPECT_EQ(2u, manager_->num_chunks()); |
| 264 manager_->FreeUnused(); | 265 manager_->FreeUnused(); |
| 265 EXPECT_EQ(2u, manager_->num_chunks()); | 266 EXPECT_EQ(2u, manager_->num_chunks()); |
| 266 manager_->Free(m2); | 267 manager_->Free(m2); |
| 267 EXPECT_EQ(2u, manager_->num_chunks()); | 268 EXPECT_EQ(2u, manager_->num_chunks()); |
| 268 manager_->FreeUnused(); | 269 manager_->FreeUnused(); |
| 269 EXPECT_EQ(1u, manager_->num_chunks()); | 270 EXPECT_EQ(1u, manager_->num_chunks()); |
| 270 manager_->Free(m1); | 271 manager_->Free(m1); |
| 271 EXPECT_EQ(1u, manager_->num_chunks()); | 272 EXPECT_EQ(1u, manager_->num_chunks()); |
| 272 manager_->FreeUnused(); | 273 manager_->FreeUnused(); |
| 273 EXPECT_EQ(0u, manager_->num_chunks()); | 274 EXPECT_EQ(0u, manager_->num_chunks()); |
| 274 } | 275 } |
| 275 | 276 |
| 276 TEST_F(MappedMemoryManagerTest, ChunkSizeMultiple) { | 277 TEST_F(MappedMemoryManagerTest, ChunkSizeMultiple) { |
| 277 const unsigned int kSize = 1024; | 278 const unsigned int kSize = 1024; |
| 278 manager_->set_chunk_size_multiple(kSize * 2); | 279 manager_->set_chunk_size_multiple(kSize * 2); |
| 279 // Check if we allocate less than the chunk size multiple we get | 280 // Check if we allocate less than the chunk size multiple we get |
| 280 // chunks rounded up. | 281 // chunks rounded up. |
| 281 int32 id1 = -1; | 282 int32_t id1 = -1; |
| 282 unsigned int offset1 = 0xFFFFFFFFU; | 283 unsigned int offset1 = 0xFFFFFFFFU; |
| 283 void* mem1 = manager_->Alloc(kSize, &id1, &offset1); | 284 void* mem1 = manager_->Alloc(kSize, &id1, &offset1); |
| 284 int32 id2 = -1; | 285 int32_t id2 = -1; |
| 285 unsigned int offset2 = 0xFFFFFFFFU; | 286 unsigned int offset2 = 0xFFFFFFFFU; |
| 286 void* mem2 = manager_->Alloc(kSize, &id2, &offset2); | 287 void* mem2 = manager_->Alloc(kSize, &id2, &offset2); |
| 287 int32 id3 = -1; | 288 int32_t id3 = -1; |
| 288 unsigned int offset3 = 0xFFFFFFFFU; | 289 unsigned int offset3 = 0xFFFFFFFFU; |
| 289 void* mem3 = manager_->Alloc(kSize, &id3, &offset3); | 290 void* mem3 = manager_->Alloc(kSize, &id3, &offset3); |
| 290 ASSERT_TRUE(mem1); | 291 ASSERT_TRUE(mem1); |
| 291 ASSERT_TRUE(mem2); | 292 ASSERT_TRUE(mem2); |
| 292 ASSERT_TRUE(mem3); | 293 ASSERT_TRUE(mem3); |
| 293 EXPECT_NE(-1, id1); | 294 EXPECT_NE(-1, id1); |
| 294 EXPECT_EQ(id1, id2); | 295 EXPECT_EQ(id1, id2); |
| 295 EXPECT_NE(id2, id3); | 296 EXPECT_NE(id2, id3); |
| 296 EXPECT_EQ(0u, offset1); | 297 EXPECT_EQ(0u, offset1); |
| 297 EXPECT_EQ(kSize, offset2); | 298 EXPECT_EQ(kSize, offset2); |
| 298 EXPECT_EQ(0u, offset3); | 299 EXPECT_EQ(0u, offset3); |
| 299 | 300 |
| 300 manager_->Free(mem1); | 301 manager_->Free(mem1); |
| 301 manager_->Free(mem2); | 302 manager_->Free(mem2); |
| 302 manager_->Free(mem3); | 303 manager_->Free(mem3); |
| 303 } | 304 } |
| 304 | 305 |
| 305 TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) { | 306 TEST_F(MappedMemoryManagerTest, UnusedMemoryLimit) { |
| 306 const unsigned int kChunkSize = 2048; | 307 const unsigned int kChunkSize = 2048; |
| 307 // Reset the manager with a memory limit. | 308 // Reset the manager with a memory limit. |
| 308 manager_.reset(new MappedMemoryManager(helper_.get(), kChunkSize)); | 309 manager_.reset(new MappedMemoryManager(helper_.get(), kChunkSize)); |
| 309 manager_->set_chunk_size_multiple(kChunkSize); | 310 manager_->set_chunk_size_multiple(kChunkSize); |
| 310 | 311 |
| 311 // Allocate one chunk worth of memory. | 312 // Allocate one chunk worth of memory. |
| 312 int32 id1 = -1; | 313 int32_t id1 = -1; |
| 313 unsigned int offset1 = 0xFFFFFFFFU; | 314 unsigned int offset1 = 0xFFFFFFFFU; |
| 314 void* mem1 = manager_->Alloc(kChunkSize, &id1, &offset1); | 315 void* mem1 = manager_->Alloc(kChunkSize, &id1, &offset1); |
| 315 ASSERT_TRUE(mem1); | 316 ASSERT_TRUE(mem1); |
| 316 EXPECT_NE(-1, id1); | 317 EXPECT_NE(-1, id1); |
| 317 EXPECT_EQ(0u, offset1); | 318 EXPECT_EQ(0u, offset1); |
| 318 | 319 |
| 319 // Allocate half a chunk worth of memory again. | 320 // Allocate half a chunk worth of memory again. |
| 320 // The same chunk will be used. | 321 // The same chunk will be used. |
| 321 int32 id2 = -1; | 322 int32_t id2 = -1; |
| 322 unsigned int offset2 = 0xFFFFFFFFU; | 323 unsigned int offset2 = 0xFFFFFFFFU; |
| 323 void* mem2 = manager_->Alloc(kChunkSize, &id2, &offset2); | 324 void* mem2 = manager_->Alloc(kChunkSize, &id2, &offset2); |
| 324 ASSERT_TRUE(mem2); | 325 ASSERT_TRUE(mem2); |
| 325 EXPECT_NE(-1, id2); | 326 EXPECT_NE(-1, id2); |
| 326 EXPECT_EQ(0u, offset2); | 327 EXPECT_EQ(0u, offset2); |
| 327 | 328 |
| 328 // Expect two chunks to be allocated, exceeding the limit, | 329 // Expect two chunks to be allocated, exceeding the limit, |
| 329 // since all memory is in use. | 330 // since all memory is in use. |
| 330 EXPECT_EQ(2 * kChunkSize, manager_->allocated_memory()); | 331 EXPECT_EQ(2 * kChunkSize, manager_->allocated_memory()); |
| 331 | 332 |
| 332 manager_->Free(mem1); | 333 manager_->Free(mem1); |
| 333 manager_->Free(mem2); | 334 manager_->Free(mem2); |
| 334 } | 335 } |
| 335 | 336 |
| 336 TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) { | 337 TEST_F(MappedMemoryManagerTest, MemoryLimitWithReuse) { |
| 337 const unsigned int kSize = 1024; | 338 const unsigned int kSize = 1024; |
| 338 // Reset the manager with a memory limit. | 339 // Reset the manager with a memory limit. |
| 339 manager_.reset(new MappedMemoryManager(helper_.get(), kSize)); | 340 manager_.reset(new MappedMemoryManager(helper_.get(), kSize)); |
| 340 const unsigned int kChunkSize = 2 * 1024; | 341 const unsigned int kChunkSize = 2 * 1024; |
| 341 manager_->set_chunk_size_multiple(kChunkSize); | 342 manager_->set_chunk_size_multiple(kChunkSize); |
| 342 | 343 |
| 343 // Allocate half a chunk worth of memory. | 344 // Allocate half a chunk worth of memory. |
| 344 int32 id1 = -1; | 345 int32_t id1 = -1; |
| 345 unsigned int offset1 = 0xFFFFFFFFU; | 346 unsigned int offset1 = 0xFFFFFFFFU; |
| 346 void* mem1 = manager_->Alloc(kSize, &id1, &offset1); | 347 void* mem1 = manager_->Alloc(kSize, &id1, &offset1); |
| 347 ASSERT_TRUE(mem1); | 348 ASSERT_TRUE(mem1); |
| 348 EXPECT_NE(-1, id1); | 349 EXPECT_NE(-1, id1); |
| 349 EXPECT_EQ(0u, offset1); | 350 EXPECT_EQ(0u, offset1); |
| 350 | 351 |
| 351 // Allocate half a chunk worth of memory again. | 352 // Allocate half a chunk worth of memory again. |
| 352 // The same chunk will be used. | 353 // The same chunk will be used. |
| 353 int32 id2 = -1; | 354 int32_t id2 = -1; |
| 354 unsigned int offset2 = 0xFFFFFFFFU; | 355 unsigned int offset2 = 0xFFFFFFFFU; |
| 355 void* mem2 = manager_->Alloc(kSize, &id2, &offset2); | 356 void* mem2 = manager_->Alloc(kSize, &id2, &offset2); |
| 356 ASSERT_TRUE(mem2); | 357 ASSERT_TRUE(mem2); |
| 357 EXPECT_NE(-1, id2); | 358 EXPECT_NE(-1, id2); |
| 358 EXPECT_EQ(kSize, offset2); | 359 EXPECT_EQ(kSize, offset2); |
| 359 | 360 |
| 360 // Free one successful allocation, pending fence. | 361 // Free one successful allocation, pending fence. |
| 361 int32 token = helper_.get()->InsertToken(); | 362 int32_t token = helper_.get()->InsertToken(); |
| 362 manager_->FreePendingToken(mem2, token); | 363 manager_->FreePendingToken(mem2, token); |
| 363 | 364 |
| 364 // The way we hooked up the helper and engine, it won't process commands | 365 // The way we hooked up the helper and engine, it won't process commands |
| 365 // until it has to wait for something. Which means the token shouldn't have | 366 // until it has to wait for something. Which means the token shouldn't have |
| 366 // passed yet at this point. | 367 // passed yet at this point. |
| 367 EXPECT_GT(token, GetToken()); | 368 EXPECT_GT(token, GetToken()); |
| 368 | 369 |
| 369 // Since we didn't call helper_.finish() the token did not pass. | 370 // Since we didn't call helper_.finish() the token did not pass. |
| 370 // We won't be able to claim the free memory without waiting and | 371 // We won't be able to claim the free memory without waiting and |
| 371 // as we've already met the memory limit we'll have to wait | 372 // as we've already met the memory limit we'll have to wait |
| 372 // on the token. | 373 // on the token. |
| 373 int32 id3 = -1; | 374 int32_t id3 = -1; |
| 374 unsigned int offset3 = 0xFFFFFFFFU; | 375 unsigned int offset3 = 0xFFFFFFFFU; |
| 375 void* mem3 = manager_->Alloc(kSize, &id3, &offset3); | 376 void* mem3 = manager_->Alloc(kSize, &id3, &offset3); |
| 376 ASSERT_TRUE(mem3); | 377 ASSERT_TRUE(mem3); |
| 377 EXPECT_NE(-1, id3); | 378 EXPECT_NE(-1, id3); |
| 378 // It will reuse the space from the second allocation just freed. | 379 // It will reuse the space from the second allocation just freed. |
| 379 EXPECT_EQ(kSize, offset3); | 380 EXPECT_EQ(kSize, offset3); |
| 380 | 381 |
| 381 // Expect one chunk to be allocated | 382 // Expect one chunk to be allocated |
| 382 EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory()); | 383 EXPECT_EQ(1 * kChunkSize, manager_->allocated_memory()); |
| 383 | 384 |
| 384 manager_->Free(mem1); | 385 manager_->Free(mem1); |
| 385 manager_->Free(mem3); | 386 manager_->Free(mem3); |
| 386 } | 387 } |
| 387 | 388 |
| 388 TEST_F(MappedMemoryManagerTest, MaxAllocationTest) { | 389 TEST_F(MappedMemoryManagerTest, MaxAllocationTest) { |
| 389 const unsigned int kSize = 1024; | 390 const unsigned int kSize = 1024; |
| 390 // Reset the manager with a memory limit. | 391 // Reset the manager with a memory limit. |
| 391 manager_.reset(new MappedMemoryManager(helper_.get(), kSize)); | 392 manager_.reset(new MappedMemoryManager(helper_.get(), kSize)); |
| 392 | 393 |
| 393 const size_t kLimit = 512; | 394 const size_t kLimit = 512; |
| 394 manager_->set_chunk_size_multiple(kLimit); | 395 manager_->set_chunk_size_multiple(kLimit); |
| 395 | 396 |
| 396 // Allocate twice the limit worth of memory (currently unbounded). | 397 // Allocate twice the limit worth of memory (currently unbounded). |
| 397 int32 id1 = -1; | 398 int32_t id1 = -1; |
| 398 unsigned int offset1 = 0xFFFFFFFFU; | 399 unsigned int offset1 = 0xFFFFFFFFU; |
| 399 void* mem1 = manager_->Alloc(kLimit, &id1, &offset1); | 400 void* mem1 = manager_->Alloc(kLimit, &id1, &offset1); |
| 400 ASSERT_TRUE(mem1); | 401 ASSERT_TRUE(mem1); |
| 401 EXPECT_NE(-1, id1); | 402 EXPECT_NE(-1, id1); |
| 402 EXPECT_EQ(0u, offset1); | 403 EXPECT_EQ(0u, offset1); |
| 403 | 404 |
| 404 int32 id2 = -1; | 405 int32_t id2 = -1; |
| 405 unsigned int offset2 = 0xFFFFFFFFU; | 406 unsigned int offset2 = 0xFFFFFFFFU; |
| 406 void* mem2 = manager_->Alloc(kLimit, &id2, &offset2); | 407 void* mem2 = manager_->Alloc(kLimit, &id2, &offset2); |
| 407 ASSERT_TRUE(mem2); | 408 ASSERT_TRUE(mem2); |
| 408 EXPECT_NE(-1, id2); | 409 EXPECT_NE(-1, id2); |
| 409 EXPECT_EQ(0u, offset2); | 410 EXPECT_EQ(0u, offset2); |
| 410 | 411 |
| 411 manager_->set_max_allocated_bytes(kLimit); | 412 manager_->set_max_allocated_bytes(kLimit); |
| 412 | 413 |
| 413 // A new allocation should now fail. | 414 // A new allocation should now fail. |
| 414 int32 id3 = -1; | 415 int32_t id3 = -1; |
| 415 unsigned int offset3 = 0xFFFFFFFFU; | 416 unsigned int offset3 = 0xFFFFFFFFU; |
| 416 void* mem3 = manager_->Alloc(kLimit, &id3, &offset3); | 417 void* mem3 = manager_->Alloc(kLimit, &id3, &offset3); |
| 417 ASSERT_FALSE(mem3); | 418 ASSERT_FALSE(mem3); |
| 418 EXPECT_EQ(-1, id3); | 419 EXPECT_EQ(-1, id3); |
| 419 EXPECT_EQ(0xFFFFFFFFU, offset3); | 420 EXPECT_EQ(0xFFFFFFFFU, offset3); |
| 420 | 421 |
| 421 manager_->Free(mem2); | 422 manager_->Free(mem2); |
| 422 | 423 |
| 423 // New allocation is over the limit but should reuse allocated space | 424 // New allocation is over the limit but should reuse allocated space |
| 424 int32 id4 = -1; | 425 int32_t id4 = -1; |
| 425 unsigned int offset4 = 0xFFFFFFFFU; | 426 unsigned int offset4 = 0xFFFFFFFFU; |
| 426 void* mem4 = manager_->Alloc(kLimit, &id4, &offset4); | 427 void* mem4 = manager_->Alloc(kLimit, &id4, &offset4); |
| 427 ASSERT_TRUE(mem4); | 428 ASSERT_TRUE(mem4); |
| 428 EXPECT_EQ(id2, id4); | 429 EXPECT_EQ(id2, id4); |
| 429 EXPECT_EQ(offset2, offset4); | 430 EXPECT_EQ(offset2, offset4); |
| 430 | 431 |
| 431 manager_->Free(mem1); | 432 manager_->Free(mem1); |
| 432 manager_->Free(mem4); | 433 manager_->Free(mem4); |
| 433 } | 434 } |
| 434 | 435 |
| 435 } // namespace gpu | 436 } // namespace gpu |
| OLD | NEW |