Chromium Code Reviews

Side by Side Diff: gpu/command_buffer/client/mapped_memory_unittest.cc

Issue 2956005: Adds MapBufferSubData and MapTexSubImage2D.... (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: '' Created 10 years, 5 months ago
Property Changes:
Added: svn:eol-style
+ LF
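
For context, the tests below exercise the client-side mapped-memory allocator that backs the new entry points. A typical caller obtains a block with Alloc (which also returns the shared-memory id and offset a service-side command needs), fills it, and releases it either immediately with Free or, once a command referencing the block has been issued, with FreePendingToken. The fragment below is a minimal sketch of that flow, inferred only from the calls exercised in these tests; IssueUpdateCommand() is a hypothetical stand-in for whatever command consumes the memory and is not part of this change.

// Sketch only - not code from this CL. IssueUpdateCommand() is hypothetical.
void ExampleUpload(MappedMemoryManager* manager, CommandBufferHelper* helper,
                   const void* data, unsigned int size) {
  int32 shm_id = -1;
  unsigned int shm_offset = 0;
  void* mem = manager->Alloc(size, &shm_id, &shm_offset);
  if (!mem)
    return;
  memcpy(mem, data, size);                // Fill the block on the client side.
  IssueUpdateCommand(shm_id, shm_offset, size);  // Hypothetical consuming command.
  int32 token = helper->InsertToken();    // Fence placed after that command.
  manager->FreePendingToken(mem, token);  // Reusable once the token passes.
}
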
// Copyright (c) 2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/client/mapped_memory.h"
#include "base/callback.h"
#include "base/message_loop.h"
#include "base/scoped_nsautorelease_pool.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/service/mocks.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/gpu_processor.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace gpu {

using testing::Return;
using testing::Mock;
using testing::Truly;
using testing::Sequence;
using testing::DoAll;
using testing::Invoke;
using testing::_;

class MappedMemoryTestBase : public testing::Test {
 protected:
  static const unsigned int kBufferSize = 1024;

  virtual void SetUp() {
    api_mock_.reset(new AsyncAPIMock);
    // Ignore noops in the mock - we don't want to inspect the internals of
    // the helper.
    EXPECT_CALL(*api_mock_, DoCommand(cmd::kNoop, 0, _))
        .WillRepeatedly(Return(error::kNoError));
    // Forward the SetToken calls to the engine.
    EXPECT_CALL(*api_mock_.get(), DoCommand(cmd::kSetToken, 1, _))
        .WillRepeatedly(DoAll(Invoke(api_mock_.get(), &AsyncAPIMock::SetToken),
                              Return(error::kNoError)));

    command_buffer_.reset(new CommandBufferService);
    command_buffer_->Initialize(kBufferSize);
    Buffer ring_buffer = command_buffer_->GetRingBuffer();

    parser_ = new CommandParser(ring_buffer.ptr,
                                ring_buffer.size,
                                0,
                                ring_buffer.size,
                                0,
                                api_mock_.get());

    gpu_processor_.reset(new GPUProcessor(
        command_buffer_.get(), NULL, parser_, INT_MAX));
    command_buffer_->SetPutOffsetChangeCallback(NewCallback(
        gpu_processor_.get(), &GPUProcessor::ProcessCommands));

    api_mock_->set_engine(gpu_processor_.get());

    helper_.reset(new CommandBufferHelper(command_buffer_.get()));
    helper_->Initialize(kBufferSize);
  }

  int32 GetToken() {
    return command_buffer_->GetState().token;
  }

  base::ScopedNSAutoreleasePool autorelease_pool_;
  MessageLoop message_loop_;
  scoped_ptr<AsyncAPIMock> api_mock_;
  scoped_ptr<CommandBufferService> command_buffer_;
  scoped_ptr<GPUProcessor> gpu_processor_;
  CommandParser* parser_;
  scoped_ptr<CommandBufferHelper> helper_;
};

#ifndef _MSC_VER
const unsigned int MappedMemoryTestBase::kBufferSize;
#endif

// Test fixture for MemoryChunk tests. Creates a MemoryChunk, using a
// CommandBufferHelper with a mock AsyncAPIInterface for its interface (calling
// it directly, not through the RPC mechanism), making sure Noops are ignored
// and SetToken calls are properly forwarded to the engine.
class MemoryChunkTest : public MappedMemoryTestBase {
 protected:
  static const int32 kShmId = 123;
  virtual void SetUp() {
    MappedMemoryTestBase::SetUp();
    buffer_.reset(new uint8[kBufferSize]);
    gpu::Buffer buf;
    buf.size = kBufferSize;
    buf.ptr = buffer_.get();
    chunk_ = new MemoryChunk(kShmId, buf, helper_.get());
  }

  virtual void TearDown() {
    // If the GPUProcessor posts any tasks, this forces them to run.
    MessageLoop::current()->RunAllPending();

    MappedMemoryTestBase::TearDown();
  }

  MemoryChunk::Ref chunk_;
  scoped_array<uint8> buffer_;
};

#ifndef _MSC_VER
const int32 MemoryChunkTest::kShmId;
#endif

TEST_F(MemoryChunkTest, Basic) {
  const unsigned int kSize = 16;
  EXPECT_EQ(kShmId, chunk_->shm_id());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetSize());
  void *pointer = chunk_->Alloc(kSize);
  ASSERT_TRUE(pointer);
  EXPECT_LE(buffer_.get(), static_cast<uint8 *>(pointer));
  EXPECT_GE(kBufferSize, static_cast<uint8 *>(pointer) - buffer_.get() + kSize);
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetSize());

  chunk_->Free(pointer);
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());

  uint8 *pointer_char = static_cast<uint8*>(chunk_->Alloc(kSize));
  ASSERT_TRUE(pointer_char);
  EXPECT_LE(buffer_.get(), pointer_char);
  EXPECT_GE(buffer_.get() + kBufferSize, pointer_char + kSize);
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize - kSize, chunk_->GetLargestFreeSizeWithWaiting());
  chunk_->Free(pointer_char);
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithoutWaiting());
  EXPECT_EQ(kBufferSize, chunk_->GetLargestFreeSizeWithWaiting());
}

class MappedMemoryManagerTest : public MappedMemoryTestBase {
 protected:
  virtual void SetUp() {
    MappedMemoryTestBase::SetUp();
    manager_.reset(new MappedMemoryManager(helper_.get()));
  }

  virtual void TearDown() {
    // If the GPUProcessor posts any tasks, this forces them to run.
    MessageLoop::current()->RunAllPending();
    manager_.reset();
    MappedMemoryTestBase::TearDown();
  }

  scoped_ptr<MappedMemoryManager> manager_;
};

TEST_F(MappedMemoryManagerTest, Basic) {
  const unsigned int kSize = 1024;
  // Check we can alloc.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
  ASSERT_TRUE(mem1);
  EXPECT_NE(-1, id1);
  EXPECT_EQ(0u, offset1);
  // Check that if we free and realloc the same size we get the same memory.
  int32 id2 = -1;
  unsigned int offset2 = 0xFFFFFFFFU;
  manager_->Free(mem1);
  void* mem2 = manager_->Alloc(kSize, &id2, &offset2);
  EXPECT_EQ(mem1, mem2);
  EXPECT_EQ(id1, id2);
  EXPECT_EQ(offset1, offset2);
  // Check that if we allocate again we get different shared memory.
  int32 id3 = -1;
  unsigned int offset3 = 0xFFFFFFFFU;
  void* mem3 = manager_->Alloc(kSize, &id3, &offset3);
  ASSERT_TRUE(mem3 != NULL);
  EXPECT_NE(mem2, mem3);
  EXPECT_NE(id2, id3);
  EXPECT_EQ(0u, offset3);
  // Free 3 and allocate 2 half-size blocks.
  manager_->Free(mem3);
  int32 id4 = -1;
  int32 id5 = -1;
  unsigned int offset4 = 0xFFFFFFFFU;
  unsigned int offset5 = 0xFFFFFFFFU;
  void* mem4 = manager_->Alloc(kSize / 2, &id4, &offset4);
  void* mem5 = manager_->Alloc(kSize / 2, &id5, &offset5);
  ASSERT_TRUE(mem4 != NULL);
  ASSERT_TRUE(mem5 != NULL);
  EXPECT_EQ(id3, id4);
  EXPECT_EQ(id4, id5);
  EXPECT_EQ(0u, offset4);
  EXPECT_EQ(kSize / 2u, offset5);
  manager_->Free(mem4);
  manager_->Free(mem2);
  manager_->Free(mem5);
}

TEST_F(MappedMemoryManagerTest, FreePendingToken) {
  const unsigned int kSize = 128;
  const unsigned int kAllocCount = (kBufferSize / kSize) * 2;
  CHECK(kAllocCount * kSize == kBufferSize * 2);

  // Allocate several buffers across multiple chunks.
  void *pointers[kAllocCount];
  for (unsigned int i = 0; i < kAllocCount; ++i) {
    int32 id = -1;
    unsigned int offset = 0xFFFFFFFFu;
    pointers[i] = manager_->Alloc(kSize, &id, &offset);
    EXPECT_TRUE(pointers[i]);
    EXPECT_NE(id, -1);
    EXPECT_NE(offset, 0xFFFFFFFFu);
  }

  // Free one successful allocation, pending fence.
  int32 token = helper_.get()->InsertToken();
  manager_->FreePendingToken(pointers[0], token);

  // The way we hooked up the helper and engine, it won't process commands
  // until it has to wait for something, which means the token shouldn't
  // have passed yet at this point.
  EXPECT_GT(token, GetToken());
  // Force it to read up to the token.
  helper_->Finish();
  // Check that the token has indeed passed.
  EXPECT_LE(token, GetToken());

  // This allocation should use the spot just freed above.
  int32 new_id = -1;
  unsigned int new_offset = 0xFFFFFFFFu;
  void* new_ptr = manager_->Alloc(kSize, &new_id, &new_offset);
  EXPECT_TRUE(new_ptr);
  EXPECT_EQ(new_ptr, pointers[0]);
  EXPECT_NE(new_id, -1);
  EXPECT_NE(new_offset, 0xFFFFFFFFu);

  // Free up everything.
  manager_->Free(new_ptr);
  for (unsigned int i = 1; i < kAllocCount; ++i) {
    manager_->Free(pointers[i]);
  }
}

// Check that we don't crash if an allocation is never freed.
TEST_F(MappedMemoryManagerTest, DontFree) {
  const unsigned int kSize = 1024;
  // Check we can alloc.
  int32 id1 = -1;
  unsigned int offset1 = 0xFFFFFFFFU;
  void* mem1 = manager_->Alloc(kSize, &id1, &offset1);
  ASSERT_TRUE(mem1);
}

}  // namespace gpu
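
A note on the token checks in the FreePendingToken test: helper_->InsertToken() appends a cmd::kSetToken command and returns the token value it wrote, while GetToken() reads back the last token the service has processed, so a freshly inserted token has not "passed" until the service executes that command (here, only once Finish() forces it to run). The fragment below is a minimal sketch, using only accessors already exercised by these tests, of how a caller could make sure a given token has passed before reusing memory released with FreePendingToken; it is an illustration, not part of this change.

// Illustration only - not code from this CL.
void EnsureTokenPassed(CommandBufferHelper* helper,
                       CommandBufferService* command_buffer,
                       int32 token) {
  if (command_buffer->GetState().token < token) {
    // Finish() blocks until the service has executed everything submitted so
    // far, including the kSetToken command that updates the token.
    helper->Finish();
  }
}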