Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(396)

Side by Side Diff: chrome/renderer/command_buffer_proxy.cc

Issue 6557006: Moved creation of GPU transfer buffers into the browser process.... (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: '' Created 9 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2009 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2009 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/logging.h" 5 #include "base/logging.h"
6 #include "base/process_util.h" 6 #include "base/process_util.h"
7 #include "base/shared_memory.h" 7 #include "base/shared_memory.h"
8 #include "base/task.h" 8 #include "base/task.h"
9 #include "chrome/common/gpu_messages.h" 9 #include "chrome/common/gpu_messages.h"
10 #include "chrome/common/plugin_messages.h" 10 #include "chrome/common/plugin_messages.h"
11 #include "chrome/common/render_messages.h"
11 #include "chrome/renderer/command_buffer_proxy.h" 12 #include "chrome/renderer/command_buffer_proxy.h"
12 #include "chrome/renderer/plugin_channel_host.h" 13 #include "chrome/renderer/plugin_channel_host.h"
14 #include "chrome/renderer/render_thread.h"
13 #include "gpu/command_buffer/common/cmd_buffer_common.h" 15 #include "gpu/command_buffer/common/cmd_buffer_common.h"
14 #include "ui/gfx/size.h" 16 #include "ui/gfx/size.h"
15 17
16 using gpu::Buffer; 18 using gpu::Buffer;
17 19
18 CommandBufferProxy::CommandBufferProxy( 20 CommandBufferProxy::CommandBufferProxy(
19 IPC::Channel::Sender* channel, 21 IPC::Channel::Sender* channel,
20 int route_id) 22 int route_id)
21 : num_entries_(0), 23 : num_entries_(0),
22 channel_(channel), 24 channel_(channel),
(...skipping 93 matching lines...) Expand 10 before | Expand all | Expand 10 after
116 118
117 return last_state_; 119 return last_state_;
118 } 120 }
119 121
120 void CommandBufferProxy::SetGetOffset(int32 get_offset) { 122 void CommandBufferProxy::SetGetOffset(int32 get_offset) {
121 // Not implemented in proxy. 123 // Not implemented in proxy.
122 NOTREACHED(); 124 NOTREACHED();
123 } 125 }
124 126
125 int32 CommandBufferProxy::CreateTransferBuffer(size_t size) { 127 int32 CommandBufferProxy::CreateTransferBuffer(size_t size) {
126 if (last_state_.error == gpu::error::kNoError) { 128 if (last_state_.error != gpu::error::kNoError)
127 int32 id; 129 return -1;
128 if (Send(new GpuCommandBufferMsg_CreateTransferBuffer(route_id_, 130
129 size, 131 RenderThread* render_thread = RenderThread::current();
130 &id))) { 132 if (!render_thread)
131 return id; 133 return -1;
132 } 134
135 base::SharedMemoryHandle handle;
136 if (!render_thread->Send(new ViewHostMsg_AllocateSharedMemoryBuffer(
137 size,
138 &handle))) {
139 return -1;
133 } 140 }
134 141
135 return -1; 142 // Take ownership of shared memory. This will close the handle if Send below
143 // fails. Otherwise, callee takes ownership before this variable
144 // goes out of scope by duping the handle.
145 base::SharedMemory shared_memory(handle, false);
piman 2011/02/25 04:13:48 This is wrong on posix. When sending the handle (b
146
147 int32 id;
148 if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
149 handle,
150 size,
151 &id))) {
152 return -1;
153 }
154
155 return id;
156 }
157
158 int32 CommandBufferProxy::RegisterTransferBuffer(
159 base::SharedMemory* shared_memory,
160 size_t size) {
161 if (last_state_.error != gpu::error::kNoError)
162 return -1;
163
164 int32 id;
165 if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(
166 route_id_,
167 shared_memory->handle(),
168 size,
169 &id))) {
170 return -1;
171 }
172
173 return id;
136 } 174 }
137 175
138 void CommandBufferProxy::DestroyTransferBuffer(int32 id) { 176 void CommandBufferProxy::DestroyTransferBuffer(int32 id) {
139 if (last_state_.error != gpu::error::kNoError) 177 if (last_state_.error != gpu::error::kNoError)
140 return; 178 return;
141 179
142 // Remove the transfer buffer from the client side4 cache. 180 // Remove the transfer buffer from the client side cache.
143 TransferBufferMap::iterator it = transfer_buffers_.find(id); 181 TransferBufferMap::iterator it = transfer_buffers_.find(id);
144 DCHECK(it != transfer_buffers_.end()); 182 if (it != transfer_buffers_.end()) {
145 183 delete it->second.shared_memory;
146 // Delete the shared memory object, closing the handle in this process. 184 transfer_buffers_.erase(it);
147 delete it->second.shared_memory; 185 }
148
149 transfer_buffers_.erase(it);
150 186
151 Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id)); 187 Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
152 } 188 }
153 189
154 Buffer CommandBufferProxy::GetTransferBuffer(int32 id) { 190 Buffer CommandBufferProxy::GetTransferBuffer(int32 id) {
155 if (last_state_.error != gpu::error::kNoError) 191 if (last_state_.error != gpu::error::kNoError)
156 return Buffer(); 192 return Buffer();
157 193
158 // Check local cache to see if there is already a client side shared memory 194 // Check local cache to see if there is already a client side shared memory
159 // object for this id. 195 // object for this id.
(...skipping 154 matching lines...) Expand 10 before | Expand all | Expand 10 after
314 pending_async_flush_tasks_.pop(); 350 pending_async_flush_tasks_.pop();
315 351
316 if (task.get()) { 352 if (task.get()) {
317 // Although we need to update last_state_ while potentially waiting 353 // Although we need to update last_state_ while potentially waiting
318 // for a synchronous flush to complete, we do not need to invoke the 354 // for a synchronous flush to complete, we do not need to invoke the
319 // callback synchronously. Also, post it as a non nestable task so it is 355 // callback synchronously. Also, post it as a non nestable task so it is
320 // always invoked by the outermost message loop. 356 // always invoked by the outermost message loop.
321 MessageLoop::current()->PostNonNestableTask(FROM_HERE, task.release()); 357 MessageLoop::current()->PostNonNestableTask(FROM_HERE, task.release());
322 } 358 }
323 } 359 }
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698