Chromium Code Reviews

Diff: content/common/gpu/client/gpu_channel_host.cc

Issue 19762004: Add multi-process GpuMemoryBuffer framework. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 7 years, 2 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/common/gpu/client/gpu_channel_host.h"

 #include <algorithm>

 #include "base/bind.h"
 #include "base/debug/trace_event.h"
(...skipping 37 matching lines...)

 GpuChannelHost::GpuChannelHost(GpuChannelHostFactory* factory,
                                int gpu_host_id,
                                int client_id,
                                const gpu::GPUInfo& gpu_info)
     : factory_(factory),
       client_id_(client_id),
       gpu_host_id_(gpu_host_id),
       gpu_info_(gpu_info) {
   next_transfer_buffer_id_.GetNext();
+  next_gpu_memory_buffer_id_.GetNext();
 }

 void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle) {
   // Open a channel to the GPU process. We pass NULL as the main listener here
   // since we need to filter everything to route it to the right thread.
   scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
   channel_.reset(new IPC::SyncChannel(channel_handle,
                                       IPC::Channel::MODE_CLIENT,
                                       NULL,
                                       io_loop.get(),
(...skipping 210 matching lines...)
   if (generate_count > 0)
     Send(new GpuChannelMsg_GenerateMailboxNamesAsync(generate_count));

   return true;
 }

 int32 GpuChannelHost::ReserveTransferBufferId() {
   return next_transfer_buffer_id_.GetNext();
 }

+gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
+    gfx::GpuMemoryBufferHandle source_handle) {
+  switch (source_handle.type) {
+    case gfx::SHARED_MEMORY_BUFFER: {
+      gfx::GpuMemoryBufferHandle handle;
+      handle.type = gfx::SHARED_MEMORY_BUFFER;
+      handle.handle = ShareToGpuProcess(source_handle.handle);
+      return handle;
+    }
+    default:
+      NOTREACHED();
+      return gfx::GpuMemoryBufferHandle();
+  }
+}
+
+int32 GpuChannelHost::ReserveGpuMemoryBufferId() {
+  return next_gpu_memory_buffer_id_.GetNext();
+}
+
 GpuChannelHost::~GpuChannelHost() {
   // channel_ must be destroyed on the main thread.
   if (!factory_->IsMainThread())
     factory_->GetMainLoop()->DeleteSoon(FROM_HERE, channel_.release());
 }


 GpuChannelHost::MessageFilter::MessageFilter()
     : lost_(false),
       requested_mailboxes_(0) {
(...skipping 110 matching lines...)
   AutoLock lock(lock_);
   DCHECK_LE(names.size(), requested_mailboxes_);
   requested_mailboxes_ -= names.size();
   mailbox_name_pool_.insert(mailbox_name_pool_.end(),
                             names.begin(),
                             names.end());
 }


 }  // namespace content
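
For reviewers skimming the new API, here is a minimal caller-side sketch (not part of this patch) of how the two new methods might be used together. It assumes the buffer is backed by base::SharedMemory, as the SHARED_MEMORY_BUFFER case above suggests, and it leaves the actual IPC message that carries the id and handle to the rest of the change; the function name below is purely illustrative.

// Hypothetical usage sketch only. The message that would transport |id| and
// |gpu_handle| to the GPU process is not shown here.
void ShareBufferWithGpuProcessExample(content::GpuChannelHost* host,
                                      base::SharedMemoryHandle shm_handle) {
  // Describe the client-side buffer as a shared-memory-backed handle.
  gfx::GpuMemoryBufferHandle source_handle;
  source_handle.type = gfx::SHARED_MEMORY_BUFFER;
  source_handle.handle = shm_handle;

  // Reserve a channel-unique buffer id and duplicate the handle so the GPU
  // process can map the same memory.
  int32 id = host->ReserveGpuMemoryBufferId();
  gfx::GpuMemoryBufferHandle gpu_handle =
      host->ShareGpuMemoryBufferToGpuProcess(source_handle);

  // |id| and |gpu_handle| would then be sent over the channel by other parts
  // of this change.
}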
