OLD | NEW |
1 // Copyright (c) 2009 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2009 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/logging.h" | 5 #include "base/logging.h" |
6 #include "base/process_util.h" | 6 #include "base/process_util.h" |
7 #include "base/shared_memory.h" | 7 #include "base/shared_memory.h" |
8 #include "base/task.h" | 8 #include "base/task.h" |
9 #include "chrome/common/gpu_messages.h" | 9 #include "chrome/common/gpu_messages.h" |
10 #include "chrome/common/plugin_messages.h" | 10 #include "chrome/common/plugin_messages.h" |
(...skipping 50 matching lines...)
61 channel_error_callback_->Run(); | 61 channel_error_callback_->Run(); |
62 } | 62 } |
63 | 63 |
64 void CommandBufferProxy::SetChannelErrorCallback(Callback0::Type* callback) { | 64 void CommandBufferProxy::SetChannelErrorCallback(Callback0::Type* callback) { |
65 channel_error_callback_.reset(callback); | 65 channel_error_callback_.reset(callback); |
66 } | 66 } |
67 | 67 |
68 bool CommandBufferProxy::Initialize(int32 size) { | 68 bool CommandBufferProxy::Initialize(int32 size) { |
69 DCHECK(!ring_buffer_.get()); | 69 DCHECK(!ring_buffer_.get()); |
70 | 70 |
71 // Initialize the service. Assuming we are sandboxed, the GPU | 71 RenderThread* render_thread = RenderThread::current(); |
72 // process is responsible for duplicating the handle. This might not be true | 72 if (!render_thread) |
73 // for NaCl. | 73 return false; |
| 74 |
74 base::SharedMemoryHandle handle; | 75 base::SharedMemoryHandle handle; |
75 if (Send(new GpuCommandBufferMsg_Initialize(route_id_, size, &handle)) && | 76 if (!render_thread->Send(new ViewHostMsg_AllocateSharedMemoryBuffer( |
76 base::SharedMemory::IsHandleValid(handle)) { | 77 size, |
77 ring_buffer_.reset(new base::SharedMemory(handle, false)); | 78 &handle))) { |
78 if (ring_buffer_->Map(size)) { | 79 return false; |
79 num_entries_ = size / sizeof(gpu::CommandBufferEntry); | |
80 return true; | |
81 } | |
82 | |
83 ring_buffer_.reset(); | |
84 } | 80 } |
85 | 81 |
86 return false; | 82 if (!base::SharedMemory::IsHandleValid(handle)) |
| 83 return false; |
| 84 |
| 85 #if defined(OS_POSIX) |
| 86 handle.auto_close = false; |
| 87 #endif |
| 88 |
| 89 // Take ownership of shared memory. This will close the handle if Send below |
| 90 // fails. Otherwise, callee takes ownership before this variable |
| 91 // goes out of scope. |
| 92 base::SharedMemory shared_memory(handle, false); |
| 93 |
| 94 return Initialize(&shared_memory, size); |
| 95 } |
| 96 |
| 97 bool CommandBufferProxy::Initialize(base::SharedMemory* buffer, int32 size) { |
| 98 bool result; |
| 99 if (!Send(new GpuCommandBufferMsg_Initialize(route_id_, |
| 100 buffer->handle(), |
| 101 size, |
| 102 &result))) { |
| 103 LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize."; |
| 104 return false; |
| 105 } |
| 106 |
| 107 if (!result) { |
| 108 LOG(ERROR) << "Failed to initialize command buffer service."; |
| 109 return false; |
| 110 } |
| 111 |
| 112 base::SharedMemoryHandle handle; |
| 113 if (!buffer->GiveToProcess(base::GetCurrentProcessHandle(), &handle)) { |
| 114 LOG(ERROR) << "Failed to duplicate command buffer handle."; |
| 115 return false; |
| 116 } |
| 117 |
| 118 ring_buffer_.reset(new base::SharedMemory(handle, false)); |
| 119 if (!ring_buffer_->Map(size)) { |
| 120 LOG(ERROR) << "Failed to map shared memory for command buffer."; |
| 121 ring_buffer_.reset(); |
| 122 return false; |
| 123 } |
| 124 |
| 125 num_entries_ = size / sizeof(gpu::CommandBufferEntry); |
| 126 return true; |
87 } | 127 } |
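
The rewritten Initialize() turns on a careful ownership hand-off: the stack-allocated base::SharedMemory closes the handle on every failure path, while GiveToProcess() transfers it to the callee on success. A minimal standalone sketch of that RAII pattern follows, using a plain POSIX descriptor; ScopedFd, HandOff, and send_to_peer are illustrative names, not Chromium API.

#include <fcntl.h>
#include <unistd.h>

// Toy stand-in for the pattern above: the wrapper closes the descriptor
// unless release() is called, just as the stack-allocated
// base::SharedMemory closes the handle unless the callee takes it.
class ScopedFd {
 public:
  explicit ScopedFd(int fd) : fd_(fd) {}
  ~ScopedFd() { if (fd_ >= 0) close(fd_); }
  ScopedFd(const ScopedFd&) = delete;
  ScopedFd& operator=(const ScopedFd&) = delete;
  int get() const { return fd_; }
  int release() { int fd = fd_; fd_ = -1; return fd; }
 private:
  int fd_;
};

// Mirrors Initialize(): on any failure the wrapper closes the handle; on
// success, ownership is released because the peer now holds it.
bool HandOff(int raw_fd, bool (*send_to_peer)(int)) {
  ScopedFd fd(raw_fd);
  if (fd.get() < 0 || !send_to_peer(fd.get()))
    return false;  // ~ScopedFd closes raw_fd here.
  fd.release();    // Peer took ownership; do not close.
  return true;
}

int main() {
  int dev_null = open("/dev/null", O_RDONLY);
  HandOff(dev_null, [](int) { return false; });  // Failure path: fd closed.
  return 0;
}
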
88 | 128 |
89 Buffer CommandBufferProxy::GetRingBuffer() { | 129 Buffer CommandBufferProxy::GetRingBuffer() { |
90 DCHECK(ring_buffer_.get()); | 130 DCHECK(ring_buffer_.get()); |
91 // Return locally cached ring buffer. | 131 // Return locally cached ring buffer. |
92 Buffer buffer; | 132 Buffer buffer; |
93 buffer.ptr = ring_buffer_->memory(); | 133 buffer.ptr = ring_buffer_->memory(); |
94 buffer.size = num_entries_ * sizeof(gpu::CommandBufferEntry); | 134 buffer.size = num_entries_ * sizeof(gpu::CommandBufferEntry); |
95 buffer.shared_memory = ring_buffer_.get(); | 135 buffer.shared_memory = ring_buffer_.get(); |
96 return buffer; | 136 return buffer; |
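
Note that GetRingBuffer() reports num_entries_ * sizeof(gpu::CommandBufferEntry) rather than the raw byte size passed to Map(), so callers never see a partial trailing entry. A small sketch of that truncation, assuming an illustrative 4-byte Entry as a stand-in for gpu::CommandBufferEntry:

#include <cstdint>
#include <cstdio>

struct Entry { uint32_t value; };  // Illustrative stand-in entry type.

int main() {
  int32_t size = 4096 + 3;  // Mapped bytes, not an exact multiple.
  // Integer division rounds down to whole entries (1024 here) ...
  int32_t num_entries = size / static_cast<int32_t>(sizeof(Entry));
  // ... so the reported buffer size is 4096 bytes, never a partial entry.
  int32_t usable = num_entries * static_cast<int32_t>(sizeof(Entry));
  std::printf("%d entries, %d usable bytes\n", num_entries, usable);
  return 0;
}
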
(...skipping 262 matching lines...)
359 pending_async_flush_tasks_.pop(); | 399 pending_async_flush_tasks_.pop(); |
360 | 400 |
361 if (task.get()) { | 401 if (task.get()) { |
362 // Although we need to update last_state_ while potentially waiting | 402 // Although we need to update last_state_ while potentially waiting |
363 // for a synchronous flush to complete, we do not need to invoke the | 403 // for a synchronous flush to complete, we do not need to invoke the |
364 // callback synchronously. Also, post it as a non-nestable task so it is | 404 // callback synchronously. Also, post it as a non-nestable task so it is |
365 // always invoked by the outermost message loop. | 405 // always invoked by the outermost message loop. |
366 MessageLoop::current()->PostNonNestableTask(FROM_HERE, task.release()); | 406 MessageLoop::current()->PostNonNestableTask(FROM_HERE, task.release()); |
367 } | 407 } |
368 } | 408 } |
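
Posting the flush callback with PostNonNestableTask() ensures it runs from the outermost message loop rather than from a nested loop pumped while a synchronous flush is blocking. A toy model of that guarantee follows; MiniLoop is a hypothetical stand-in, not Chromium's MessageLoop:

#include <cstdio>
#include <functional>
#include <queue>

// Toy model: non-nestable tasks stay queued until no nested run of the
// loop is on the stack, mirroring PostNonNestableTask's guarantee.
class MiniLoop {
 public:
  void PostNonNestableTask(std::function<void()> task) {
    pending_.push(std::move(task));
  }
  void Run() {
    ++depth_;
    // Only the outermost Run() drains non-nestable tasks; a nested Run()
    // (e.g. pumped while waiting on a synchronous flush) leaves them queued.
    if (depth_ == 1) {
      while (!pending_.empty()) {
        std::function<void()> task = std::move(pending_.front());
        pending_.pop();
        task();
      }
    }
    --depth_;
  }
 private:
  int depth_ = 0;
  std::queue<std::function<void()>> pending_;
};

int main() {
  MiniLoop loop;
  loop.PostNonNestableTask([] { std::printf("runs from outermost loop\n"); });
  loop.Run();  // Prints; a Run() nested inside another Run() would not.
  return 0;
}
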