OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/common/gpu/client/command_buffer_proxy_impl.h" | 5 #include "content/common/gpu/client/command_buffer_proxy_impl.h" |
6 | 6 |
7 #include <utility> | 7 #include <utility> |
8 #include <vector> | 8 #include <vector> |
9 | 9 |
10 #include "base/callback.h" | 10 #include "base/callback.h" |
11 #include "base/logging.h" | 11 #include "base/logging.h" |
12 #include "base/memory/shared_memory.h" | 12 #include "base/memory/shared_memory.h" |
13 #include "base/stl_util.h" | 13 #include "base/stl_util.h" |
14 #include "base/trace_event/trace_event.h" | 14 #include "base/trace_event/trace_event.h" |
15 #include "content/common/child_process_messages.h" | 15 #include "content/common/child_process_messages.h" |
16 #include "content/common/gpu/client/gpu_channel_host.h" | 16 #include "content/common/gpu/client/gpu_channel_host.h" |
| 17 #include "content/common/gpu/client/gpu_channel_host_factory.h" |
17 #include "content/common/gpu/client/gpu_video_decode_accelerator_host.h" | 18 #include "content/common/gpu/client/gpu_video_decode_accelerator_host.h" |
18 #include "content/common/gpu/client/gpu_video_encode_accelerator_host.h" | 19 #include "content/common/gpu/client/gpu_video_encode_accelerator_host.h" |
19 #include "content/common/gpu/gpu_messages.h" | 20 #include "content/common/gpu/client/ipc/gpu_host_ipc_transport_factory.h" |
20 #include "content/common/view_messages.h" | 21 #include "content/common/gpu/command_buffer_console_message.h" |
| 22 #include "content/common/gpu/create_image_params.h" |
21 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" | 23 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" |
22 #include "gpu/command_buffer/common/cmd_buffer_common.h" | 24 #include "gpu/command_buffer/common/cmd_buffer_common.h" |
23 #include "gpu/command_buffer/common/command_buffer_shared.h" | 25 #include "gpu/command_buffer/common/command_buffer_shared.h" |
24 #include "gpu/command_buffer/common/gpu_memory_allocation.h" | 26 #include "gpu/command_buffer/common/gpu_memory_allocation.h" |
25 #include "gpu/command_buffer/common/sync_token.h" | 27 #include "gpu/command_buffer/common/sync_token.h" |
26 #include "gpu/command_buffer/service/image_factory.h" | 28 #include "gpu/command_buffer/service/image_factory.h" |
27 #include "ui/gfx/geometry/size.h" | 29 #include "ui/gfx/geometry/size.h" |
28 #include "ui/gl/gl_bindings.h" | 30 #include "ui/gl/gl_bindings.h" |
29 | 31 |
30 namespace content { | 32 namespace content { |
31 | 33 |
32 namespace { | 34 CommandBufferProxyImpl::CommandBufferProxyImpl( |
33 | 35 GpuChannelHost* channel, |
34 uint64_t CommandBufferProxyID(int channel_id, int32_t route_id) { | 36 scoped_ptr<CommandBufferIPCTransport> transport, |
35 return (static_cast<uint64_t>(channel_id) << 32) | route_id; | 37 int32_t stream_id) |
36 } | |
37 | |
38 } // namespace | |
39 | |
40 CommandBufferProxyImpl::CommandBufferProxyImpl(GpuChannelHost* channel, | |
41 int32_t route_id, | |
42 int32_t stream_id) | |
43 : lock_(nullptr), | 38 : lock_(nullptr), |
44 channel_(channel), | 39 channel_(channel), |
45 command_buffer_id_(CommandBufferProxyID(channel->channel_id(), route_id)), | 40 transport_(std::move(transport)), |
46 route_id_(route_id), | |
47 stream_id_(stream_id), | 41 stream_id_(stream_id), |
48 flush_count_(0), | 42 flush_count_(0), |
49 last_put_offset_(-1), | 43 last_put_offset_(-1), |
50 last_barrier_put_offset_(-1), | 44 last_barrier_put_offset_(-1), |
51 next_fence_sync_release_(1), | 45 next_fence_sync_release_(1), |
52 flushed_fence_sync_release_(0), | 46 flushed_fence_sync_release_(0), |
53 verified_fence_sync_release_(0), | 47 verified_fence_sync_release_(0), |
54 next_signal_id_(0), | 48 next_signal_id_(0), |
55 weak_this_(AsWeakPtr()), | 49 weak_this_(AsWeakPtr()), |
56 callback_thread_(base::ThreadTaskRunnerHandle::Get()) { | 50 callback_thread_(base::ThreadTaskRunnerHandle::Get()) { |
| 51 transport_->SetClient(this); |
57 DCHECK(channel); | 52 DCHECK(channel); |
58 DCHECK(stream_id); | 53 DCHECK(stream_id); |
59 } | 54 } |
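
For reference, the OLD code packed the 64-bit command buffer ID by hand: channel ID in the high 32 bits, route ID in the low 32. The NEW code obtains the ID from the transport instead, but CanWaitUnverifiedSyncToken() further down still recovers the channel half with a >> 32 shift, so the layout is worth keeping in mind. A minimal sketch of the pack/unpack pair (helper names are illustrative, not from the patch):

    #include <cstdint>

    // Channel ID in the high word, route ID in the low word. The route ID is
    // cast through uint32_t so a negative value cannot sign-extend into the
    // channel bits.
    uint64_t PackCommandBufferId(int channel_id, int32_t route_id) {
      return (static_cast<uint64_t>(channel_id) << 32) |
             static_cast<uint32_t>(route_id);
    }

    // Recover the channel half, mirroring CanWaitUnverifiedSyncToken().
    uint64_t ChannelOfCommandBufferId(uint64_t command_buffer_id) {
      return command_buffer_id >> 32;
    }
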
60 | 55 |
61 CommandBufferProxyImpl::~CommandBufferProxyImpl() { | 56 CommandBufferProxyImpl::~CommandBufferProxyImpl() { |
62 FOR_EACH_OBSERVER(DeletionObserver, | 57 FOR_EACH_OBSERVER(DeletionObserver, deletion_observers_, OnWillDeleteImpl()); |
63 deletion_observers_, | |
64 OnWillDeleteImpl()); | |
65 if (channel_) { | 58 if (channel_) { |
66 channel_->DestroyCommandBuffer(this); | 59 channel_->DestroyCommandBuffer(this); |
67 channel_ = nullptr; | 60 channel_ = nullptr; |
68 } | 61 } |
69 } | 62 } |
70 | 63 |
71 bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) { | |
72 scoped_ptr<base::AutoLock> lock; | |
73 if (lock_) | |
74 lock.reset(new base::AutoLock(*lock_)); | |
75 bool handled = true; | |
76 IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message) | |
77 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed); | |
78 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage); | |
79 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalAck, | |
80 OnSignalAck); | |
81 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SwapBuffersCompleted, | |
82 OnSwapBuffersCompleted); | |
83 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_UpdateVSyncParameters, | |
84 OnUpdateVSyncParameters); | |
85 IPC_MESSAGE_UNHANDLED(handled = false) | |
86 IPC_END_MESSAGE_MAP() | |
87 | |
88 if (!handled) { | |
89 DLOG(ERROR) << "Gpu process sent invalid message."; | |
90 InvalidGpuMessage(); | |
91 } | |
92 return handled; | |
93 } | |
94 | |
95 void CommandBufferProxyImpl::OnChannelError() { | 64 void CommandBufferProxyImpl::OnChannelError() { |
96 scoped_ptr<base::AutoLock> lock; | 65 scoped_ptr<base::AutoLock> lock; |
97 if (lock_) | 66 if (lock_) |
98 lock.reset(new base::AutoLock(*lock_)); | 67 lock.reset(new base::AutoLock(*lock_)); |
99 | 68 |
100 gpu::error::ContextLostReason context_lost_reason = | 69 gpu::error::ContextLostReason context_lost_reason = |
101 gpu::error::kGpuChannelLost; | 70 gpu::error::kGpuChannelLost; |
102 if (shared_state_shm_ && shared_state_shm_->memory()) { | 71 if (shared_state_shm_ && shared_state_shm_->memory()) { |
103 TryUpdateState(); | 72 TryUpdateState(); |
104 // The GPU process might have intentionally been crashed | 73 // The GPU process might have intentionally been crashed |
105 // (exit_on_context_lost), so try to find out the original reason. | 74 // (exit_on_context_lost), so try to find out the original reason. |
106 if (last_state_.error == gpu::error::kLostContext) | 75 if (last_state_.error == gpu::error::kLostContext) |
107 context_lost_reason = last_state_.context_lost_reason; | 76 context_lost_reason = last_state_.context_lost_reason; |
108 } | 77 } |
109 OnDestroyed(context_lost_reason, gpu::error::kLostContext); | 78 OnDestroyed(context_lost_reason, gpu::error::kLostContext); |
110 } | 79 } |
111 | 80 |
| 81 void CommandBufferProxyImpl::OnConsoleMessage( |
| 82 const CommandBufferConsoleMessage& message) { |
| 83 if (!console_message_callback_.is_null()) { |
| 84 console_message_callback_.Run(message.message, message.id); |
| 85 } |
| 86 } |
| 87 |
112 void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason, | 88 void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason, |
113 gpu::error::Error error) { | 89 gpu::error::Error error) { |
114 CheckLock(); | 90 CheckLock(); |
115 // Prevent any further messages from being sent. | 91 // Prevent any further messages from being sent. |
116 if (channel_) { | 92 if (channel_) { |
117 channel_->DestroyCommandBuffer(this); | 93 channel_->DestroyCommandBuffer(this); |
118 channel_ = nullptr; | 94 channel_ = nullptr; |
119 } | 95 } |
120 | 96 |
121 // When the client sees that the context is lost, they should delete this | 97 // When the client sees that the context is lost, they should delete this |
122 // CommandBufferProxyImpl and create a new one. | 98 // CommandBufferProxyImpl and create a new one. |
123 last_state_.error = error; | 99 last_state_.error = error; |
124 last_state_.context_lost_reason = reason; | 100 last_state_.context_lost_reason = reason; |
125 | 101 |
126 if (!context_lost_callback_.is_null()) { | 102 if (!context_lost_callback_.is_null()) { |
127 context_lost_callback_.Run(); | 103 context_lost_callback_.Run(); |
128 // Avoid calling the error callback more than once. | 104 // Avoid calling the error callback more than once. |
129 context_lost_callback_.Reset(); | 105 context_lost_callback_.Reset(); |
130 } | 106 } |
131 } | 107 } |
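
As the comment above says, context loss is terminal for this proxy: the client deletes the CommandBufferProxyImpl and builds a new one. A hedged sketch of that wiring on the client side (the Client type and RecreateContext() are hypothetical):

    // Illustrative only. The callback fires at most once; OnDestroyed()
    // resets it after running it.
    void Client::BindContextLostHandler() {
      proxy_->SetContextLostCallback(
          base::Bind(&Client::OnContextLost, base::Unretained(this)));
    }

    void Client::OnContextLost() {
      proxy_.reset();      // the old proxy is unusable now
      RecreateContext();   // hypothetical re-initialization path
    }
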
132 | 108 |
133 void CommandBufferProxyImpl::OnConsoleMessage( | 109 void CommandBufferProxyImpl::OnDidHandleMessage() { |
134 const GPUCommandBufferConsoleMessage& message) { | 110 if (lock_) { |
135 if (!console_message_callback_.is_null()) { | 111 lock_->AssertAcquired(); |
136 console_message_callback_.Run(message.message, message.id); | 112 lock_->Release(); |
137 } | 113 } |
138 } | 114 } |
139 | 115 |
140 void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) { | 116 void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) { |
141 scoped_ptr<base::AutoLock> lock; | 117 scoped_ptr<base::AutoLock> lock; |
142 if (lock_) | 118 if (lock_) |
143 lock.reset(new base::AutoLock(*lock_)); | 119 lock.reset(new base::AutoLock(*lock_)); |
144 deletion_observers_.AddObserver(observer); | 120 deletion_observers_.AddObserver(observer); |
145 } | 121 } |
146 | 122 |
(...skipping 18 matching lines...)
165 } | 141 } |
166 | 142 |
167 void CommandBufferProxyImpl::SetContextLostCallback( | 143 void CommandBufferProxyImpl::SetContextLostCallback( |
168 const base::Closure& callback) { | 144 const base::Closure& callback) { |
169 CheckLock(); | 145 CheckLock(); |
170 context_lost_callback_ = callback; | 146 context_lost_callback_ = callback; |
171 } | 147 } |
172 | 148 |
173 bool CommandBufferProxyImpl::Initialize() { | 149 bool CommandBufferProxyImpl::Initialize() { |
174 TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize"); | 150 TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize"); |
175 shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory( | 151 shared_state_shm_.reset( |
176 sizeof(*shared_state())).release()); | 152 transport_->AllocateSharedMemory(sizeof(*shared_state())).release()); |
177 if (!shared_state_shm_) | 153 if (!shared_state_shm_) |
178 return false; | 154 return false; |
179 | 155 |
180 if (!shared_state_shm_->Map(sizeof(*shared_state()))) | 156 if (!shared_state_shm_->Map(sizeof(*shared_state()))) |
181 return false; | 157 return false; |
182 | 158 |
183 shared_state()->Initialize(); | 159 shared_state()->Initialize(); |
184 | 160 |
185 // This handle is owned by the GPU process and must be passed to it or it | 161 // This handle is owned by the GPU process and must be passed to it or it |
186 // will leak. In other words, do not early out on error between here and the | 162 // will leak. In other words, do not early out on error between here and the |
187 // sending of the Initialize IPC below. | 163 // sending of the Initialize IPC below. |
188 base::SharedMemoryHandle handle = | 164 base::SharedMemoryHandle handle = |
189 channel_->ShareToGpuProcess(shared_state_shm_->handle()); | 165 channel_->ShareToGpuProcess(shared_state_shm_->handle()); |
190 if (!base::SharedMemory::IsHandleValid(handle)) | 166 if (!base::SharedMemory::IsHandleValid(handle)) |
191 return false; | 167 return false; |
192 | 168 |
193 bool result = false; | 169 bool result = false; |
194 if (!Send(new GpuCommandBufferMsg_Initialize( | 170 if (!transport_->Initialize(handle, &result, &capabilities_)) { |
195 route_id_, handle, &result, &capabilities_))) { | |
196 LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize."; | 171 LOG(ERROR) << "Could not initialize command buffer over transport."; |
197 return false; | 172 return false; |
198 } | 173 } |
199 | 174 |
200 if (!result) { | 175 if (!result) { |
201 LOG(ERROR) << "Failed to initialize command buffer service."; | 176 LOG(ERROR) << "Failed to initialize command buffer service."; |
202 return false; | 177 return false; |
203 } | 178 } |
204 | 179 |
205 capabilities_.image = true; | 180 capabilities_.image = true; |
206 | 181 |
207 return true; | 182 return true; |
208 } | 183 } |
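
The "do not early out" comment encodes an ownership rule: once ShareToGpuProcess() duplicates the handle, only the GPU process can close the duplicate, so every path between the duplication and the Initialize call must still hand the handle over. A self-contained sketch of the same shape with stand-in types (not the real IPC types):

    // Stand-ins for SharedMemoryHandle / ShareToGpuProcess / the Initialize IPC.
    struct Handle { int fd; };
    Handle Duplicate() { return Handle{42}; }
    bool SendToGpu(Handle h) { return h.fd >= 0; }

    bool InitializeLike() {
      Handle dup = Duplicate();
      if (dup.fd < 0)
        return false;  // nothing duplicated yet, so bailing is still safe
      // From here on the duplicate belongs to the other process: do not
      // return without passing it to SendToGpu(), or it leaks.
      return SendToGpu(dup);
    }
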
209 | 184 |
210 gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() { | 185 gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() { |
211 return last_state_; | 186 return last_state_; |
212 } | 187 } |
213 | 188 |
214 int32_t CommandBufferProxyImpl::GetLastToken() { | 189 int32_t CommandBufferProxyImpl::GetLastToken() { |
215 TryUpdateState(); | 190 TryUpdateState(); |
216 return last_state_.token; | 191 return last_state_.token; |
217 } | 192 } |
218 | 193 |
219 void CommandBufferProxyImpl::Flush(int32_t put_offset) { | 194 void CommandBufferProxyImpl::Flush(int32_t put_offset) { |
220 CheckLock(); | 195 CheckLock(); |
221 if (last_state_.error != gpu::error::kNoError) | 196 if (last_state_.error != gpu::error::kNoError) |
222 return; | 197 return; |
223 | 198 |
224 TRACE_EVENT1("gpu", | 199 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::Flush", "put_offset", |
225 "CommandBufferProxyImpl::Flush", | |
226 "put_offset", | |
227 put_offset); | 200 put_offset); |
228 | 201 |
229 bool put_offset_changed = last_put_offset_ != put_offset; | 202 bool put_offset_changed = last_put_offset_ != put_offset; |
230 last_put_offset_ = put_offset; | 203 last_put_offset_ = put_offset; |
231 last_barrier_put_offset_ = put_offset; | 204 last_barrier_put_offset_ = put_offset; |
232 | 205 |
233 if (channel_) { | 206 if (channel_) { |
234 const uint32_t flush_id = channel_->OrderingBarrier( | 207 const uint32_t flush_id = channel_->OrderingBarrier( |
235 route_id_, stream_id_, put_offset, ++flush_count_, latency_info_, | 208 transport_.get(), stream_id_, put_offset, ++flush_count_, latency_info_, |
236 put_offset_changed, true); | 209 put_offset_changed, true); |
237 if (put_offset_changed) { | 210 if (put_offset_changed) { |
238 DCHECK(flush_id); | 211 DCHECK(flush_id); |
239 const uint64_t fence_sync_release = next_fence_sync_release_ - 1; | 212 const uint64_t fence_sync_release = next_fence_sync_release_ - 1; |
240 if (fence_sync_release > flushed_fence_sync_release_) { | 213 if (fence_sync_release > flushed_fence_sync_release_) { |
241 flushed_fence_sync_release_ = fence_sync_release; | 214 flushed_fence_sync_release_ = fence_sync_release; |
242 flushed_release_flush_id_.push( | 215 flushed_release_flush_id_.push( |
243 std::make_pair(fence_sync_release, flush_id)); | 216 std::make_pair(fence_sync_release, flush_id)); |
244 } | 217 } |
245 } | 218 } |
246 } | 219 } |
247 | 220 |
248 if (put_offset_changed) | 221 if (put_offset_changed) |
249 latency_info_.clear(); | 222 latency_info_.clear(); |
250 } | 223 } |
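
Flush() and OrderingBarrier() share the fence-sync bookkeeping: every release number handed out by GenerateFenceSyncRelease() counts as flushed once a flush covering it is issued, and the (release, flush_id) pair is queued so the release can later be verified against the server. A hedged sketch of the client-side sequence (the step that encodes the release into the command stream is elided, since it lives in the GLES2 implementation rather than here):

    uint64_t release = proxy->GenerateFenceSyncRelease();
    // ... encode a command that releases |release| into the ring buffer ...
    proxy->Flush(put_offset);  // flushed_fence_sync_release_ catches up to it
    gpu::SyncToken token(proxy->GetNamespaceID(),
                         proxy->GetExtraCommandBufferData(),
                         proxy->GetCommandBufferID(), release);
    // |token| remains unverified until EnsureWorkVisible()/SetVerifyFlush().
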
251 | 224 |
252 void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) { | 225 void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) { |
253 if (last_state_.error != gpu::error::kNoError) | 226 if (last_state_.error != gpu::error::kNoError) |
254 return; | 227 return; |
255 | 228 |
256 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset", | 229 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset", |
257 put_offset); | 230 put_offset); |
258 | 231 |
259 bool put_offset_changed = last_barrier_put_offset_ != put_offset; | 232 bool put_offset_changed = last_barrier_put_offset_ != put_offset; |
260 last_barrier_put_offset_ = put_offset; | 233 last_barrier_put_offset_ = put_offset; |
261 | 234 |
262 if (channel_) { | 235 if (channel_) { |
263 const uint32_t flush_id = channel_->OrderingBarrier( | 236 const uint32_t flush_id = channel_->OrderingBarrier( |
264 route_id_, stream_id_, put_offset, ++flush_count_, latency_info_, | 237 transport_.get(), stream_id_, put_offset, ++flush_count_, latency_info_, |
265 put_offset_changed, false); | 238 put_offset_changed, false); |
266 if (put_offset_changed) { | 239 if (put_offset_changed) { |
267 DCHECK(flush_id); | 240 DCHECK(flush_id); |
268 const uint64_t fence_sync_release = next_fence_sync_release_ - 1; | 241 const uint64_t fence_sync_release = next_fence_sync_release_ - 1; |
269 if (fence_sync_release > flushed_fence_sync_release_) { | 242 if (fence_sync_release > flushed_fence_sync_release_) { |
270 flushed_fence_sync_release_ = fence_sync_release; | 243 flushed_fence_sync_release_ = fence_sync_release; |
271 flushed_release_flush_id_.push( | 244 flushed_release_flush_id_.push( |
272 std::make_pair(fence_sync_release, flush_id)); | 245 std::make_pair(fence_sync_release, flush_id)); |
273 } | 246 } |
274 } | 247 } |
(...skipping 17 matching lines...)
292 } | 265 } |
293 | 266 |
294 void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback( | 267 void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback( |
295 const UpdateVSyncParametersCallback& callback) { | 268 const UpdateVSyncParametersCallback& callback) { |
296 CheckLock(); | 269 CheckLock(); |
297 update_vsync_parameters_completion_callback_ = callback; | 270 update_vsync_parameters_completion_callback_ = callback; |
298 } | 271 } |
299 | 272 |
300 void CommandBufferProxyImpl::WaitForTokenInRange(int32_t start, int32_t end) { | 273 void CommandBufferProxyImpl::WaitForTokenInRange(int32_t start, int32_t end) { |
301 CheckLock(); | 274 CheckLock(); |
302 TRACE_EVENT2("gpu", | 275 TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForToken", "start", start, |
303 "CommandBufferProxyImpl::WaitForToken", | 276 "end", end); |
304 "start", | |
305 start, | |
306 "end", | |
307 end); | |
308 TryUpdateState(); | 277 TryUpdateState(); |
309 if (!InRange(start, end, last_state_.token) && | 278 if (!InRange(start, end, last_state_.token) && |
310 last_state_.error == gpu::error::kNoError) { | 279 last_state_.error == gpu::error::kNoError) { |
311 gpu::CommandBuffer::State state; | 280 gpu::CommandBuffer::State state; |
312 if (Send(new GpuCommandBufferMsg_WaitForTokenInRange( | 281 if (transport_->WaitForTokenInRange(start, end, &state)) |
313 route_id_, start, end, &state))) | |
314 OnUpdateState(state); | 282 OnUpdateState(state); |
315 } | 283 } |
316 if (!InRange(start, end, last_state_.token) && | 284 if (!InRange(start, end, last_state_.token) && |
317 last_state_.error == gpu::error::kNoError) { | 285 last_state_.error == gpu::error::kNoError) { |
318 DLOG(ERROR) << "GPU state invalid after WaitForTokenInRange."; | 286 DLOG(ERROR) << "GPU state invalid after WaitForTokenInRange."; |
319 InvalidGpuReply(); | 287 InvalidGpuReply(); |
320 } | 288 } |
321 } | 289 } |
322 | 290 |
323 void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32_t start, | 291 void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32_t start, |
324 int32_t end) { | 292 int32_t end) { |
325 CheckLock(); | 293 CheckLock(); |
326 TRACE_EVENT2("gpu", | 294 TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForGetOffset", "start", |
327 "CommandBufferProxyImpl::WaitForGetOffset", | 295 start, "end", end); |
328 "start", | |
329 start, | |
330 "end", | |
331 end); | |
332 TryUpdateState(); | 296 TryUpdateState(); |
333 if (!InRange(start, end, last_state_.get_offset) && | 297 if (!InRange(start, end, last_state_.get_offset) && |
334 last_state_.error == gpu::error::kNoError) { | 298 last_state_.error == gpu::error::kNoError) { |
335 gpu::CommandBuffer::State state; | 299 gpu::CommandBuffer::State state; |
336 if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange( | 300 if (transport_->WaitForGetOffsetInRange(start, end, &state)) |
337 route_id_, start, end, &state))) | |
338 OnUpdateState(state); | 301 OnUpdateState(state); |
339 } | 302 } |
340 if (!InRange(start, end, last_state_.get_offset) && | 303 if (!InRange(start, end, last_state_.get_offset) && |
341 last_state_.error == gpu::error::kNoError) { | 304 last_state_.error == gpu::error::kNoError) { |
342 DLOG(ERROR) << "GPU state invalid after WaitForGetOffsetInRange."; | 305 DLOG(ERROR) << "GPU state invalid after WaitForGetOffsetInRange."; |
343 InvalidGpuReply(); | 306 InvalidGpuReply(); |
344 } | 307 } |
345 } | 308 } |
346 | 309 |
347 void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) { | 310 void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) { |
348 CheckLock(); | 311 CheckLock(); |
349 if (last_state_.error != gpu::error::kNoError) | 312 if (last_state_.error != gpu::error::kNoError) |
350 return; | 313 return; |
351 | 314 |
352 Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id)); | 315 transport_->SetGetBuffer(shm_id); |
353 last_put_offset_ = -1; | 316 last_put_offset_ = -1; |
354 } | 317 } |
355 | 318 |
356 scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer( | 319 scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer( |
357 size_t size, | 320 size_t size, |
358 int32_t* id) { | 321 int32_t* id) { |
359 CheckLock(); | 322 CheckLock(); |
360 *id = -1; | 323 *id = -1; |
361 | 324 |
362 if (last_state_.error != gpu::error::kNoError) | 325 if (last_state_.error != gpu::error::kNoError) |
363 return NULL; | 326 return NULL; |
364 | 327 |
365 int32_t new_id = channel_->ReserveTransferBufferId(); | 328 int32_t new_id = channel_->ReserveTransferBufferId(); |
366 | 329 |
367 scoped_ptr<base::SharedMemory> shared_memory( | 330 scoped_ptr<base::SharedMemory> shared_memory( |
368 channel_->factory()->AllocateSharedMemory(size)); | 331 transport_->AllocateSharedMemory(size)); |
369 if (!shared_memory) { | 332 if (!shared_memory) { |
370 if (last_state_.error == gpu::error::kNoError) | 333 if (last_state_.error == gpu::error::kNoError) |
371 last_state_.error = gpu::error::kOutOfBounds; | 334 last_state_.error = gpu::error::kOutOfBounds; |
372 return NULL; | 335 return NULL; |
373 } | 336 } |
374 | 337 |
375 DCHECK(!shared_memory->memory()); | 338 DCHECK(!shared_memory->memory()); |
376 if (!shared_memory->Map(size)) { | 339 if (!shared_memory->Map(size)) { |
377 if (last_state_.error == gpu::error::kNoError) | 340 if (last_state_.error == gpu::error::kNoError) |
378 last_state_.error = gpu::error::kOutOfBounds; | 341 last_state_.error = gpu::error::kOutOfBounds; |
379 return NULL; | 342 return NULL; |
380 } | 343 } |
381 | 344 |
382 // This handle is owned by the GPU process and must be passed to it or it | 345 // This handle is owned by the GPU process and must be passed to it or it |
383 // will leak. In other words, do not early out on error between here and the | 346 // will leak. In other words, do not early out on error between here and the |
384 // sending of the RegisterTransferBuffer IPC below. | 347 // sending of the RegisterTransferBuffer IPC below. |
385 base::SharedMemoryHandle handle = | 348 base::SharedMemoryHandle handle = |
386 channel_->ShareToGpuProcess(shared_memory->handle()); | 349 channel_->ShareToGpuProcess(shared_memory->handle()); |
387 if (!base::SharedMemory::IsHandleValid(handle)) { | 350 if (!base::SharedMemory::IsHandleValid(handle)) { |
388 if (last_state_.error == gpu::error::kNoError) | 351 if (last_state_.error == gpu::error::kNoError) |
389 last_state_.error = gpu::error::kLostContext; | 352 last_state_.error = gpu::error::kLostContext; |
390 return NULL; | 353 return NULL; |
391 } | 354 } |
392 | 355 |
393 if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_, | 356 if (!transport_->RegisterTransferBuffer(new_id, handle, size)) { |
394 new_id, | |
395 handle, | |
396 size))) { | |
397 return NULL; | 357 return NULL; |
398 } | 358 } |
399 | 359 |
400 *id = new_id; | 360 *id = new_id; |
401 scoped_refptr<gpu::Buffer> buffer( | 361 scoped_refptr<gpu::Buffer> buffer( |
402 gpu::MakeBufferFromSharedMemory(std::move(shared_memory), size)); | 362 gpu::MakeBufferFromSharedMemory(std::move(shared_memory), size)); |
403 return buffer; | 363 return buffer; |
404 } | 364 } |
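
CreateTransferBuffer() is a four-step recipe: reserve an ID, allocate and map shared memory, duplicate the handle for the GPU process, and register the buffer over the transport. From the caller's side it reduces to the following (payload and sizes illustrative):

    int32_t id = -1;
    scoped_refptr<gpu::Buffer> buffer = proxy->CreateTransferBuffer(4096, &id);
    if (buffer.get()) {
      // The mapping is shared with the service; commands refer to it by |id|.
      memcpy(buffer->memory(), payload, payload_size);
      // ... issue commands referencing (id, offset) ...
      proxy->DestroyTransferBuffer(id);
    }
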
405 | 365 |
406 void CommandBufferProxyImpl::DestroyTransferBuffer(int32_t id) { | 366 void CommandBufferProxyImpl::DestroyTransferBuffer(int32_t id) { |
407 CheckLock(); | 367 CheckLock(); |
408 if (last_state_.error != gpu::error::kNoError) | 368 if (last_state_.error != gpu::error::kNoError) |
409 return; | 369 return; |
410 | 370 |
411 Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id)); | 371 transport_->DestroyTransferBuffer(id); |
412 } | 372 } |
413 | 373 |
414 gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() { | 374 gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() { |
415 return capabilities_; | 375 return capabilities_; |
416 } | 376 } |
417 | 377 |
418 int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer, | 378 int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer, |
419 size_t width, | 379 size_t width, |
420 size_t height, | 380 size_t height, |
421 unsigned internal_format) { | 381 unsigned internal_format) { |
(...skipping 25 matching lines...)
447 DCHECK_LE(image_fence_sync - 1, flushed_fence_sync_release_); | 407 DCHECK_LE(image_fence_sync - 1, flushed_fence_sync_release_); |
448 } | 408 } |
449 | 409 |
450 DCHECK(gpu::ImageFactory::IsGpuMemoryBufferFormatSupported( | 410 DCHECK(gpu::ImageFactory::IsGpuMemoryBufferFormatSupported( |
451 gpu_memory_buffer->GetFormat(), capabilities_)); | 411 gpu_memory_buffer->GetFormat(), capabilities_)); |
452 DCHECK(gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat( | 412 DCHECK(gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat( |
453 gfx::Size(width, height), gpu_memory_buffer->GetFormat())); | 413 gfx::Size(width, height), gpu_memory_buffer->GetFormat())); |
454 DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat( | 414 DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat( |
455 internal_format, gpu_memory_buffer->GetFormat())); | 415 internal_format, gpu_memory_buffer->GetFormat())); |
456 | 416 |
457 GpuCommandBufferMsg_CreateImage_Params params; | 417 CreateImageParams params; |
458 params.id = new_id; | 418 params.id = new_id; |
459 params.gpu_memory_buffer = handle; | 419 params.gpu_memory_buffer = handle; |
460 params.size = gfx::Size(width, height); | 420 params.size = gfx::Size(width, height); |
461 params.format = gpu_memory_buffer->GetFormat(); | 421 params.format = gpu_memory_buffer->GetFormat(); |
462 params.internal_format = internal_format; | 422 params.internal_format = internal_format; |
463 params.image_release_count = image_fence_sync; | 423 params.image_release_count = image_fence_sync; |
464 | 424 |
465 if (!Send(new GpuCommandBufferMsg_CreateImage(route_id_, params))) | 425 if (!transport_->CreateImage(params)) |
466 return -1; | 426 return -1; |
467 | 427 |
468 if (image_fence_sync) { | 428 if (image_fence_sync) { |
469 gpu::SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(), | 429 gpu::SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(), |
470 GetCommandBufferID(), image_fence_sync); | 430 GetCommandBufferID(), image_fence_sync); |
471 | 431 |
472 // Force a synchronous IPC to validate sync token. | 432 // Force a synchronous IPC to validate sync token. |
473 EnsureWorkVisible(); | 433 EnsureWorkVisible(); |
474 sync_token.SetVerifyFlush(); | 434 sync_token.SetVerifyFlush(); |
475 | 435 |
476 gpu_memory_buffer_manager->SetDestructionSyncToken(gpu_memory_buffer, | 436 gpu_memory_buffer_manager->SetDestructionSyncToken(gpu_memory_buffer, |
477 sync_token); | 437 sync_token); |
478 } | 438 } |
479 | 439 |
480 return new_id; | 440 return new_id; |
481 } | 441 } |
482 | 442 |
483 void CommandBufferProxyImpl::DestroyImage(int32_t id) { | 443 void CommandBufferProxyImpl::DestroyImage(int32_t id) { |
484 CheckLock(); | 444 CheckLock(); |
485 if (last_state_.error != gpu::error::kNoError) | 445 if (last_state_.error != gpu::error::kNoError) |
486 return; | 446 return; |
487 | 447 |
488 Send(new GpuCommandBufferMsg_DestroyImage(route_id_, id)); | 448 transport_->DestroyImage(id); |
489 } | 449 } |
490 | 450 |
491 int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage( | 451 int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage( |
492 size_t width, | 452 size_t width, |
493 size_t height, | 453 size_t height, |
494 unsigned internal_format, | 454 unsigned internal_format, |
495 unsigned usage) { | 455 unsigned usage) { |
496 CheckLock(); | 456 CheckLock(); |
497 scoped_ptr<gfx::GpuMemoryBuffer> buffer( | 457 scoped_ptr<gfx::GpuMemoryBuffer> buffer( |
498 channel_->gpu_memory_buffer_manager()->AllocateGpuMemoryBuffer( | 458 channel_->gpu_memory_buffer_manager()->AllocateGpuMemoryBuffer( |
499 gfx::Size(width, height), | 459 gfx::Size(width, height), |
500 gpu::ImageFactory::DefaultBufferFormatForImageFormat(internal_format), | 460 gpu::ImageFactory::DefaultBufferFormatForImageFormat(internal_format), |
501 gfx::BufferUsage::SCANOUT)); | 461 gfx::BufferUsage::SCANOUT)); |
502 if (!buffer) | 462 if (!buffer) |
503 return -1; | 463 return -1; |
504 | 464 |
505 return CreateImage(buffer->AsClientBuffer(), width, height, internal_format); | 465 return CreateImage(buffer->AsClientBuffer(), width, height, internal_format); |
506 } | 466 } |
507 | 467 |
508 uint32_t CommandBufferProxyImpl::CreateStreamTexture(uint32_t texture_id) { | 468 uint32_t CommandBufferProxyImpl::CreateStreamTexture(uint32_t texture_id) { |
509 CheckLock(); | 469 CheckLock(); |
510 if (last_state_.error != gpu::error::kNoError) | 470 if (last_state_.error != gpu::error::kNoError) |
511 return 0; | 471 return 0; |
512 | 472 |
513 int32_t stream_id = channel_->GenerateRouteID(); | 473 int32_t stream_id = MSG_ROUTING_NONE; |
514 bool succeeded = false; | 474 bool succeeded = false; |
515 Send(new GpuCommandBufferMsg_CreateStreamTexture( | 475 transport_->CreateStreamTexture(texture_id, &stream_id, &succeeded); |
516 route_id_, texture_id, stream_id, &succeeded)); | |
517 if (!succeeded) { | 476 if (!succeeded) { |
518 DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure"; | 477 DLOG(ERROR) << "CreateStreamTexture returned failure"; |
519 return 0; | 478 return 0; |
520 } | 479 } |
521 return stream_id; | 480 return stream_id; |
522 } | 481 } |
523 | 482 |
524 void CommandBufferProxyImpl::SetLock(base::Lock* lock) { | 483 void CommandBufferProxyImpl::SetLock(base::Lock* lock) { |
525 lock_ = lock; | 484 lock_ = lock; |
526 } | 485 } |
527 | 486 |
528 bool CommandBufferProxyImpl::IsGpuChannelLost() { | 487 bool CommandBufferProxyImpl::IsGpuChannelLost() { |
529 return !channel_ || channel_->IsLost(); | 488 return !channel_ || channel_->IsLost(); |
530 } | 489 } |
531 | 490 |
532 void CommandBufferProxyImpl::EnsureWorkVisible() { | 491 void CommandBufferProxyImpl::EnsureWorkVisible() { |
533 if (channel_) | 492 if (channel_) |
534 channel_->ValidateFlushIDReachedServer(stream_id_, true); | 493 channel_->ValidateFlushIDReachedServer(stream_id_, true); |
535 } | 494 } |
536 | 495 |
537 gpu::CommandBufferNamespace CommandBufferProxyImpl::GetNamespaceID() const { | 496 gpu::CommandBufferNamespace CommandBufferProxyImpl::GetNamespaceID() const { |
538 return gpu::CommandBufferNamespace::GPU_IO; | 497 return gpu::CommandBufferNamespace::GPU_IO; |
539 } | 498 } |
540 | 499 |
541 uint64_t CommandBufferProxyImpl::GetCommandBufferID() const { | 500 uint64_t CommandBufferProxyImpl::GetCommandBufferID() const { |
542 return command_buffer_id_; | 501 return transport_->GetCommandBufferID(); |
543 } | 502 } |
544 | 503 |
545 int32_t CommandBufferProxyImpl::GetExtraCommandBufferData() const { | 504 int32_t CommandBufferProxyImpl::GetExtraCommandBufferData() const { |
546 return stream_id_; | 505 return stream_id_; |
547 } | 506 } |
548 | 507 |
549 uint64_t CommandBufferProxyImpl::GenerateFenceSyncRelease() { | 508 uint64_t CommandBufferProxyImpl::GenerateFenceSyncRelease() { |
550 return next_fence_sync_release_++; | 509 return next_fence_sync_release_++; |
551 } | 510 } |
552 | 511 |
(...skipping 30 matching lines...)
583 return false; | 542 return false; |
584 } | 543 } |
585 | 544 |
586 void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token, | 545 void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token, |
587 const base::Closure& callback) { | 546 const base::Closure& callback) { |
588 CheckLock(); | 547 CheckLock(); |
589 if (last_state_.error != gpu::error::kNoError) | 548 if (last_state_.error != gpu::error::kNoError) |
590 return; | 549 return; |
591 | 550 |
592 uint32_t signal_id = next_signal_id_++; | 551 uint32_t signal_id = next_signal_id_++; |
593 if (!Send(new GpuCommandBufferMsg_SignalSyncToken(route_id_, | 552 if (!transport_->SignalSyncToken(sync_token, signal_id)) { |
594 sync_token, | |
595 signal_id))) { | |
596 return; | 553 return; |
597 } | 554 } |
598 | 555 |
599 signal_tasks_.insert(std::make_pair(signal_id, callback)); | 556 signal_tasks_.insert(std::make_pair(signal_id, callback)); |
600 } | 557 } |
601 | 558 |
602 bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken( | 559 bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken( |
603 const gpu::SyncToken* sync_token) { | 560 const gpu::SyncToken* sync_token) { |
604 // Can only wait on an unverified sync token if it is from the same channel. | 561 // Can only wait on an unverified sync token if it is from the same channel. |
605 const uint64_t token_channel = sync_token->command_buffer_id() >> 32; | 562 const uint64_t token_channel = sync_token->command_buffer_id() >> 32; |
606 const uint64_t channel = command_buffer_id_ >> 32; | 563 const uint64_t channel = transport_->GetCommandBufferID() >> 32; |
607 if (sync_token->namespace_id() != gpu::CommandBufferNamespace::GPU_IO || | 564 if (sync_token->namespace_id() != gpu::CommandBufferNamespace::GPU_IO || |
608 token_channel != channel) { | 565 token_channel != channel) { |
609 return false; | 566 return false; |
610 } | 567 } |
611 | 568 |
612 // If waiting on a different stream, flush pending commands on that stream. | 569 // If waiting on a different stream, flush pending commands on that stream. |
613 const int32_t release_stream_id = sync_token->extra_data_field(); | 570 const int32_t release_stream_id = sync_token->extra_data_field(); |
614 if (release_stream_id == 0) | 571 if (release_stream_id == 0) |
615 return false; | 572 return false; |
616 | 573 |
(...skipping 11 matching lines...)
628 | 585 |
629 // Signal identifiers are hidden, so nobody outside of this class will see | 586 // Signal identifiers are hidden, so nobody outside of this class will see |
630 // them. (And thus, they cannot save them.) The IDs themselves only last | 587 // them. (And thus, they cannot save them.) The IDs themselves only last |
631 // until the callback is invoked, which will happen as soon as the GPU | 588 // until the callback is invoked, which will happen as soon as the GPU |
632 // catches up with the command buffer. | 589 // catches up with the command buffer. |
633 // A malicious caller trying to create a collision by making next_signal_id wrap | 590 // A malicious caller trying to create a collision by making next_signal_id wrap |
634 // would have to make calls at an astounding rate (300B/s) and even if they | 591 // would have to make calls at an astounding rate (300B/s) and even if they |
635 // could do that, all they would do is to prevent some callbacks from getting | 592 // could do that, all they would do is to prevent some callbacks from getting |
636 // called, leading to stalled threads and/or memory leaks. | 593 // called, leading to stalled threads and/or memory leaks. |
637 uint32_t signal_id = next_signal_id_++; | 594 uint32_t signal_id = next_signal_id_++; |
638 if (!Send(new GpuCommandBufferMsg_SignalQuery(route_id_, | 595 if (!transport_->SignalQuery(query, signal_id)) { |
639 query, | |
640 signal_id))) { | |
641 return; | 596 return; |
642 } | 597 } |
643 | 598 |
644 signal_tasks_.insert(std::make_pair(signal_id, callback)); | 599 signal_tasks_.insert(std::make_pair(signal_id, callback)); |
645 } | 600 } |
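
The collision argument above is counter arithmetic: next_signal_id_ is a uint32_t, so an ID can only be reused if 2^32 (about 4.3 billion) further signals are requested before the GPU runs the pending callback. If the GPU catches up within roughly 14 ms (an assumed figure, chosen only to reconstruct the comment's number), that requires 2^32 / 0.014 s, on the order of 3 x 10^11, i.e. the quoted 300B calls per second.
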
646 | 601 |
647 bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) { | 602 bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) { |
648 CheckLock(); | 603 CheckLock(); |
649 if (last_state_.error != gpu::error::kNoError) | 604 if (last_state_.error != gpu::error::kNoError) |
650 return false; | 605 return false; |
651 | 606 |
652 return Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox)); | 607 return transport_->ProduceFrontBuffer(mailbox); |
653 } | 608 } |
654 | 609 |
655 scoped_ptr<media::VideoDecodeAccelerator> | 610 scoped_ptr<media::VideoDecodeAccelerator> |
656 CommandBufferProxyImpl::CreateVideoDecoder() { | 611 CommandBufferProxyImpl::CreateVideoDecoder() { |
657 if (!channel_) | 612 if (!channel_) |
658 return scoped_ptr<media::VideoDecodeAccelerator>(); | 613 return scoped_ptr<media::VideoDecodeAccelerator>(); |
| 614 scoped_ptr<GpuVideoDecodeAcceleratorHostIPCTransport> transport( |
| 615 GpuHostIPCTransportFactory::Get() |
| 616 ->CreateVideoDecodeAcceleratorHostIPCTransport()); |
659 return scoped_ptr<media::VideoDecodeAccelerator>( | 617 return scoped_ptr<media::VideoDecodeAccelerator>( |
660 new GpuVideoDecodeAcceleratorHost(channel_, this)); | 618 new GpuVideoDecodeAcceleratorHost(channel_, std::move(transport), this)); |
661 } | 619 } |
662 | 620 |
663 scoped_ptr<media::VideoEncodeAccelerator> | 621 scoped_ptr<media::VideoEncodeAccelerator> |
664 CommandBufferProxyImpl::CreateVideoEncoder() { | 622 CommandBufferProxyImpl::CreateVideoEncoder() { |
665 if (!channel_) | 623 if (!channel_) |
666 return scoped_ptr<media::VideoEncodeAccelerator>(); | 624 return scoped_ptr<media::VideoEncodeAccelerator>(); |
| 625 scoped_ptr<GpuVideoEncodeAcceleratorHostIPCTransport> transport( |
| 626 GpuHostIPCTransportFactory::Get() |
| 627 ->CreateVideoEncodeAcceleratorHostIPCTransport()); |
667 return scoped_ptr<media::VideoEncodeAccelerator>( | 628 return scoped_ptr<media::VideoEncodeAccelerator>( |
668 new GpuVideoEncodeAcceleratorHost(channel_, this)); | 629 new GpuVideoEncodeAcceleratorHost(channel_, std::move(transport), this)); |
669 } | 630 } |
670 | 631 |
671 gpu::error::Error CommandBufferProxyImpl::GetLastError() { | 632 gpu::error::Error CommandBufferProxyImpl::GetLastError() { |
672 return last_state_.error; | 633 return last_state_.error; |
673 } | 634 } |
674 | 635 |
675 bool CommandBufferProxyImpl::Send(IPC::Message* msg) { | |
676 // Caller should not intentionally send a message if the context is lost. | |
677 DCHECK(last_state_.error == gpu::error::kNoError); | |
678 | |
679 if (channel_) { | |
680 if (channel_->Send(msg)) { | |
681 return true; | |
682 } else { | |
683 // Flag the command buffer as lost. Defer deleting the channel until | |
684 // OnChannelError is called after returning to the message loop in case | |
685 // it is referenced elsewhere. | |
686 DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context."; | |
687 last_state_.error = gpu::error::kLostContext; | |
688 return false; | |
689 } | |
690 } | |
691 | |
692 // Callee takes ownership of message, regardless of whether Send is | |
693 // successful. See IPC::Sender. | |
694 delete msg; | |
695 return false; | |
696 } | |
697 | |
698 void CommandBufferProxyImpl::OnUpdateState( | |
699 const gpu::CommandBuffer::State& state) { | |
700 // Handle wraparound. It works as long as we don't have more than 2B state | |
701 // updates in flight across which reordering occurs. | |
702 if (state.generation - last_state_.generation < 0x80000000U) | |
703 last_state_ = state; | |
704 } | |
705 | |
706 void CommandBufferProxyImpl::SetOnConsoleMessageCallback( | 636 void CommandBufferProxyImpl::SetOnConsoleMessageCallback( |
707 const GpuConsoleMessageCallback& callback) { | 637 const GpuConsoleMessageCallback& callback) { |
708 CheckLock(); | 638 CheckLock(); |
709 console_message_callback_ = callback; | 639 console_message_callback_ = callback; |
710 } | 640 } |
711 | 641 |
712 void CommandBufferProxyImpl::TryUpdateState() { | 642 void CommandBufferProxyImpl::TryUpdateState() { |
713 if (last_state_.error == gpu::error::kNoError) | 643 if (last_state_.error == gpu::error::kNoError) |
714 shared_state()->Read(&last_state_); | 644 shared_state()->Read(&last_state_); |
715 } | 645 } |
(...skipping 21 matching lines...)
737 if (!ui::LatencyInfo::Verify( | 667 if (!ui::LatencyInfo::Verify( |
738 latency_info, "CommandBufferProxyImpl::OnSwapBuffersCompleted")) { | 668 latency_info, "CommandBufferProxyImpl::OnSwapBuffersCompleted")) { |
739 swap_buffers_completion_callback_.Run(std::vector<ui::LatencyInfo>(), | 669 swap_buffers_completion_callback_.Run(std::vector<ui::LatencyInfo>(), |
740 result); | 670 result); |
741 return; | 671 return; |
742 } | 672 } |
743 swap_buffers_completion_callback_.Run(latency_info, result); | 673 swap_buffers_completion_callback_.Run(latency_info, result); |
744 } | 674 } |
745 } | 675 } |
746 | 676 |
| 677 void CommandBufferProxyImpl::OnUpdateState( |
| 678 const gpu::CommandBuffer::State& state) { |
| 679 // Handle wraparound. It works as long as we don't have more than 2B state |
| 680 // updates in flight across which reordering occurs. |
| 681 if (state.generation - last_state_.generation < 0x80000000U) |
| 682 last_state_ = state; |
| 683 } |
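
The 0x80000000U test is the classic serial-number comparison on an unsigned 32-bit counter: subtraction wraps modulo 2^32, so a newer generation yields a small difference while a stale, reordered one yields a huge difference. A worked example:

    #include <cstdint>
    #include <cstdio>

    bool IsNewerOrSame(uint32_t incoming, uint32_t current) {
      return incoming - current < 0x80000000U;  // unsigned wraparound math
    }

    int main() {
      std::printf("%d\n", IsNewerOrSame(11, 10));          // 1: newer
      std::printf("%d\n", IsNewerOrSame(9, 10));           // 0: stale update
      std::printf("%d\n", IsNewerOrSame(3, 0xFFFFFFFEu));  // 1: newer, counter wrapped
      return 0;
    }
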
| 684 |
747 void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase, | 685 void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase, |
748 base::TimeDelta interval) { | 686 base::TimeDelta interval) { |
749 if (!update_vsync_parameters_completion_callback_.is_null()) | 687 if (!update_vsync_parameters_completion_callback_.is_null()) |
750 update_vsync_parameters_completion_callback_.Run(timebase, interval); | 688 update_vsync_parameters_completion_callback_.Run(timebase, interval); |
751 } | 689 } |
752 | 690 |
| 691 void CommandBufferProxyImpl::OnWillHandleMessage() { |
| 692 if (lock_) |
| 693 lock_->Acquire(); |
| 694 } |
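
OnWillHandleMessage() and OnDidHandleMessage() (earlier in the file) form a bracket: the transport calls the first before dispatching an incoming message so the proxy's lock is held across the handler, and the second afterwards to release it. A hedged sketch of how a transport implementation might use the pair (the dispatch helper is illustrative, not part of the patch):

    // Illustrative transport-side dispatch bracket.
    template <typename Handler>
    void DispatchBracketed(CommandBufferProxyImpl* client, Handler handler) {
      client->OnWillHandleMessage();  // acquires |lock_| if the proxy is locked
      handler();                      // e.g. OnConsoleMessage(), OnDestroyed()
      client->OnDidHandleMessage();   // asserts and releases |lock_|
    }
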
| 695 |
753 void CommandBufferProxyImpl::InvalidGpuMessage() { | 696 void CommandBufferProxyImpl::InvalidGpuMessage() { |
754 LOG(ERROR) << "Received invalid message from the GPU process."; | 697 LOG(ERROR) << "Received invalid message from the GPU process."; |
755 OnDestroyed(gpu::error::kInvalidGpuMessage, gpu::error::kLostContext); | 698 OnDestroyed(gpu::error::kInvalidGpuMessage, gpu::error::kLostContext); |
756 } | 699 } |
757 | 700 |
758 void CommandBufferProxyImpl::InvalidGpuReply() { | 701 void CommandBufferProxyImpl::InvalidGpuReply() { |
759 CheckLock(); | 702 CheckLock(); |
760 LOG(ERROR) << "Received invalid reply from the GPU process."; | 703 LOG(ERROR) << "Received invalid reply from the GPU process."; |
761 last_state_.error = gpu::error::kLostContext; | 704 last_state_.error = gpu::error::kLostContext; |
762 last_state_.context_lost_reason = gpu::error::kInvalidGpuMessage; | 705 last_state_.context_lost_reason = gpu::error::kInvalidGpuMessage; |
763 callback_thread_->PostTask( | 706 callback_thread_->PostTask( |
764 FROM_HERE, | 707 FROM_HERE, |
765 base::Bind(&CommandBufferProxyImpl::InvalidGpuReplyOnClientThread, | 708 base::Bind(&CommandBufferProxyImpl::InvalidGpuReplyOnClientThread, |
766 weak_this_)); | 709 weak_this_)); |
767 } | 710 } |
768 | 711 |
769 void CommandBufferProxyImpl::InvalidGpuReplyOnClientThread() { | 712 void CommandBufferProxyImpl::InvalidGpuReplyOnClientThread() { |
770 scoped_ptr<base::AutoLock> lock; | 713 scoped_ptr<base::AutoLock> lock; |
771 if (lock_) | 714 if (lock_) |
772 lock.reset(new base::AutoLock(*lock_)); | 715 lock.reset(new base::AutoLock(*lock_)); |
773 OnDestroyed(gpu::error::kInvalidGpuMessage, gpu::error::kLostContext); | 716 OnDestroyed(gpu::error::kInvalidGpuMessage, gpu::error::kLostContext); |
774 } | 717 } |
775 | 718 |
776 } // namespace content | 719 } // namespace content |