// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/command_buffer_proxy_impl.h"

#include <utility>
#include <vector>

#include "base/callback.h"
#include "base/logging.h"
#include "base/memory/shared_memory.h"
#include "base/stl_util.h"
#include "base/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "content/common/gpu/client/gpu_channel_host.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/common/command_buffer_id.h"
#include "gpu/command_buffer/common/command_buffer_shared.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/common/gpu_param_traits.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_bindings.h"

namespace content {

namespace {

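// Packs the channel ID and route ID into a single 64-bit command buffer ID:
// the channel ID occupies the high 32 bits and the route ID the low 32 bits.
// CanWaitUnverifiedSyncToken() below relies on this layout to recover the
// channel ID from a sync token's command buffer ID.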
gpu::CommandBufferId CommandBufferProxyID(int channel_id, int32_t route_id) {
  return gpu::CommandBufferId::FromUnsafeValue(
      (static_cast<uint64_t>(channel_id) << 32) | route_id);
}

}  // namespace

CommandBufferProxyImpl::CommandBufferProxyImpl(GpuChannelHost* channel,
                                               int32_t route_id,
                                               int32_t stream_id)
    : lock_(nullptr),
      channel_(channel),
      command_buffer_id_(CommandBufferProxyID(channel->channel_id(), route_id)),
      route_id_(route_id),
      stream_id_(stream_id),
      flush_count_(0),
      last_put_offset_(-1),
      last_barrier_put_offset_(-1),
      next_fence_sync_release_(1),
      flushed_fence_sync_release_(0),
      verified_fence_sync_release_(0),
      next_signal_id_(0),
      weak_this_(AsWeakPtr()),
      callback_thread_(base::ThreadTaskRunnerHandle::Get()) {
  DCHECK(channel);
  DCHECK(stream_id);
}

CommandBufferProxyImpl::~CommandBufferProxyImpl() {
  FOR_EACH_OBSERVER(DeletionObserver,
                    deletion_observers_,
                    OnWillDeleteImpl());
  if (channel_) {
    channel_->DestroyCommandBuffer(this);
    channel_ = nullptr;
  }
}

bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) {
  scoped_ptr<base::AutoLock> lock;
  if (lock_)
    lock.reset(new base::AutoLock(*lock_));
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxyImpl, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_Destroyed, OnDestroyed);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ConsoleMsg, OnConsoleMessage);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalAck, OnSignalAck);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SwapBuffersCompleted,
                        OnSwapBuffersCompleted);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_UpdateVSyncParameters,
                        OnUpdateVSyncParameters);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  if (!handled) {
    DLOG(ERROR) << "Gpu process sent invalid message.";
    InvalidGpuMessage();
  }
  return handled;
}

void CommandBufferProxyImpl::OnChannelError() {
  scoped_ptr<base::AutoLock> lock;
  if (lock_)
    lock.reset(new base::AutoLock(*lock_));

  gpu::error::ContextLostReason context_lost_reason =
      gpu::error::kGpuChannelLost;
  if (shared_state_shm_ && shared_state_shm_->memory()) {
    TryUpdateState();
    // The GPU process might have intentionally been crashed
    // (exit_on_context_lost), so try to find out the original reason.
    if (last_state_.error == gpu::error::kLostContext)
      context_lost_reason = last_state_.context_lost_reason;
  }
  OnDestroyed(context_lost_reason, gpu::error::kLostContext);
}

void CommandBufferProxyImpl::OnDestroyed(gpu::error::ContextLostReason reason,
                                         gpu::error::Error error) {
  CheckLock();
  // Prevent any further messages from being sent.
  if (channel_) {
    channel_->DestroyCommandBuffer(this);
    channel_ = nullptr;
  }

  // When the client sees that the context is lost, they should delete this
  // CommandBufferProxyImpl and create a new one.
  last_state_.error = error;
  last_state_.context_lost_reason = reason;

  if (!context_lost_callback_.is_null()) {
    context_lost_callback_.Run();
    // Avoid calling the error callback more than once.
    context_lost_callback_.Reset();
  }
}

void CommandBufferProxyImpl::OnConsoleMessage(
    const GPUCommandBufferConsoleMessage& message) {
  if (!console_message_callback_.is_null()) {
    console_message_callback_.Run(message.message, message.id);
  }
}

void CommandBufferProxyImpl::AddDeletionObserver(DeletionObserver* observer) {
  scoped_ptr<base::AutoLock> lock;
  if (lock_)
    lock.reset(new base::AutoLock(*lock_));
  deletion_observers_.AddObserver(observer);
}

void CommandBufferProxyImpl::RemoveDeletionObserver(
    DeletionObserver* observer) {
  scoped_ptr<base::AutoLock> lock;
  if (lock_)
    lock.reset(new base::AutoLock(*lock_));
  deletion_observers_.RemoveObserver(observer);
}

void CommandBufferProxyImpl::OnSignalAck(uint32_t id) {
  SignalTaskMap::iterator it = signal_tasks_.find(id);
  if (it == signal_tasks_.end()) {
    DLOG(ERROR) << "Gpu process sent invalid SignalAck.";
    InvalidGpuMessage();
    return;
  }
  base::Closure callback = it->second;
  signal_tasks_.erase(it);
  callback.Run();
}

void CommandBufferProxyImpl::SetContextLostCallback(
    const base::Closure& callback) {
  CheckLock();
  context_lost_callback_ = callback;
}

bool CommandBufferProxyImpl::Initialize() {
  TRACE_EVENT0("gpu", "CommandBufferProxyImpl::Initialize");
  shared_state_shm_.reset(channel_->factory()->AllocateSharedMemory(
      sizeof(*shared_state())).release());
  if (!shared_state_shm_)
    return false;

  if (!shared_state_shm_->Map(sizeof(*shared_state())))
    return false;

  shared_state()->Initialize();

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the Initialize IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_state_shm_->handle());
  if (!base::SharedMemory::IsHandleValid(handle))
    return false;

  bool result = false;
  if (!Send(new GpuCommandBufferMsg_Initialize(
          route_id_, handle, &result, &capabilities_))) {
    LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
    return false;
  }

  if (!result) {
    LOG(ERROR) << "Failed to initialize command buffer service.";
    return false;
  }

  capabilities_.image = true;

  return true;
}

gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() {
  return last_state_;
}

int32_t CommandBufferProxyImpl::GetLastToken() {
  TryUpdateState();
  return last_state_.token;
}

void CommandBufferProxyImpl::Flush(int32_t put_offset) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu",
               "CommandBufferProxyImpl::Flush",
               "put_offset",
               put_offset);

  bool put_offset_changed = last_put_offset_ != put_offset;
  last_put_offset_ = put_offset;
  last_barrier_put_offset_ = put_offset;

  if (channel_) {
    const uint32_t flush_id = channel_->OrderingBarrier(
        route_id_, stream_id_, put_offset, ++flush_count_, latency_info_,
        put_offset_changed, true);
    if (put_offset_changed) {
      DCHECK(flush_id);
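      // next_fence_sync_release_ is the next release number to be handed
      // out, so the highest release generated so far is one less than it.
      // Record that this flush covers every release up to that point.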
      const uint64_t fence_sync_release = next_fence_sync_release_ - 1;
      if (fence_sync_release > flushed_fence_sync_release_) {
        flushed_fence_sync_release_ = fence_sync_release;
        flushed_release_flush_id_.push(
            std::make_pair(fence_sync_release, flush_id));
      }
    }
  }

  if (put_offset_changed)
    latency_info_.clear();
}

void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset",
               put_offset);

  bool put_offset_changed = last_barrier_put_offset_ != put_offset;
  last_barrier_put_offset_ = put_offset;

  if (channel_) {
    const uint32_t flush_id = channel_->OrderingBarrier(
        route_id_, stream_id_, put_offset, ++flush_count_, latency_info_,
        put_offset_changed, false);
    if (put_offset_changed) {
      DCHECK(flush_id);
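      // Same release bookkeeping as in Flush() above.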
      const uint64_t fence_sync_release = next_fence_sync_release_ - 1;
      if (fence_sync_release > flushed_fence_sync_release_) {
        flushed_fence_sync_release_ = fence_sync_release;
        flushed_release_flush_id_.push(
            std::make_pair(fence_sync_release, flush_id));
      }
    }
  }

  if (put_offset_changed)
    latency_info_.clear();
}

void CommandBufferProxyImpl::SetLatencyInfo(
    const std::vector<ui::LatencyInfo>& latency_info) {
  CheckLock();
  for (size_t i = 0; i < latency_info.size(); i++)
    latency_info_.push_back(latency_info[i]);
}

void CommandBufferProxyImpl::SetSwapBuffersCompletionCallback(
    const SwapBuffersCompletionCallback& callback) {
  CheckLock();
  swap_buffers_completion_callback_ = callback;
}

void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback(
    const UpdateVSyncParametersCallback& callback) {
  CheckLock();
  update_vsync_parameters_completion_callback_ = callback;
}

void CommandBufferProxyImpl::WaitForTokenInRange(int32_t start, int32_t end) {
  CheckLock();
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForToken",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.token) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  if (!InRange(start, end, last_state_.token) &&
      last_state_.error == gpu::error::kNoError) {
    DLOG(ERROR) << "GPU state invalid after WaitForTokenInRange.";
    InvalidGpuReply();
  }
}

void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32_t start,
                                                     int32_t end) {
  CheckLock();
  TRACE_EVENT2("gpu",
               "CommandBufferProxyImpl::WaitForGetOffset",
               "start",
               start,
               "end",
               end);
  TryUpdateState();
  if (!InRange(start, end, last_state_.get_offset) &&
      last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_WaitForGetOffsetInRange(
            route_id_, start, end, &state)))
      OnUpdateState(state);
  }
  if (!InRange(start, end, last_state_.get_offset) &&
      last_state_.error == gpu::error::kNoError) {
    DLOG(ERROR) << "GPU state invalid after WaitForGetOffsetInRange.";
    InvalidGpuReply();
  }
}

void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetGetBuffer(route_id_, shm_id));
  last_put_offset_ = -1;
}

scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer(
    size_t size,
    int32_t* id) {
  CheckLock();
  *id = -1;

  if (last_state_.error != gpu::error::kNoError)
    return NULL;

  int32_t new_id = channel_->ReserveTransferBufferId();

  scoped_ptr<base::SharedMemory> shared_memory(
      channel_->factory()->AllocateSharedMemory(size));
  if (!shared_memory) {
    if (last_state_.error == gpu::error::kNoError)
      last_state_.error = gpu::error::kOutOfBounds;
    return NULL;
  }

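  // AllocateSharedMemory() returns the region unmapped; Map() below is what
  // makes it addressable in this process.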
  DCHECK(!shared_memory->memory());
  if (!shared_memory->Map(size)) {
    if (last_state_.error == gpu::error::kNoError)
      last_state_.error = gpu::error::kOutOfBounds;
    return NULL;
  }

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the RegisterTransferBuffer IPC below.
  base::SharedMemoryHandle handle =
      channel_->ShareToGpuProcess(shared_memory->handle());
  if (!base::SharedMemory::IsHandleValid(handle)) {
    if (last_state_.error == gpu::error::kNoError)
      last_state_.error = gpu::error::kLostContext;
    return NULL;
  }

  Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_, new_id, handle,
                                                      size));
  *id = new_id;
  scoped_refptr<gpu::Buffer> buffer(
      gpu::MakeBufferFromSharedMemory(std::move(shared_memory), size));
  return buffer;
}

void CommandBufferProxyImpl::DestroyTransferBuffer(int32_t id) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}

gpu::Capabilities CommandBufferProxyImpl::GetCapabilities() {
  return capabilities_;
}

int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer,
                                            size_t width,
                                            size_t height,
                                            unsigned internal_format) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return -1;

  int32_t new_id = channel_->ReserveImageId();

  gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager =
      channel_->gpu_memory_buffer_manager();
  gfx::GpuMemoryBuffer* gpu_memory_buffer =
      gpu_memory_buffer_manager->GpuMemoryBufferFromClientBuffer(buffer);
  DCHECK(gpu_memory_buffer);

  // This handle is owned by the GPU process and must be passed to it or it
  // will leak. In other words, do not early out on error between here and the
  // sending of the CreateImage IPC below.
  bool requires_sync_token = false;
  gfx::GpuMemoryBufferHandle handle =
      channel_->ShareGpuMemoryBufferToGpuProcess(gpu_memory_buffer->GetHandle(),
                                                 &requires_sync_token);

  uint64_t image_fence_sync = 0;
  if (requires_sync_token) {
    image_fence_sync = GenerateFenceSyncRelease();

    // Make sure fence syncs were flushed before CreateImage() was called.
    DCHECK_LE(image_fence_sync - 1, flushed_fence_sync_release_);
  }

  DCHECK(gpu::ImageFactory::IsGpuMemoryBufferFormatSupported(
      gpu_memory_buffer->GetFormat(), capabilities_));
  DCHECK(gpu::ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat(
      gfx::Size(width, height), gpu_memory_buffer->GetFormat()));
  DCHECK(gpu::ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
      internal_format, gpu_memory_buffer->GetFormat()));

  GpuCommandBufferMsg_CreateImage_Params params;
  params.id = new_id;
  params.gpu_memory_buffer = handle;
  params.size = gfx::Size(width, height);
  params.format = gpu_memory_buffer->GetFormat();
  params.internal_format = internal_format;
  params.image_release_count = image_fence_sync;

  Send(new GpuCommandBufferMsg_CreateImage(route_id_, params));

  if (image_fence_sync) {
    gpu::SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(),
                              GetCommandBufferID(), image_fence_sync);

    // Force a synchronous IPC to validate sync token.
    EnsureWorkVisible();
    sync_token.SetVerifyFlush();

    gpu_memory_buffer_manager->SetDestructionSyncToken(gpu_memory_buffer,
                                                       sync_token);
  }

  return new_id;
}

void CommandBufferProxyImpl::DestroyImage(int32_t id) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_DestroyImage(route_id_, id));
}

int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage(
    size_t width,
    size_t height,
    unsigned internal_format,
    unsigned usage) {
  CheckLock();
  scoped_ptr<gfx::GpuMemoryBuffer> buffer(
      channel_->gpu_memory_buffer_manager()->AllocateGpuMemoryBuffer(
          gfx::Size(width, height),
          gpu::ImageFactory::DefaultBufferFormatForImageFormat(internal_format),
          gfx::BufferUsage::SCANOUT));
  if (!buffer)
    return -1;

  return CreateImage(buffer->AsClientBuffer(), width, height, internal_format);
}

uint32_t CommandBufferProxyImpl::CreateStreamTexture(uint32_t texture_id) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return 0;

  int32_t stream_id = channel_->GenerateRouteID();
  bool succeeded = false;
  Send(new GpuCommandBufferMsg_CreateStreamTexture(
      route_id_, texture_id, stream_id, &succeeded));
  if (!succeeded) {
    DLOG(ERROR) << "GpuCommandBufferMsg_CreateStreamTexture returned failure";
    return 0;
  }
  return stream_id;
}

void CommandBufferProxyImpl::SetLock(base::Lock* lock) {
  lock_ = lock;
}

bool CommandBufferProxyImpl::IsGpuChannelLost() {
  return !channel_ || channel_->IsLost();
}

void CommandBufferProxyImpl::EnsureWorkVisible() {
  if (channel_)
    channel_->ValidateFlushIDReachedServer(stream_id_, true);
}

gpu::CommandBufferNamespace CommandBufferProxyImpl::GetNamespaceID() const {
  return gpu::CommandBufferNamespace::GPU_IO;
}

gpu::CommandBufferId CommandBufferProxyImpl::GetCommandBufferID() const {
  return command_buffer_id_;
}

int32_t CommandBufferProxyImpl::GetExtraCommandBufferData() const {
  return stream_id_;
}

uint64_t CommandBufferProxyImpl::GenerateFenceSyncRelease() {
  return next_fence_sync_release_++;
}

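// A fence sync release progresses through three states: generated (a release
// number has been handed out), flushed (an ordering barrier covering it has
// been queued on the channel), and verified (the flush is known to have
// reached the GPU process). The next three predicates test those states.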
bool CommandBufferProxyImpl::IsFenceSyncRelease(uint64_t release) {
  return release != 0 && release < next_fence_sync_release_;
}

bool CommandBufferProxyImpl::IsFenceSyncFlushed(uint64_t release) {
  return release != 0 && release <= flushed_fence_sync_release_;
}

bool CommandBufferProxyImpl::IsFenceSyncFlushReceived(uint64_t release) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return false;

  if (release <= verified_fence_sync_release_)
    return true;

  // Check if we have actually flushed the fence sync release.
  if (release <= flushed_fence_sync_release_) {
    DCHECK(!flushed_release_flush_id_.empty());
    // Check if it has already been validated by another context.
    UpdateVerifiedReleases(channel_->GetHighestValidatedFlushID(stream_id_));
    if (release <= verified_fence_sync_release_)
      return true;

    // Has not been validated, validate it now.
    UpdateVerifiedReleases(
        channel_->ValidateFlushIDReachedServer(stream_id_, false));
    return release <= verified_fence_sync_release_;
  }

  return false;
}

void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token,
                                             const base::Closure& callback) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  uint32_t signal_id = next_signal_id_++;
  Send(new GpuCommandBufferMsg_SignalSyncToken(route_id_, sync_token,
                                               signal_id));
  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken(
    const gpu::SyncToken* sync_token) {
  // Can only wait on an unverified sync token if it is from the same channel.
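  // The channel ID lives in the high 32 bits of the command buffer ID (see
  // CommandBufferProxyID() at the top of this file).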
  const uint64_t token_channel =
      sync_token->command_buffer_id().GetUnsafeValue() >> 32;
  const uint64_t channel = command_buffer_id_.GetUnsafeValue() >> 32;
  if (sync_token->namespace_id() != gpu::CommandBufferNamespace::GPU_IO ||
      token_channel != channel) {
    return false;
  }

  // If waiting on a different stream, flush pending commands on that stream.
  const int32_t release_stream_id = sync_token->extra_data_field();
  if (release_stream_id == 0)
    return false;

  if (release_stream_id != stream_id_)
    channel_->FlushPendingStream(release_stream_id);

  return true;
}

void CommandBufferProxyImpl::SignalQuery(uint32_t query,
                                         const base::Closure& callback) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Signal identifiers are hidden, so nobody outside of this class will see
  // them. (And thus, they cannot save them.) The IDs themselves only last
  // until the callback is invoked, which will happen as soon as the GPU
  // catches up with the command buffer.
  // A malicious caller trying to create a collision by making next_signal_id
  // wrap would have to make calls at an astounding rate (300B/s), and even if
  // they could do that, all they would do is prevent some callbacks from
  // getting called, leading to stalled threads and/or memory leaks.
  uint32_t signal_id = next_signal_id_++;
  Send(new GpuCommandBufferMsg_SignalQuery(route_id_, query, signal_id));
  signal_tasks_.insert(std::make_pair(signal_id, callback));
}

bool CommandBufferProxyImpl::ProduceFrontBuffer(const gpu::Mailbox& mailbox) {
  CheckLock();
  if (last_state_.error != gpu::error::kNoError)
    return false;

  Send(new GpuCommandBufferMsg_ProduceFrontBuffer(route_id_, mailbox));
  return true;
}

gpu::error::Error CommandBufferProxyImpl::GetLastError() {
  return last_state_.error;
}

bool CommandBufferProxyImpl::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);
  DCHECK(channel_);

  if (!msg->is_sync()) {
    bool result = channel_->Send(msg);
    // Send() should always return true for async messages.
    DCHECK(result);
    return true;
  }

  if (channel_->Send(msg))
    return true;

  // Flag the command buffer as lost. Defer deleting the channel until
  // OnChannelError is called after returning to the message loop in case
  // it is referenced elsewhere.
  DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context.";
  last_state_.error = gpu::error::kLostContext;
  return false;
}

void CommandBufferProxyImpl::OnUpdateState(
    const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
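  // The subtraction is unsigned, so e.g. a jump from generation 0xFFFFFFFF
  // to 0x00000001 yields 2, which is < 0x80000000 and therefore still
  // accepted as newer.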
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}

void CommandBufferProxyImpl::SetOnConsoleMessageCallback(
    const GpuConsoleMessageCallback& callback) {
  CheckLock();
  console_message_callback_ = callback;
}

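// Refreshes last_state_ from shared memory, but only while no error has been
// recorded, so that a locally latched error is not overwritten.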
void CommandBufferProxyImpl::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}

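// flushed_release_flush_id_ holds (release, flush_id) pairs in increasing
// order. Pop every entry whose flush is covered by verified_flush, advancing
// verified_fence_sync_release_ as we go.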
void CommandBufferProxyImpl::UpdateVerifiedReleases(uint32_t verified_flush) {
  while (!flushed_release_flush_id_.empty()) {
    const std::pair<uint64_t, uint32_t>& front_item =
        flushed_release_flush_id_.front();
    if (front_item.second > verified_flush)
      break;
    verified_fence_sync_release_ = front_item.first;
    flushed_release_flush_id_.pop();
  }
}

gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
  return reinterpret_cast<gpu::CommandBufferSharedState*>(
      shared_state_shm_->memory());
}

void CommandBufferProxyImpl::OnSwapBuffersCompleted(
    const std::vector<ui::LatencyInfo>& latency_info,
    gfx::SwapResult result) {
  if (!swap_buffers_completion_callback_.is_null()) {
    if (!ui::LatencyInfo::Verify(
            latency_info, "CommandBufferProxyImpl::OnSwapBuffersCompleted")) {
      swap_buffers_completion_callback_.Run(std::vector<ui::LatencyInfo>(),
                                            result);
      return;
    }
    swap_buffers_completion_callback_.Run(latency_info, result);
  }
}

void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase,
                                                     base::TimeDelta interval) {
  if (!update_vsync_parameters_completion_callback_.is_null())
    update_vsync_parameters_completion_callback_.Run(timebase, interval);
}

void CommandBufferProxyImpl::InvalidGpuMessage() {
  LOG(ERROR) << "Received invalid message from the GPU process.";
  OnDestroyed(gpu::error::kInvalidGpuMessage, gpu::error::kLostContext);
}

void CommandBufferProxyImpl::InvalidGpuReply() {
  CheckLock();
  LOG(ERROR) << "Received invalid reply from the GPU process.";
  last_state_.error = gpu::error::kLostContext;
  last_state_.context_lost_reason = gpu::error::kInvalidGpuMessage;
  callback_thread_->PostTask(
      FROM_HERE,
      base::Bind(&CommandBufferProxyImpl::InvalidGpuReplyOnClientThread,
                 weak_this_));
}

void CommandBufferProxyImpl::InvalidGpuReplyOnClientThread() {
  scoped_ptr<base::AutoLock> lock;
  if (lock_)
    lock.reset(new base::AutoLock(*lock_));
  OnDestroyed(gpu::error::kInvalidGpuMessage, gpu::error::kLostContext);
}

}  // namespace content