OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "gpu/ipc/client/command_buffer_proxy_impl.h" | 5 #include "gpu/ipc/client/command_buffer_proxy_impl.h" |
6 | 6 |
7 #include <utility> | 7 #include <utility> |
8 #include <vector> | 8 #include <vector> |
9 | 9 |
10 #include "base/callback.h" | 10 #include "base/callback.h" |
(...skipping 26 matching lines...) | |
37 | 37 |
38 namespace gpu { | 38 namespace gpu { |
39 | 39 |
40 namespace { | 40 namespace { |
41 | 41 |
42 gpu::CommandBufferId CommandBufferProxyID(int channel_id, int32_t route_id) { | 42 gpu::CommandBufferId CommandBufferProxyID(int channel_id, int32_t route_id) { |
43 return gpu::CommandBufferId::FromUnsafeValue( | 43 return gpu::CommandBufferId::FromUnsafeValue( |
44 (static_cast<uint64_t>(channel_id) << 32) | route_id); | 44 (static_cast<uint64_t>(channel_id) << 32) | route_id); |
45 } | 45 } |
46 | 46 |
47 int GetChannelID(gpu::CommandBufferId command_buffer_id) { | |
48 return static_cast<int>(command_buffer_id.GetUnsafeValue() >> 32); | |
49 } | |
50 | |
47 } // namespace | 51 } // namespace |
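The two helpers above agree on a simple bit layout: the channel ID occupies the high 32 bits of the 64-bit command buffer ID and the route ID the low 32 bits, so the new `GetChannelID()` is just the inverse of `CommandBufferProxyID()`. A standalone sketch of that round trip (illustrative names, not the Chromium types; assumes a non-negative route ID):

```cpp
#include <cassert>
#include <cstdint>

// Pack the channel ID into the high 32 bits and the route ID into the low
// 32 bits, mirroring CommandBufferProxyID() above.
uint64_t PackId(int channel_id, int32_t route_id) {
  return (static_cast<uint64_t>(channel_id) << 32) |
         static_cast<uint32_t>(route_id);
}

// Recover the channel ID from the packed value, mirroring GetChannelID().
int ChannelIdOf(uint64_t packed) {
  return static_cast<int>(packed >> 32);
}

int main() {
  const uint64_t id = PackId(/*channel_id=*/7, /*route_id=*/42);
  assert(ChannelIdOf(id) == 7);                 // High bits hold the channel.
  assert((id & 0xffffffffu) == 42u);            // Low bits hold the route.
  return 0;
}
```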
48 | 52 |
49 CommandBufferProxyImpl::CommandBufferProxyImpl(int channel_id, | 53 CommandBufferProxyImpl::CommandBufferProxyImpl(int channel_id, |
50 int32_t route_id, | 54 int32_t route_id, |
51 int32_t stream_id) | 55 int32_t stream_id) |
52 : lock_(nullptr), | 56 : lock_(nullptr), |
53 gpu_control_client_(nullptr), | 57 gpu_control_client_(nullptr), |
54 command_buffer_id_(CommandBufferProxyID(channel_id, route_id)), | 58 command_buffer_id_(CommandBufferProxyID(channel_id, route_id)), |
55 route_id_(route_id), | 59 route_id_(route_id), |
56 stream_id_(stream_id), | 60 stream_id_(stream_id), |
57 flush_count_(0), | |
58 last_put_offset_(-1), | |
59 last_barrier_put_offset_(-1), | |
60 next_fence_sync_release_(1), | |
61 flushed_fence_sync_release_(0), | |
62 verified_fence_sync_release_(0), | |
63 next_signal_id_(0), | |
64 weak_this_(AsWeakPtr()) { | 61 weak_this_(AsWeakPtr()) { |
65 DCHECK(route_id); | 62 DCHECK(route_id); |
66 DCHECK_NE(stream_id, GPU_STREAM_INVALID); | 63 DCHECK_NE(stream_id, GPU_STREAM_INVALID); |
67 } | 64 } |
68 | 65 |
69 // static | 66 // static |
70 std::unique_ptr<CommandBufferProxyImpl> CommandBufferProxyImpl::Create( | 67 std::unique_ptr<CommandBufferProxyImpl> CommandBufferProxyImpl::Create( |
71 scoped_refptr<GpuChannelHost> host, | 68 scoped_refptr<GpuChannelHost> host, |
72 gpu::SurfaceHandle surface_handle, | 69 gpu::SurfaceHandle surface_handle, |
73 CommandBufferProxyImpl* share_group, | 70 CommandBufferProxyImpl* share_group, |
(...skipping 184 matching lines...) | |
258 put_offset); | 255 put_offset); |
259 | 256 |
260 bool put_offset_changed = last_put_offset_ != put_offset; | 257 bool put_offset_changed = last_put_offset_ != put_offset; |
261 last_put_offset_ = put_offset; | 258 last_put_offset_ = put_offset; |
262 last_barrier_put_offset_ = put_offset; | 259 last_barrier_put_offset_ = put_offset; |
263 | 260 |
264 if (channel_) { | 261 if (channel_) { |
265 uint32_t highest_verified_flush_id; | 262 uint32_t highest_verified_flush_id; |
266 const uint32_t flush_id = channel_->OrderingBarrier( | 263 const uint32_t flush_id = channel_->OrderingBarrier( |
267 route_id_, stream_id_, put_offset, ++flush_count_, latency_info_, | 264 route_id_, stream_id_, put_offset, ++flush_count_, latency_info_, |
268 put_offset_changed, true, &highest_verified_flush_id); | 265 pending_sync_token_fences_, put_offset_changed, true, |
266 &highest_verified_flush_id); | |
269 if (put_offset_changed) { | 267 if (put_offset_changed) { |
270 DCHECK(flush_id); | 268 DCHECK(flush_id); |
271 const uint64_t fence_sync_release = next_fence_sync_release_ - 1; | 269 const uint64_t fence_sync_release = next_fence_sync_release_ - 1; |
272 if (fence_sync_release > flushed_fence_sync_release_) { | 270 if (fence_sync_release > flushed_fence_sync_release_) { |
273 flushed_fence_sync_release_ = fence_sync_release; | 271 flushed_fence_sync_release_ = fence_sync_release; |
274 flushed_release_flush_id_.push( | 272 flushed_release_flush_id_.push( |
275 std::make_pair(fence_sync_release, flush_id)); | 273 std::make_pair(fence_sync_release, flush_id)); |
276 } | 274 } |
277 } | 275 } |
278 CleanupFlushedReleases(highest_verified_flush_id); | 276 CleanupFlushedReleases(highest_verified_flush_id); |
279 } | 277 } |
280 | 278 |
281 if (put_offset_changed) | 279 if (put_offset_changed) { |
282 latency_info_.clear(); | 280 latency_info_.clear(); |
281 pending_sync_token_fences_.clear(); | |
282 } | |
283 } | 283 } |
284 | 284 |
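For context on the `flushed_release_flush_id_` queue used above: each flush that advances the fence sync release pushes a `(release, flush_id)` pair, and `CleanupFlushedReleases()` (not shown in this hunk) presumably pops pairs whose flush ID the channel has since verified. A rough, self-contained sketch of that bookkeeping, under that assumption and with made-up names:

```cpp
#include <cstdint>
#include <queue>
#include <utility>

// Hypothetical stand-in for the (fence_sync_release, flush_id) tracking.
class ReleaseTracker {
 public:
  // Called when a flush advances the highest queued release, as in Flush()
  // and OrderingBarrier() above.
  void OnFlushed(uint64_t release, uint32_t flush_id) {
    if (release > flushed_release_) {
      flushed_release_ = release;
      flushed_release_flush_id_.push({release, flush_id});
    }
  }

  // Assumed shape of CleanupFlushedReleases(): any release whose flush ID
  // the channel reports as verified is now itself verified.
  void OnVerifiedFlush(uint32_t highest_verified_flush_id) {
    while (!flushed_release_flush_id_.empty() &&
           flushed_release_flush_id_.front().second <=
               highest_verified_flush_id) {
      verified_release_ = flushed_release_flush_id_.front().first;
      flushed_release_flush_id_.pop();
    }
  }

  uint64_t verified_release() const { return verified_release_; }

 private:
  uint64_t flushed_release_ = 0;
  uint64_t verified_release_ = 0;
  std::queue<std::pair<uint64_t, uint32_t>> flushed_release_flush_id_;
};
```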
285 void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) { | 285 void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) { |
286 CheckLock(); | 286 CheckLock(); |
287 base::AutoLock lock(last_state_lock_); | 287 base::AutoLock lock(last_state_lock_); |
288 if (last_state_.error != gpu::error::kNoError) | 288 if (last_state_.error != gpu::error::kNoError) |
289 return; | 289 return; |
290 | 290 |
291 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset", | 291 TRACE_EVENT1("gpu", "CommandBufferProxyImpl::OrderingBarrier", "put_offset", |
292 put_offset); | 292 put_offset); |
293 | 293 |
294 bool put_offset_changed = last_barrier_put_offset_ != put_offset; | 294 bool put_offset_changed = last_barrier_put_offset_ != put_offset; |
295 last_barrier_put_offset_ = put_offset; | 295 last_barrier_put_offset_ = put_offset; |
296 | 296 |
297 if (channel_) { | 297 if (channel_) { |
298 uint32_t highest_verified_flush_id; | 298 uint32_t highest_verified_flush_id; |
299 const uint32_t flush_id = channel_->OrderingBarrier( | 299 const uint32_t flush_id = channel_->OrderingBarrier( |
300 route_id_, stream_id_, put_offset, ++flush_count_, latency_info_, | 300 route_id_, stream_id_, put_offset, ++flush_count_, latency_info_, |
301 put_offset_changed, false, &highest_verified_flush_id); | 301 pending_sync_token_fences_, put_offset_changed, false, |
302 &highest_verified_flush_id); | |
302 | 303 |
303 if (put_offset_changed) { | 304 if (put_offset_changed) { |
304 DCHECK(flush_id); | 305 DCHECK(flush_id); |
305 const uint64_t fence_sync_release = next_fence_sync_release_ - 1; | 306 const uint64_t fence_sync_release = next_fence_sync_release_ - 1; |
306 if (fence_sync_release > flushed_fence_sync_release_) { | 307 if (fence_sync_release > flushed_fence_sync_release_) { |
307 flushed_fence_sync_release_ = fence_sync_release; | 308 flushed_fence_sync_release_ = fence_sync_release; |
308 flushed_release_flush_id_.push( | 309 flushed_release_flush_id_.push( |
309 std::make_pair(fence_sync_release, flush_id)); | 310 std::make_pair(fence_sync_release, flush_id)); |
310 } | 311 } |
311 } | 312 } |
312 CleanupFlushedReleases(highest_verified_flush_id); | 313 CleanupFlushedReleases(highest_verified_flush_id); |
313 } | 314 } |
314 | 315 if (put_offset_changed) { |
315 if (put_offset_changed) | |
316 latency_info_.clear(); | 316 latency_info_.clear(); |
317 pending_sync_token_fences_.clear(); | |
318 } | |
317 } | 319 } |
318 | 320 |
319 void CommandBufferProxyImpl::SetLatencyInfo( | 321 void CommandBufferProxyImpl::SetLatencyInfo( |
320 const std::vector<ui::LatencyInfo>& latency_info) { | 322 const std::vector<ui::LatencyInfo>& latency_info) { |
321 CheckLock(); | 323 CheckLock(); |
322 for (size_t i = 0; i < latency_info.size(); i++) | 324 for (size_t i = 0; i < latency_info.size(); i++) |
323 latency_info_.push_back(latency_info[i]); | 325 latency_info_.push_back(latency_info[i]); |
324 } | 326 } |
325 | 327 |
326 void CommandBufferProxyImpl::SetSwapBuffersCompletionCallback( | 328 void CommandBufferProxyImpl::SetSwapBuffersCompletionCallback( |
(...skipping 320 matching lines...) | |
647 base::AutoLock lock(last_state_lock_); | 649 base::AutoLock lock(last_state_lock_); |
648 if (last_state_.error != gpu::error::kNoError) | 650 if (last_state_.error != gpu::error::kNoError) |
649 return; | 651 return; |
650 | 652 |
651 uint32_t signal_id = next_signal_id_++; | 653 uint32_t signal_id = next_signal_id_++; |
652 Send(new GpuCommandBufferMsg_SignalSyncToken(route_id_, sync_token, | 654 Send(new GpuCommandBufferMsg_SignalSyncToken(route_id_, sync_token, |
653 signal_id)); | 655 signal_id)); |
654 signal_tasks_.insert(std::make_pair(signal_id, callback)); | 656 signal_tasks_.insert(std::make_pair(signal_id, callback)); |
655 } | 657 } |
656 | 658 |
659 void CommandBufferProxyImpl::WaitSyncToken(const SyncToken& sync_token) { | |
660 CheckLock(); | |
661 base::AutoLock lock(last_state_lock_); | |
662 if (last_state_.error != gpu::error::kNoError) | |
663 return; | |
664 | |
665 // We can only send verified sync tokens across IPC. | |
666 SyncToken verified_sync_token = sync_token; | |
667 verified_sync_token.SetVerifyFlush(); | |
piman 2017/03/13 23:17:39: nit: I would suggest moving this to GLES2Implement
668 | |
669 pending_sync_token_fences_.push_back(verified_sync_token); | |
670 } | |
671 | |
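The new `WaitSyncToken()` path sends no IPC of its own: it marks a copy of the token as verified and queues it in `pending_sync_token_fences_`, and the fences ride along with the next `Flush()`/`OrderingBarrier()` call, which clears the list once the put offset actually changes. A simplified sketch of that queue-then-batch pattern (toy types, not the real `gpu::SyncToken`):

```cpp
#include <utility>
#include <vector>

// Toy stand-in for gpu::SyncToken, just enough to show the flow.
struct ToySyncToken {
  bool verified = false;
  void SetVerifyFlush() { verified = true; }
};

class FenceBatcher {
 public:
  // Mirrors WaitSyncToken(): only verified tokens may cross IPC, so mark
  // the copy as verified and defer it until the next flush.
  void QueueWait(ToySyncToken token) {
    token.SetVerifyFlush();
    pending_fences_.push_back(token);
  }

  // Mirrors the Flush()/OrderingBarrier() side: the whole batch is handed
  // to the channel in one message, then the local list starts empty again.
  std::vector<ToySyncToken> TakeBatchForFlush() {
    return std::exchange(pending_fences_, {});
  }

 private:
  std::vector<ToySyncToken> pending_fences_;
};
```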
657 bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken( | 672 bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken( |
658 const gpu::SyncToken* sync_token) { | 673 const gpu::SyncToken& sync_token) { |
659 // Can only wait on an unverified sync token if it is from the same channel. | 674 // Can only wait on an unverified sync token if it is from the same channel. |
660 const uint64_t token_channel = | 675 int sync_token_channel_id = GetChannelID(sync_token.command_buffer_id()); |
661 sync_token->command_buffer_id().GetUnsafeValue() >> 32; | 676 int channel_id = GetChannelID(command_buffer_id_); |
662 const uint64_t channel = command_buffer_id_.GetUnsafeValue() >> 32; | 677 if (sync_token.namespace_id() != gpu::CommandBufferNamespace::GPU_IO || |
663 if (sync_token->namespace_id() != gpu::CommandBufferNamespace::GPU_IO || | 678 sync_token_channel_id != channel_id) { |
664 token_channel != channel) { | |
665 return false; | 679 return false; |
666 } | 680 } |
667 | 681 |
668 // If waiting on a different stream, flush pending commands on that stream. | 682 // If waiting on a different stream, flush pending commands on that stream. |
669 const int32_t release_stream_id = sync_token->extra_data_field(); | 683 int32_t release_stream_id = sync_token.extra_data_field(); |
670 if (release_stream_id == gpu::GPU_STREAM_INVALID) | 684 if (release_stream_id == gpu::GPU_STREAM_INVALID) |
671 return false; | 685 return false; |
672 | 686 |
673 if (release_stream_id != stream_id_) | 687 if (release_stream_id != stream_id_) |
674 channel_->FlushPendingStream(release_stream_id); | 688 channel_->FlushPendingStream(release_stream_id); |
675 | 689 |
676 return true; | 690 return true; |
677 } | 691 } |
678 | 692 |
679 void CommandBufferProxyImpl::SignalQuery(uint32_t query, | 693 void CommandBufferProxyImpl::SignalQuery(uint32_t query, |
(...skipping 256 matching lines...) | |
936 return; | 950 return; |
937 channel_->FlushPendingStream(stream_id_); | 951 channel_->FlushPendingStream(stream_id_); |
938 channel_->Send(new GpuChannelMsg_DestroyCommandBuffer(route_id_)); | 952 channel_->Send(new GpuChannelMsg_DestroyCommandBuffer(route_id_)); |
939 channel_->RemoveRoute(route_id_); | 953 channel_->RemoveRoute(route_id_); |
940 channel_ = nullptr; | 954 channel_ = nullptr; |
941 if (gpu_control_client_) | 955 if (gpu_control_client_) |
942 gpu_control_client_->OnGpuControlLostContext(); | 956 gpu_control_client_->OnGpuControlLostContext(); |
943 } | 957 } |
944 | 958 |
945 } // namespace gpu | 959 } // namespace gpu |