OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/common/gpu/client/gpu_channel_host.h" | 5 #include "content/common/gpu/client/gpu_channel_host.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 | 8 |
9 #include "base/atomic_sequence_num.h" | 9 #include "base/atomic_sequence_num.h" |
10 #include "base/bind.h" | 10 #include "base/bind.h" |
(...skipping 55 matching lines...) | |
66 GpuChannelHostFactory* factory, | 66 GpuChannelHostFactory* factory, |
67 int channel_id, | 67 int channel_id, |
68 const gpu::GPUInfo& gpu_info, | 68 const gpu::GPUInfo& gpu_info, |
69 gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) | 69 gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) |
70 : factory_(factory), | 70 : factory_(factory), |
71 channel_id_(channel_id), | 71 channel_id_(channel_id), |
72 gpu_info_(gpu_info), | 72 gpu_info_(gpu_info), |
73 gpu_memory_buffer_manager_(gpu_memory_buffer_manager) { | 73 gpu_memory_buffer_manager_(gpu_memory_buffer_manager) { |
74 next_image_id_.GetNext(); | 74 next_image_id_.GetNext(); |
75 next_route_id_.GetNext(); | 75 next_route_id_.GetNext(); |
76 next_stream_id_.GetNext(); | 76 next_stream_id_.GetNext(); |
piman 2015/12/04 22:48:46
You'll need to bump this too, otherwise the first …
David Yen 2015/12/04 23:30:27
Is the plan to get rid of the default stream ID an …
David Yen 2015/12/05 00:01:18
I changed kDefaultStreamId to be -1 and added DCHE …
piman 2015/12/05 02:16:24
Maybe we can consider getting rid of the default s …
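The truncated thread above appears to concern the interaction between the constructor's next_stream_id_.GetNext() call and the default stream ID: if the default ID overlaps the sequence's starting value, the first dynamically generated stream can collide with it. Below is a minimal standalone sketch of that collision and of the two remedies the comments mention (bumping the sequence once more, or moving the default to -1). kHypotheticalDefaultStreamId and SequenceNumber are simplified stand-ins for illustration, not the real kDefaultStreamId or base::AtomicSequenceNumber.

```cpp
// Standalone sketch only: illustrates the stream-ID collision discussed in
// the review, with simplified stand-in types.
#include <atomic>
#include <cassert>

constexpr int kHypotheticalDefaultStreamId = 0;  // assumed value for the demo

class SequenceNumber {
 public:
  // Hands out 0, 1, 2, ... (returns the current value, then increments),
  // modeled on the GetNext() calls in the constructor above.
  int GetNext() { return next_.fetch_add(1); }

 private:
  std::atomic<int> next_{0};
};

int main() {
  // Without any reserving call, the first generated ID equals the assumed
  // default stream ID -- the collision the first comment warns about.
  SequenceNumber unbumped;
  assert(unbumped.GetNext() == kHypotheticalDefaultStreamId);

  // Remedy A: bump the sequence once in the constructor so generated IDs
  // start above the default. Remedy B (the route described in the follow-up
  // comment) is to move the default to -1 so it can never be generated.
  SequenceNumber bumped;
  bumped.GetNext();  // constructor-style reserve
  assert(bumped.GetNext() > kHypotheticalDefaultStreamId);
  return 0;
}
```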
77 } | 77 } |
78 | 78 |
79 void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle, | 79 void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle, |
80 base::WaitableEvent* shutdown_event) { | 80 base::WaitableEvent* shutdown_event) { |
81 DCHECK(factory_->IsMainThread()); | 81 DCHECK(factory_->IsMainThread()); |
82 // Open a channel to the GPU process. We pass NULL as the main listener here | 82 // Open a channel to the GPU process. We pass NULL as the main listener here |
83 // since we need to filter everything to route it to the right thread. | 83 // since we need to filter everything to route it to the right thread. |
84 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner = | 84 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner = |
85 factory_->GetIOThreadTaskRunner(); | 85 factory_->GetIOThreadTaskRunner(); |
86 channel_ = | 86 channel_ = |
(...skipping 68 matching lines...) | |
155 latency_info.begin(), latency_info.end()); | 155 latency_info.begin(), latency_info.end()); |
156 | 156 |
157 if (do_flush) | 157 if (do_flush) |
158 InternalFlush(&flush_info); | 158 InternalFlush(&flush_info); |
159 | 159 |
160 return flush_id; | 160 return flush_id; |
161 } | 161 } |
162 return 0; | 162 return 0; |
163 } | 163 } |
164 | 164 |
| 165 void GpuChannelHost::FlushPendingStream(int32 stream_id) { |
| 166 AutoLock lock(context_lock_); |
| 167 auto flush_info_iter = stream_flush_info_.find(stream_id); |
| 168 if (flush_info_iter == stream_flush_info_.end()) |
| 169 return; |
| 170 |
| 171 StreamFlushInfo& flush_info = flush_info_iter->second; |
| 172 if (flush_info.flush_pending) |
| 173 InternalFlush(&flush_info); |
| 174 } |
| 175 |
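The added FlushPendingStream() takes context_lock_, looks up the stream's StreamFlushInfo, and only forwards to InternalFlush() when a flush is actually pending, so unknown or idle streams are no-ops. A rough standalone sketch of that control flow follows; the types are assumed simplifications (std::mutex and a trimmed StreamFlushInfo stand in for base::Lock and the real struct, and the IPC send is stubbed out).

```cpp
// Sketch of the per-stream deferred-flush lookup; not the real Chromium
// classes, just the same control flow with simplified types.
#include <cstdint>
#include <map>
#include <mutex>

struct StreamFlushInfo {
  bool flush_pending = false;  // trimmed: real struct also carries offsets, latency info
};

class ChannelLike {
 public:
  // Mirrors FlushPendingStream(): unknown streams and streams with nothing
  // pending return early, so callers may invoke this unconditionally.
  void FlushPendingStream(int32_t stream_id) {
    std::lock_guard<std::mutex> lock(lock_);
    auto it = stream_flush_info_.find(stream_id);
    if (it == stream_flush_info_.end() || !it->second.flush_pending)
      return;
    InternalFlush(&it->second);
  }

  // Hypothetical helper so the sketch can exercise the pending path.
  void MarkPendingForTesting(int32_t stream_id) {
    std::lock_guard<std::mutex> lock(lock_);
    stream_flush_info_[stream_id].flush_pending = true;
  }

 private:
  void InternalFlush(StreamFlushInfo* info) {
    // Stand-in for sending GpuCommandBufferMsg_AsyncFlush over IPC.
    info->flush_pending = false;
  }

  std::mutex lock_;
  std::map<int32_t, StreamFlushInfo> stream_flush_info_;
};

int main() {
  ChannelLike channel;
  channel.FlushPendingStream(7);   // unknown stream: early return
  channel.MarkPendingForTesting(3);
  channel.FlushPendingStream(3);   // pending stream: flushed once
  return 0;
}
```

Keeping the early-out inside the lock matches the real function, where flush_pending is only read or cleared while context_lock_ is held (InternalFlush asserts the lock is acquired).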
165 void GpuChannelHost::InternalFlush(StreamFlushInfo* flush_info) { | 176 void GpuChannelHost::InternalFlush(StreamFlushInfo* flush_info) { |
166 context_lock_.AssertAcquired(); | 177 context_lock_.AssertAcquired(); |
167 DCHECK(flush_info); | 178 DCHECK(flush_info); |
168 DCHECK(flush_info->flush_pending); | 179 DCHECK(flush_info->flush_pending); |
169 DCHECK_LT(flush_info->flushed_stream_flush_id, flush_info->flush_id); | 180 DCHECK_LT(flush_info->flushed_stream_flush_id, flush_info->flush_id); |
170 Send(new GpuCommandBufferMsg_AsyncFlush( | 181 Send(new GpuCommandBufferMsg_AsyncFlush( |
171 flush_info->route_id, flush_info->put_offset, flush_info->flush_count, | 182 flush_info->route_id, flush_info->put_offset, flush_info->flush_count, |
172 flush_info->latency_info)); | 183 flush_info->latency_info)); |
173 flush_info->latency_info.clear(); | 184 flush_info->latency_info.clear(); |
174 flush_info->flush_pending = false; | 185 flush_info->flush_pending = false; |
(...skipping 352 matching lines...) | |
527 | 538 |
528 listeners_.clear(); | 539 listeners_.clear(); |
529 } | 540 } |
530 | 541 |
531 bool GpuChannelHost::MessageFilter::IsLost() const { | 542 bool GpuChannelHost::MessageFilter::IsLost() const { |
532 AutoLock lock(lock_); | 543 AutoLock lock(lock_); |
533 return lost_; | 544 return lost_; |
534 } | 545 } |
535 | 546 |
536 } // namespace content | 547 } // namespace content |