Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(159)

Side by Side Diff: content/common/gpu/client/gpu_channel_host.cc

Issue 1656433002: Sample code: IPC Transport object for GPU Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: GpuMemoryBufferService + Transport object. TODO: Eliminate ChildThreadImpl dependency Created 4 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/common/gpu/client/gpu_channel_host.h" 5 #include "content/common/gpu/client/gpu_channel_host.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <utility> 8 #include <utility>
9 9
10 #include "base/atomic_sequence_num.h" 10 #include "base/atomic_sequence_num.h"
11 #include "base/bind.h" 11 #include "base/bind.h"
12 #include "base/location.h" 12 #include "base/location.h"
13 #include "base/posix/eintr_wrapper.h" 13 #include "base/posix/eintr_wrapper.h"
14 #include "base/single_thread_task_runner.h" 14 #include "base/single_thread_task_runner.h"
15 #include "base/thread_task_runner_handle.h" 15 #include "base/thread_task_runner_handle.h"
16 #include "base/threading/thread_restrictions.h" 16 #include "base/threading/thread_restrictions.h"
17 #include "base/trace_event/trace_event.h" 17 #include "base/trace_event/trace_event.h"
18 #include "build/build_config.h" 18 #include "build/build_config.h"
19 #include "content/common/gpu/client/command_buffer_proxy_impl.h" 19 #include "content/common/gpu/client/command_buffer_proxy_impl.h"
20 #include "content/common/gpu/client/gpu_channel_host_factory.h"
20 #include "content/common/gpu/client/gpu_jpeg_decode_accelerator_host.h" 21 #include "content/common/gpu/client/gpu_jpeg_decode_accelerator_host.h"
21 #include "content/common/gpu/gpu_messages.h" 22 #include "content/common/gpu/client/ipc/gpu_host_ipc_transport_factory.h"
22 #include "ipc/ipc_sync_message_filter.h" 23 #include "content/common/gpu/gpu_create_command_buffer_config.h"
23 #include "url/gurl.h" 24 #include "url/gurl.h"
24 25
25 #if defined(OS_WIN) || defined(OS_MACOSX) 26 #if defined(OS_WIN) || defined(OS_MACOSX)
26 #include "content/public/common/sandbox_init.h" 27 #include "content/public/common/sandbox_init.h"
27 #endif 28 #endif
28 29
29 using base::AutoLock; 30 using base::AutoLock;
30 31
31 namespace content { 32 namespace content {
32 namespace { 33 namespace {
33 34
34 // Global atomic to generate unique transfer buffer IDs. 35 // Global atomic to generate unique transfer buffer IDs.
35 base::StaticAtomicSequenceNumber g_next_transfer_buffer_id; 36 base::StaticAtomicSequenceNumber g_next_transfer_buffer_id;
36 37
37 } // namespace 38 } // namespace
38 39
39 GpuChannelHost::StreamFlushInfo::StreamFlushInfo() 40 GpuChannelHost::StreamFlushInfo::StreamFlushInfo()
40 : next_stream_flush_id(1), 41 : next_stream_flush_id(1),
41 flushed_stream_flush_id(0), 42 flushed_stream_flush_id(0),
42 verified_stream_flush_id(0), 43 verified_stream_flush_id(0),
43 flush_pending(false), 44 flush_pending(false),
44 route_id(MSG_ROUTING_NONE), 45 transport(nullptr),
45 put_offset(0), 46 put_offset(0),
46 flush_count(0), 47 flush_count(0),
47 flush_id(0) {} 48 flush_id(0) {}
48 49
49 GpuChannelHost::StreamFlushInfo::~StreamFlushInfo() {} 50 GpuChannelHost::StreamFlushInfo::~StreamFlushInfo() {}
50 51
51 // static 52 // static
52 scoped_refptr<GpuChannelHost> GpuChannelHost::Create( 53 scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
53 GpuChannelHostFactory* factory, 54 scoped_ptr<GpuChannelHostIPCTransport> transport,
54 int channel_id,
55 const gpu::GPUInfo& gpu_info, 55 const gpu::GPUInfo& gpu_info,
56 const IPC::ChannelHandle& channel_handle,
57 base::WaitableEvent* shutdown_event,
58 gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) { 56 gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) {
59 DCHECK(factory->IsMainThread()); 57 return new GpuChannelHost(std::move(transport), gpu_info,
60 scoped_refptr<GpuChannelHost> host = 58 gpu_memory_buffer_manager);
61 new GpuChannelHost(factory, channel_id, gpu_info,
62 gpu_memory_buffer_manager);
63 host->Connect(channel_handle, shutdown_event);
64 return host;
65 } 59 }
66 60
67 GpuChannelHost::GpuChannelHost( 61 GpuChannelHost::GpuChannelHost(
68 GpuChannelHostFactory* factory, 62 scoped_ptr<GpuChannelHostIPCTransport> transport,
69 int channel_id,
70 const gpu::GPUInfo& gpu_info, 63 const gpu::GPUInfo& gpu_info,
71 gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) 64 gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager)
72 : factory_(factory), 65 : gpu_info_(gpu_info),
73 channel_id_(channel_id), 66 gpu_memory_buffer_manager_(gpu_memory_buffer_manager),
74 gpu_info_(gpu_info), 67 transport_(std::move(transport)) {
75 gpu_memory_buffer_manager_(gpu_memory_buffer_manager) {
76 next_image_id_.GetNext(); 68 next_image_id_.GetNext();
77 next_route_id_.GetNext();
78 next_stream_id_.GetNext(); 69 next_stream_id_.GetNext();
79 } 70 }
80 71
81 void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
82 base::WaitableEvent* shutdown_event) {
83 DCHECK(factory_->IsMainThread());
84 // Open a channel to the GPU process. We pass NULL as the main listener here
85 // since we need to filter everything to route it to the right thread.
86 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
87 factory_->GetIOThreadTaskRunner();
88 channel_ =
89 IPC::SyncChannel::Create(channel_handle, IPC::Channel::MODE_CLIENT, NULL,
90 io_task_runner.get(), true, shutdown_event);
91
92 sync_filter_ = channel_->CreateSyncMessageFilter();
93
94 channel_filter_ = new MessageFilter();
95
96 // Install the filter last, because we intercept all leftover
97 // messages.
98 channel_->AddFilter(channel_filter_.get());
99 }
100
101 bool GpuChannelHost::Send(IPC::Message* msg) {
102 // Callee takes ownership of message, regardless of whether Send is
103 // successful. See IPC::Sender.
104 scoped_ptr<IPC::Message> message(msg);
105 // The GPU process never sends synchronous IPCs so clear the unblock flag to
106 // preserve order.
107 message->set_unblock(false);
108
109 // Currently we need to choose between two different mechanisms for sending.
110 // On the main thread we use the regular channel Send() method, on another
111 // thread we use SyncMessageFilter. We also have to be careful interpreting
112 // IsMainThread() since it might return false during shutdown,
113 // while we are actually calling from the main thread (discard message then).
114 //
115 // TODO: Can we just always use sync_filter_ since we set up the channel
116 // without a main listener?
117 if (factory_->IsMainThread()) {
118 // channel_ is only modified on the main thread, so we don't need to take a
119 // lock here.
120 if (!channel_) {
121 DVLOG(1) << "GpuChannelHost::Send failed: Channel already destroyed";
122 return false;
123 }
124 // http://crbug.com/125264
125 base::ThreadRestrictions::ScopedAllowWait allow_wait;
126 bool result = channel_->Send(message.release());
127 if (!result)
128 DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
129 return result;
130 }
131
132 bool result = sync_filter_->Send(message.release());
133 return result;
134 }
135
136 uint32_t GpuChannelHost::OrderingBarrier( 72 uint32_t GpuChannelHost::OrderingBarrier(
137 int32_t route_id, 73 CommandBufferIPCTransport* transport,
138 int32_t stream_id, 74 int32_t stream_id,
139 int32_t put_offset, 75 int32_t put_offset,
140 uint32_t flush_count, 76 uint32_t flush_count,
141 const std::vector<ui::LatencyInfo>& latency_info, 77 const std::vector<ui::LatencyInfo>& latency_info,
142 bool put_offset_changed, 78 bool put_offset_changed,
143 bool do_flush) { 79 bool do_flush) {
144 AutoLock lock(context_lock_); 80 AutoLock lock(context_lock_);
145 StreamFlushInfo& flush_info = stream_flush_info_[stream_id]; 81 StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
146 if (flush_info.flush_pending && flush_info.route_id != route_id) 82 if (flush_info.flush_pending && flush_info.transport != transport)
147 InternalFlush(&flush_info); 83 InternalFlush(&flush_info);
148 84
149 if (put_offset_changed) { 85 if (put_offset_changed) {
150 const uint32_t flush_id = flush_info.next_stream_flush_id++; 86 const uint32_t flush_id = flush_info.next_stream_flush_id++;
151 flush_info.flush_pending = true; 87 flush_info.flush_pending = true;
152 flush_info.route_id = route_id; 88 flush_info.transport = transport;
153 flush_info.put_offset = put_offset; 89 flush_info.put_offset = put_offset;
154 flush_info.flush_count = flush_count; 90 flush_info.flush_count = flush_count;
155 flush_info.flush_id = flush_id; 91 flush_info.flush_id = flush_id;
156 flush_info.latency_info.insert(flush_info.latency_info.end(), 92 flush_info.latency_info.insert(flush_info.latency_info.end(),
157 latency_info.begin(), latency_info.end()); 93 latency_info.begin(), latency_info.end());
158 94
159 if (do_flush) 95 if (do_flush)
160 InternalFlush(&flush_info); 96 InternalFlush(&flush_info);
161 97
162 return flush_id; 98 return flush_id;
(...skipping 10 matching lines...) Expand all
173 StreamFlushInfo& flush_info = flush_info_iter->second; 109 StreamFlushInfo& flush_info = flush_info_iter->second;
174 if (flush_info.flush_pending) 110 if (flush_info.flush_pending)
175 InternalFlush(&flush_info); 111 InternalFlush(&flush_info);
176 } 112 }
177 113
178 void GpuChannelHost::InternalFlush(StreamFlushInfo* flush_info) { 114 void GpuChannelHost::InternalFlush(StreamFlushInfo* flush_info) {
179 context_lock_.AssertAcquired(); 115 context_lock_.AssertAcquired();
180 DCHECK(flush_info); 116 DCHECK(flush_info);
181 DCHECK(flush_info->flush_pending); 117 DCHECK(flush_info->flush_pending);
182 DCHECK_LT(flush_info->flushed_stream_flush_id, flush_info->flush_id); 118 DCHECK_LT(flush_info->flushed_stream_flush_id, flush_info->flush_id);
183 Send(new GpuCommandBufferMsg_AsyncFlush( 119 transport_->AsyncFlush(flush_info->transport, flush_info->put_offset,
184 flush_info->route_id, flush_info->put_offset, flush_info->flush_count, 120 flush_info->flush_count, flush_info->latency_info);
185 flush_info->latency_info));
186 flush_info->latency_info.clear(); 121 flush_info->latency_info.clear();
187 flush_info->flush_pending = false; 122 flush_info->flush_pending = false;
188 123
189 flush_info->flushed_stream_flush_id = flush_info->flush_id; 124 flush_info->flushed_stream_flush_id = flush_info->flush_id;
190 } 125 }
191 126
192 scoped_ptr<CommandBufferProxyImpl> GpuChannelHost::CreateViewCommandBuffer( 127 scoped_ptr<CommandBufferProxyImpl> GpuChannelHost::CreateViewCommandBuffer(
193 int32_t surface_id, 128 int32_t surface_id,
194 CommandBufferProxyImpl* share_group, 129 CommandBufferProxyImpl* share_group,
195 int32_t stream_id, 130 int32_t stream_id,
196 GpuStreamPriority stream_priority, 131 GpuStreamPriority stream_priority,
197 const std::vector<int32_t>& attribs, 132 const std::vector<int32_t>& attribs,
198 const GURL& active_url, 133 const GURL& active_url,
199 gfx::GpuPreference gpu_preference) { 134 gfx::GpuPreference gpu_preference) {
200 DCHECK(!share_group || (stream_id == share_group->stream_id())); 135 DCHECK(!share_group || (stream_id == share_group->stream_id()));
201 TRACE_EVENT1("gpu", 136 TRACE_EVENT1("gpu",
202 "GpuChannelHost::CreateViewCommandBuffer", 137 "GpuChannelHost::CreateViewCommandBuffer",
203 "surface_id", 138 "surface_id",
204 surface_id); 139 surface_id);
205 140
206 GPUCreateCommandBufferConfig init_params; 141 content::GpuCreateCommandBufferConfig init_params;
207 init_params.share_group_id = 142 init_params.share_group_id =
208 share_group ? share_group->route_id() : MSG_ROUTING_NONE; 143 share_group ? share_group->transport()->GetShareGroupID() : -2;
209 init_params.stream_id = stream_id; 144 init_params.stream_id = stream_id;
210 init_params.stream_priority = stream_priority; 145 init_params.stream_priority = stream_priority;
211 init_params.attribs = attribs; 146 init_params.attribs = attribs;
212 init_params.active_url = active_url; 147 init_params.active_url = active_url;
213 init_params.gpu_preference = gpu_preference; 148 init_params.gpu_preference = gpu_preference;
214 149
215 int32_t route_id = GenerateRouteID(); 150 CreateCommandBufferResult result =
216 151 CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST;
217 CreateCommandBufferResult result = factory_->CreateViewCommandBuffer( 152 scoped_ptr<CommandBufferIPCTransport> command_buffer_transport(
218 surface_id, init_params, route_id); 153 GpuHostIPCTransportFactory::Get()->CreateCommandBufferIPCTransport());
154 transport_->CreateViewCommandBuffer(surface_id, init_params,
155 command_buffer_transport.get(), &result);
219 if (result != CREATE_COMMAND_BUFFER_SUCCEEDED) { 156 if (result != CREATE_COMMAND_BUFFER_SUCCEEDED) {
220 LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed."; 157 LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed.";
221 158 return nullptr;
222 if (result == CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST) {
223 // The GPU channel needs to be considered lost. The caller will
224 // then set up a new connection, and the GPU channel and any
225 // view command buffers will all be associated with the same GPU
226 // process.
227 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
228 factory_->GetIOThreadTaskRunner();
229 io_task_runner->PostTask(
230 FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::OnChannelError,
231 channel_filter_.get()));
232 }
233
234 return NULL;
235 } 159 }
236 160
237 scoped_ptr<CommandBufferProxyImpl> command_buffer = 161 scoped_ptr<CommandBufferProxyImpl> command_buffer =
238 make_scoped_ptr(new CommandBufferProxyImpl(this, route_id, stream_id)); 162 make_scoped_ptr(new CommandBufferProxyImpl(
239 AddRoute(route_id, command_buffer->AsWeakPtr()); 163 this, std::move(command_buffer_transport), stream_id));
240 164
241 return command_buffer; 165 return command_buffer;
242 } 166 }
243 167
244 scoped_ptr<CommandBufferProxyImpl> GpuChannelHost::CreateOffscreenCommandBuffer( 168 scoped_ptr<CommandBufferProxyImpl> GpuChannelHost::CreateOffscreenCommandBuffer(
245 const gfx::Size& size, 169 const gfx::Size& size,
246 CommandBufferProxyImpl* share_group, 170 CommandBufferProxyImpl* share_group,
247 int32_t stream_id, 171 int32_t stream_id,
248 GpuStreamPriority stream_priority, 172 GpuStreamPriority stream_priority,
249 const std::vector<int32_t>& attribs, 173 const std::vector<int32_t>& attribs,
250 const GURL& active_url, 174 const GURL& active_url,
251 gfx::GpuPreference gpu_preference) { 175 gfx::GpuPreference gpu_preference) {
252 DCHECK(!share_group || (stream_id == share_group->stream_id())); 176 DCHECK(!share_group || (stream_id == share_group->stream_id()));
253 TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer"); 177 TRACE_EVENT0("gpu", "GpuChannelHost::CreateOffscreenCommandBuffer");
254 178
255 GPUCreateCommandBufferConfig init_params; 179 GpuCreateCommandBufferConfig init_params;
256 init_params.share_group_id = 180 init_params.share_group_id =
257 share_group ? share_group->route_id() : MSG_ROUTING_NONE; 181 share_group ? share_group->transport()->GetShareGroupID() : -2;
258 init_params.stream_id = stream_id; 182 init_params.stream_id = stream_id;
259 init_params.stream_priority = stream_priority; 183 init_params.stream_priority = stream_priority;
260 init_params.attribs = attribs; 184 init_params.attribs = attribs;
261 init_params.active_url = active_url; 185 init_params.active_url = active_url;
262 init_params.gpu_preference = gpu_preference; 186 init_params.gpu_preference = gpu_preference;
263 187
264 int32_t route_id = GenerateRouteID(); 188 scoped_ptr<CommandBufferIPCTransport> command_buffer_transport(
189 GpuHostIPCTransportFactory::Get()->CreateCommandBufferIPCTransport());
265 190
266 bool succeeded = false; 191 bool succeeded = false;
267 if (!Send(new GpuChannelMsg_CreateOffscreenCommandBuffer( 192 if (!transport_->CreateOffscreenCommandBuffer(
268 size, init_params, route_id, &succeeded))) { 193 size, init_params, command_buffer_transport.get(), &succeeded)) {
269 LOG(ERROR) << "Failed to send GpuChannelMsg_CreateOffscreenCommandBuffer."; 194 LOG(ERROR) << "Failed to send CreateOffscreenCommandBuffer.";
270 return NULL; 195 return nullptr;
271 } 196 }
272 197
273 if (!succeeded) { 198 if (!succeeded) {
274 LOG(ERROR) 199 LOG(ERROR) << "CreateOffscreenCommandBuffer returned failure.";
275 << "GpuChannelMsg_CreateOffscreenCommandBuffer returned failure."; 200 return nullptr;
276 return NULL;
277 } 201 }
278 202
279 scoped_ptr<CommandBufferProxyImpl> command_buffer = 203 scoped_ptr<CommandBufferProxyImpl> command_buffer =
280 make_scoped_ptr(new CommandBufferProxyImpl(this, route_id, stream_id)); 204 make_scoped_ptr(new CommandBufferProxyImpl(
281 AddRoute(route_id, command_buffer->AsWeakPtr()); 205 this, std::move(command_buffer_transport), stream_id));
282 206
283 return command_buffer; 207 return command_buffer;
284 } 208 }
285 209
286 scoped_ptr<media::JpegDecodeAccelerator> GpuChannelHost::CreateJpegDecoder( 210 scoped_ptr<media::JpegDecodeAccelerator> GpuChannelHost::CreateJpegDecoder(
287 media::JpegDecodeAccelerator::Client* client) { 211 media::JpegDecodeAccelerator::Client* client) {
288 TRACE_EVENT0("gpu", "GpuChannelHost::CreateJpegDecoder"); 212 scoped_ptr<GpuJpegDecodeAcceleratorHostIPCTransport> jpeg_decode_transport(
213 GpuHostIPCTransportFactory::Get()
214 ->CreateJpegDecodeAcceleratorHostIPCTransport());
289 215
290 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner = 216 bool succeeded = false;
291 factory_->GetIOThreadTaskRunner(); 217 if (!transport_->CreateJpegDecoder(jpeg_decode_transport.get(), &succeeded)) {
292 int32_t route_id = GenerateRouteID(); 218 LOG(ERROR) << "Failed to send CreateJpegDecoder";
293 scoped_ptr<GpuJpegDecodeAcceleratorHost> decoder(
294 new GpuJpegDecodeAcceleratorHost(this, route_id, io_task_runner));
295 if (!decoder->Initialize(client)) {
296 return nullptr; 219 return nullptr;
297 } 220 }
298 221
299 // The reply message of jpeg decoder should run on IO thread. 222 if (!succeeded) {
300 io_task_runner->PostTask(FROM_HERE, 223 LOG(ERROR) << "CreateJpegDecoder returned failure.";
301 base::Bind(&GpuChannelHost::MessageFilter::AddRoute, 224 return nullptr;
302 channel_filter_.get(), route_id, 225 }
303 decoder->GetReceiver(), io_task_runner));
304 226
305 return std::move(decoder); 227 scoped_ptr<media::JpegDecodeAccelerator> decoder(
228 new GpuJpegDecodeAcceleratorHost(this, std::move(jpeg_decode_transport)));
229 if (!decoder->Initialize(client))
230 return nullptr;
231
232 return decoder;
306 } 233 }
307 234
308 void GpuChannelHost::DestroyCommandBuffer( 235 void GpuChannelHost::DestroyCommandBuffer(
309 CommandBufferProxyImpl* command_buffer) { 236 CommandBufferProxyImpl* command_buffer) {
310 TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer"); 237 TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");
311 238
312 int32_t route_id = command_buffer->route_id(); 239 transport_->DestroyCommandBuffer(command_buffer->transport());
313 int32_t stream_id = command_buffer->stream_id(); 240 int32_t stream_id = command_buffer->stream_id();
314 Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
315 RemoveRoute(route_id);
316 241
317 AutoLock lock(context_lock_); 242 AutoLock lock(context_lock_);
318 StreamFlushInfo& flush_info = stream_flush_info_[stream_id]; 243 StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
319 if (flush_info.flush_pending && flush_info.route_id == route_id) 244 if (flush_info.flush_pending &&
245 flush_info.transport == command_buffer->transport())
320 flush_info.flush_pending = false; 246 flush_info.flush_pending = false;
321 } 247 }
322 248
323 void GpuChannelHost::DestroyChannel() { 249 void GpuChannelHost::DestroyChannel() {
324 DCHECK(factory_->IsMainThread());
325 AutoLock lock(context_lock_); 250 AutoLock lock(context_lock_);
326 channel_.reset(); 251 transport_.reset();
327 }
328
329 void GpuChannelHost::AddRoute(
330 int route_id, base::WeakPtr<IPC::Listener> listener) {
331 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
332 factory_->GetIOThreadTaskRunner();
333 io_task_runner->PostTask(FROM_HERE,
334 base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
335 channel_filter_.get(), route_id, listener,
336 base::ThreadTaskRunnerHandle::Get()));
337 }
338
339 void GpuChannelHost::RemoveRoute(int route_id) {
340 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
341 factory_->GetIOThreadTaskRunner();
342 io_task_runner->PostTask(
343 FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
344 channel_filter_.get(), route_id));
345 } 252 }
346 253
347 base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess( 254 base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
348 base::SharedMemoryHandle source_handle) { 255 base::SharedMemoryHandle source_handle) {
349 if (IsLost()) 256 AutoLock lock(context_lock_);
350 return base::SharedMemory::NULLHandle(); 257 return transport_->ShareToGpuProcess(source_handle);
351
352 #if defined(OS_WIN) || defined(OS_MACOSX)
353 // Windows and Mac need to explicitly duplicate the handle out to another
354 // process.
355 base::SharedMemoryHandle target_handle;
356 base::ProcessId peer_pid;
357 {
358 AutoLock lock(context_lock_);
359 if (!channel_)
360 return base::SharedMemory::NULLHandle();
361 peer_pid = channel_->GetPeerPID();
362 }
363 bool success = BrokerDuplicateSharedMemoryHandle(source_handle, peer_pid,
364 &target_handle);
365 if (!success)
366 return base::SharedMemory::NULLHandle();
367
368 return target_handle;
369 #else
370 return base::SharedMemory::DuplicateHandle(source_handle);
371 #endif // defined(OS_WIN) || defined(OS_MACOSX)
372 } 258 }
373 259
374 int32_t GpuChannelHost::ReserveTransferBufferId() { 260 int32_t GpuChannelHost::ReserveTransferBufferId() {
375 // 0 is a reserved value. 261 // 0 is a reserved value.
376 return g_next_transfer_buffer_id.GetNext() + 1; 262 return g_next_transfer_buffer_id.GetNext() + 1;
377 } 263 }
378 264
379 gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess( 265 gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
380 const gfx::GpuMemoryBufferHandle& source_handle, 266 const gfx::GpuMemoryBufferHandle& source_handle,
381 bool* requires_sync_point) { 267 bool* requires_sync_point) {
(...skipping 15 matching lines...) Expand all
397 default: 283 default:
398 NOTREACHED(); 284 NOTREACHED();
399 return gfx::GpuMemoryBufferHandle(); 285 return gfx::GpuMemoryBufferHandle();
400 } 286 }
401 } 287 }
402 288
403 int32_t GpuChannelHost::ReserveImageId() { 289 int32_t GpuChannelHost::ReserveImageId() {
404 return next_image_id_.GetNext(); 290 return next_image_id_.GetNext();
405 } 291 }
406 292
407 int32_t GpuChannelHost::GenerateRouteID() {
408 return next_route_id_.GetNext();
409 }
410
411 int32_t GpuChannelHost::GenerateStreamID() { 293 int32_t GpuChannelHost::GenerateStreamID() {
412 const int32_t stream_id = next_stream_id_.GetNext(); 294 const int32_t stream_id = next_stream_id_.GetNext();
413 DCHECK_NE(0, stream_id); 295 DCHECK_NE(0, stream_id);
414 DCHECK_NE(kDefaultStreamId, stream_id); 296 DCHECK_NE(kDefaultStreamId, stream_id);
415 return stream_id; 297 return stream_id;
416 } 298 }
417 299
418 uint32_t GpuChannelHost::ValidateFlushIDReachedServer(int32_t stream_id, 300 uint32_t GpuChannelHost::ValidateFlushIDReachedServer(int32_t stream_id,
419 bool force_validate) { 301 bool force_validate) {
420 // Store what flush ids we will be validating for all streams. 302 // Store what flush ids we will be validating for all streams.
(...skipping 16 matching lines...) Expand all
437 std::make_pair(iter_stream_id, flush_info.flushed_stream_flush_id)); 319 std::make_pair(iter_stream_id, flush_info.flushed_stream_flush_id));
438 } 320 }
439 } 321 }
440 } 322 }
441 323
442 if (!force_validate && flushed_stream_flush_id == verified_stream_flush_id) { 324 if (!force_validate && flushed_stream_flush_id == verified_stream_flush_id) {
443 // Current stream has no unverified flushes. 325 // Current stream has no unverified flushes.
444 return verified_stream_flush_id; 326 return verified_stream_flush_id;
445 } 327 }
446 328
447 if (Send(new GpuChannelMsg_Nop())) { 329 if (transport_->Nop()) {
448 // Update verified flush id for all streams. 330 // Update verified flush id for all streams.
449 uint32_t highest_flush_id = 0; 331 uint32_t highest_flush_id = 0;
450 AutoLock lock(context_lock_); 332 AutoLock lock(context_lock_);
451 for (const auto& iter : validate_flushes) { 333 for (const auto& iter : validate_flushes) {
452 const int32_t validated_stream_id = iter.first; 334 const int32_t validated_stream_id = iter.first;
453 const uint32_t validated_flush_id = iter.second; 335 const uint32_t validated_flush_id = iter.second;
454 StreamFlushInfo& flush_info = stream_flush_info_[validated_stream_id]; 336 StreamFlushInfo& flush_info = stream_flush_info_[validated_stream_id];
455 if (flush_info.verified_stream_flush_id < validated_flush_id) { 337 if (flush_info.verified_stream_flush_id < validated_flush_id) {
456 flush_info.verified_stream_flush_id = validated_flush_id; 338 flush_info.verified_stream_flush_id = validated_flush_id;
457 } 339 }
(...skipping 10 matching lines...) Expand all
468 350
469 uint32_t GpuChannelHost::GetHighestValidatedFlushID(int32_t stream_id) { 351 uint32_t GpuChannelHost::GetHighestValidatedFlushID(int32_t stream_id) {
470 AutoLock lock(context_lock_); 352 AutoLock lock(context_lock_);
471 StreamFlushInfo& flush_info = stream_flush_info_[stream_id]; 353 StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
472 return flush_info.verified_stream_flush_id; 354 return flush_info.verified_stream_flush_id;
473 } 355 }
474 356
475 GpuChannelHost::~GpuChannelHost() { 357 GpuChannelHost::~GpuChannelHost() {
476 #if DCHECK_IS_ON() 358 #if DCHECK_IS_ON()
477 AutoLock lock(context_lock_); 359 AutoLock lock(context_lock_);
478 DCHECK(!channel_) 360 DCHECK(!transport_)
479 << "GpuChannelHost::DestroyChannel must be called before destruction."; 361 << "GpuChannelHost::DestroyChannel must be called before destruction.";
480 #endif 362 #endif
481 } 363 }
482 364
483 GpuChannelHost::MessageFilter::ListenerInfo::ListenerInfo() {}
484
485 GpuChannelHost::MessageFilter::ListenerInfo::~ListenerInfo() {}
486
487 GpuChannelHost::MessageFilter::MessageFilter()
488 : lost_(false) {
489 }
490
491 GpuChannelHost::MessageFilter::~MessageFilter() {}
492
493 void GpuChannelHost::MessageFilter::AddRoute(
494 int32_t route_id,
495 base::WeakPtr<IPC::Listener> listener,
496 scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
497 DCHECK(listeners_.find(route_id) == listeners_.end());
498 DCHECK(task_runner);
499 ListenerInfo info;
500 info.listener = listener;
501 info.task_runner = task_runner;
502 listeners_[route_id] = info;
503 }
504
505 void GpuChannelHost::MessageFilter::RemoveRoute(int32_t route_id) {
506 listeners_.erase(route_id);
507 }
508
509 bool GpuChannelHost::MessageFilter::OnMessageReceived(
510 const IPC::Message& message) {
511 // Never handle sync message replies or we will deadlock here.
512 if (message.is_reply())
513 return false;
514
515 auto it = listeners_.find(message.routing_id());
516 if (it == listeners_.end())
517 return false;
518
519 const ListenerInfo& info = it->second;
520 info.task_runner->PostTask(
521 FROM_HERE,
522 base::Bind(base::IgnoreResult(&IPC::Listener::OnMessageReceived),
523 info.listener, message));
524 return true;
525 }
526
527 void GpuChannelHost::MessageFilter::OnChannelError() {
528 // Set the lost state before signalling the proxies. That way, if they
529 // themselves post a task to recreate the context, they will not try to re-use
530 // this channel host.
531 {
532 AutoLock lock(lock_);
533 lost_ = true;
534 }
535
536 // Inform all the proxies that an error has occurred. This will be reported
537 // via OpenGL as a lost context.
538 for (const auto& kv : listeners_) {
539 const ListenerInfo& info = kv.second;
540 info.task_runner->PostTask(
541 FROM_HERE, base::Bind(&IPC::Listener::OnChannelError, info.listener));
542 }
543
544 listeners_.clear();
545 }
546
547 bool GpuChannelHost::MessageFilter::IsLost() const {
548 AutoLock lock(lock_);
549 return lost_;
550 }
551
552 } // namespace content 365 } // namespace content
OLDNEW
« no previous file with comments | « content/common/gpu/client/gpu_channel_host.h ('k') | content/common/gpu/client/gpu_channel_host_factory.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698