// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/gpu_channel_host.h"

#include <algorithm>
#include <utility>

#include "base/atomic_sequence_num.h"
#include "base/bind.h"
#include "base/location.h"
#include "base/posix/eintr_wrapper.h"
#include "base/profiler/scoped_tracker.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"
#include "base/threading/thread_restrictions.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "content/common/gpu/client/command_buffer_proxy_impl.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/common/gpu_param_traits_macros.h"
#include "ipc/ipc_sync_message_filter.h"
#include "url/gurl.h"
using base::AutoLock;

namespace content {
namespace {

// Global atomic used to generate unique transfer buffer IDs.
base::StaticAtomicSequenceNumber g_next_transfer_buffer_id;

}  // namespace

GpuChannelHost::StreamFlushInfo::StreamFlushInfo()
    : next_stream_flush_id(1),
      flushed_stream_flush_id(0),
      verified_stream_flush_id(0),
      flush_pending(false),
      route_id(MSG_ROUTING_NONE),
      put_offset(0),
      flush_count(0),
      flush_id(0) {}

GpuChannelHost::StreamFlushInfo::StreamFlushInfo(const StreamFlushInfo& other) =
    default;

GpuChannelHost::StreamFlushInfo::~StreamFlushInfo() {}

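// Illustrative use of the factory function below (a sketch only: the factory,
// channel handle, and shutdown event come from the embedder and are
// assumptions of this example, not requirements of the API):
//
//   scoped_refptr<GpuChannelHost> host =
//       GpuChannelHost::Create(factory, channel_id, gpu_info, channel_handle,
//                              shutdown_event, gpu_memory_buffer_manager);
//   // |host| is connected and ready for CreateCommandBuffer() once Create()
//   // returns, since Create() calls Connect() internally.
//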
// static
scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
    GpuChannelHostFactory* factory,
    int channel_id,
    const gpu::GPUInfo& gpu_info,
    const IPC::ChannelHandle& channel_handle,
    base::WaitableEvent* shutdown_event,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) {
  DCHECK(factory->IsMainThread());
  scoped_refptr<GpuChannelHost> host = new GpuChannelHost(
      factory, channel_id, gpu_info, gpu_memory_buffer_manager);
  host->Connect(channel_handle, shutdown_event);
  return host;
}

GpuChannelHost::GpuChannelHost(
    GpuChannelHostFactory* factory,
    int channel_id,
    const gpu::GPUInfo& gpu_info,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager)
    : factory_(factory),
      channel_id_(channel_id),
      gpu_info_(gpu_info),
      gpu_memory_buffer_manager_(gpu_memory_buffer_manager) {
  // Consume the first value of each counter so that 0, which is reserved as
  // an invalid ID, is never handed out.
  next_image_id_.GetNext();
  next_route_id_.GetNext();
  next_stream_id_.GetNext();
}

void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
                             base::WaitableEvent* shutdown_event) {
  DCHECK(factory_->IsMainThread());
  // Open a channel to the GPU process. We pass nullptr as the main listener
  // here since we need to filter everything to route it to the right thread.
  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
      factory_->GetIOThreadTaskRunner();
  channel_ = IPC::SyncChannel::Create(channel_handle, IPC::Channel::MODE_CLIENT,
                                      nullptr, io_task_runner.get(), true,
                                      shutdown_event);

  sync_filter_ = channel_->CreateSyncMessageFilter();

  channel_filter_ = new MessageFilter();

  // Install the filter last, because it intercepts all leftover messages.
  channel_->AddFilter(channel_filter_.get());
}

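// Sends |msg| to the GPU process, taking ownership of the message whether or
// not the send succeeds. Returns false if the channel has been lost.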
bool GpuChannelHost::Send(IPC::Message* msg) {
  // Callee takes ownership of the message, regardless of whether Send is
  // successful. See IPC::Sender.
  scoped_ptr<IPC::Message> message(msg);
  // The GPU process never sends synchronous IPCs, so clear the unblock flag
  // to preserve message order.
  message->set_unblock(false);

  // Currently we need to choose between two different mechanisms for sending.
  // On the main thread we use the regular channel Send() method, on another
  // thread we use SyncMessageFilter. We also have to be careful interpreting
  // IsMainThread(), since it might return false during shutdown even though
  // we are actually calling from the main thread (the message is discarded
  // in that case).
  //
  // TODO: Can we just always use sync_filter_ since we set up the channel
  // without a main listener?
  if (factory_->IsMainThread()) {
    // channel_ is only modified on the main thread, so we don't need to take a
    // lock here.
    if (!channel_) {
      DVLOG(1) << "GpuChannelHost::Send failed: Channel already destroyed";
      return false;
    }
    // http://crbug.com/125264
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    bool result = channel_->Send(message.release());
    if (!result)
      DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
    return result;
  }

  return sync_filter_->Send(message.release());
}

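// Queues a flush for |stream_id| and returns a nonzero flush id identifying
// it, or 0 if |put_offset_changed| is false and nothing was queued. The flush
// is sent immediately if |do_flush| is true; otherwise it stays pending until
// a different route flushes the same stream or FlushPendingStream() runs.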
uint32_t GpuChannelHost::OrderingBarrier(
    int32_t route_id,
    int32_t stream_id,
    int32_t put_offset,
    uint32_t flush_count,
    const std::vector<ui::LatencyInfo>& latency_info,
    bool put_offset_changed,
    bool do_flush) {
  AutoLock lock(context_lock_);
  StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
  if (flush_info.flush_pending && flush_info.route_id != route_id)
    InternalFlush(&flush_info);

  if (put_offset_changed) {
    const uint32_t flush_id = flush_info.next_stream_flush_id++;
    flush_info.flush_pending = true;
    flush_info.route_id = route_id;
    flush_info.put_offset = put_offset;
    flush_info.flush_count = flush_count;
    flush_info.flush_id = flush_id;
    flush_info.latency_info.insert(flush_info.latency_info.end(),
                                   latency_info.begin(), latency_info.end());

    if (do_flush)
      InternalFlush(&flush_info);

    return flush_id;
  }
  return 0;
}

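// Sends any flush that OrderingBarrier() left pending on |stream_id|.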
void GpuChannelHost::FlushPendingStream(int32_t stream_id) {
  AutoLock lock(context_lock_);
  auto flush_info_iter = stream_flush_info_.find(stream_id);
  if (flush_info_iter == stream_flush_info_.end())
    return;

  StreamFlushInfo& flush_info = flush_info_iter->second;
  if (flush_info.flush_pending)
    InternalFlush(&flush_info);
}

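// Sends the pending GpuCommandBufferMsg_AsyncFlush recorded in |flush_info|
// and marks it as flushed. The caller must hold |context_lock_|.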
void GpuChannelHost::InternalFlush(StreamFlushInfo* flush_info) {
  context_lock_.AssertAcquired();
  DCHECK(flush_info);
  DCHECK(flush_info->flush_pending);
  DCHECK_LT(flush_info->flushed_stream_flush_id, flush_info->flush_id);
  Send(new GpuCommandBufferMsg_AsyncFlush(
      flush_info->route_id, flush_info->put_offset, flush_info->flush_count,
      flush_info->latency_info));
  flush_info->latency_info.clear();
  flush_info->flush_pending = false;

  flush_info->flushed_stream_flush_id = flush_info->flush_id;
}

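// Synchronously asks the GPU process to create a command buffer and returns a
// local proxy for it, or nullptr on failure. An illustrative call for an
// offscreen buffer (the attribute list, URL, and enum choices here are
// assumptions of this sketch, not requirements):
//
//   scoped_ptr<CommandBufferProxyImpl> proxy = host->CreateCommandBuffer(
//       gpu::kNullSurfaceHandle, gfx::Size(), nullptr /* share_group */,
//       host->GenerateStreamID(), gpu::GpuStreamPriority::NORMAL, attribs,
//       active_url, gfx::PreferIntegratedGpu);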
scoped_ptr<CommandBufferProxyImpl> GpuChannelHost::CreateCommandBuffer(
    gpu::SurfaceHandle surface_handle,
    const gfx::Size& size,
    CommandBufferProxyImpl* share_group,
    int32_t stream_id,
    gpu::GpuStreamPriority stream_priority,
    const std::vector<int32_t>& attribs,
    const GURL& active_url,
    gfx::GpuPreference gpu_preference) {
  DCHECK(!share_group || (stream_id == share_group->stream_id()));
  TRACE_EVENT1("gpu", "GpuChannelHost::CreateCommandBuffer", "surface_handle",
               surface_handle);

  GPUCreateCommandBufferConfig init_params;
  init_params.share_group_id =
      share_group ? share_group->route_id() : MSG_ROUTING_NONE;
  init_params.stream_id = stream_id;
  init_params.stream_priority = stream_priority;
  init_params.attribs = attribs;
  init_params.active_url = active_url;
  init_params.gpu_preference = gpu_preference;

  int32_t route_id = GenerateRouteID();

  // TODO(vadimt): Remove ScopedTracker below once crbug.com/125248 is fixed.
  tracked_objects::ScopedTracker tracking_profile(
      FROM_HERE_WITH_EXPLICIT_FUNCTION(
          "125248 GpuChannelHost::CreateCommandBuffer"));

  // We're blocking the UI thread, which is generally undesirable. In this
  // case we need to wait for the reply before we can show any UI anyway, so
  // the wait does not cause additional jank.
  // TODO(piman): Make this asynchronous (http://crbug.com/125248).

  bool succeeded = false;
  if (!Send(new GpuChannelMsg_CreateCommandBuffer(
          surface_handle, size, init_params, route_id, &succeeded))) {
    LOG(ERROR) << "Failed to send GpuChannelMsg_CreateCommandBuffer.";
    return nullptr;
  }

  if (!succeeded) {
    LOG(ERROR) << "GpuChannelMsg_CreateCommandBuffer returned failure.";
    return nullptr;
  }

  scoped_ptr<CommandBufferProxyImpl> command_buffer =
      make_scoped_ptr(new CommandBufferProxyImpl(this, route_id, stream_id));
  AddRoute(route_id, command_buffer->AsWeakPtr());

  return command_buffer;
}

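// Asks the GPU process to destroy the command buffer behind |command_buffer|
// and removes its route. A flush still pending for its stream is dropped if
// that flush belongs to this route.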
void GpuChannelHost::DestroyCommandBuffer(
    CommandBufferProxyImpl* command_buffer) {
  TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");

  int32_t route_id = command_buffer->route_id();
  int32_t stream_id = command_buffer->stream_id();
  Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
  RemoveRoute(route_id);

  AutoLock lock(context_lock_);
  StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
  if (flush_info.flush_pending && flush_info.route_id == route_id)
    flush_info.flush_pending = false;
}

void GpuChannelHost::DestroyChannel() {
  DCHECK(factory_->IsMainThread());
  AutoLock lock(context_lock_);
  channel_.reset();
}

void GpuChannelHost::AddRoute(int route_id,
                              base::WeakPtr<IPC::Listener> listener) {
  AddRouteWithTaskRunner(route_id, listener,
                         base::ThreadTaskRunnerHandle::Get());
}

void GpuChannelHost::AddRouteWithTaskRunner(
    int route_id,
    base::WeakPtr<IPC::Listener> listener,
    scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
      factory_->GetIOThreadTaskRunner();
  io_task_runner->PostTask(
      FROM_HERE,
      base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
                 channel_filter_.get(), route_id, listener, task_runner));
}

void GpuChannelHost::RemoveRoute(int route_id) {
  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
      factory_->GetIOThreadTaskRunner();
  io_task_runner->PostTask(
      FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
                            channel_filter_.get(), route_id));
}

base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
    base::SharedMemoryHandle source_handle) {
  if (IsLost())
    return base::SharedMemory::NULLHandle();

  return base::SharedMemory::DuplicateHandle(source_handle);
}

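// Returns a transfer buffer id that is unique within the process. Ids come
// from a process-global counter, so they never collide across channels.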
int32_t GpuChannelHost::ReserveTransferBufferId() {
  // 0 is a reserved value, so start handing out ids at 1.
  return g_next_transfer_buffer_id.GetNext() + 1;
}

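// Prepares |source_handle| for use by the GPU process. Shared memory is
// duplicated into a fresh handle and needs no sync point; native buffer types
// are passed through by reference, so the caller must insert a sync point
// before the GPU process uses them.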
gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
    const gfx::GpuMemoryBufferHandle& source_handle,
    bool* requires_sync_point) {
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
      handle.handle = ShareToGpuProcess(source_handle.handle);
      handle.offset = source_handle.offset;
      handle.stride = source_handle.stride;
      *requires_sync_point = false;
      return handle;
    }
    case gfx::IO_SURFACE_BUFFER:
    case gfx::SURFACE_TEXTURE_BUFFER:
    case gfx::OZONE_NATIVE_PIXMAP:
      *requires_sync_point = true;
      return source_handle;
    default:
      NOTREACHED();
      return gfx::GpuMemoryBufferHandle();
  }
}

int32_t GpuChannelHost::ReserveImageId() {
  return next_image_id_.GetNext();
}

int32_t GpuChannelHost::GenerateRouteID() {
  return next_route_id_.GetNext();
}

int32_t GpuChannelHost::GenerateStreamID() {
  const int32_t stream_id = next_stream_id_.GetNext();
  DCHECK_NE(0, stream_id);
  DCHECK_NE(kDefaultStreamId, stream_id);
  return stream_id;
}

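// Confirms that flushes issued so far have reached the GPU process. The
// channel delivers messages in order, so a synchronous round trip (a Nop)
// proves that every flush sent before it has arrived. On success, the
// verified flush id of every stream is advanced and the highest verified id
// for |stream_id| is returned; on failure (e.g. a lost channel), returns 0.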
uint32_t GpuChannelHost::ValidateFlushIDReachedServer(int32_t stream_id,
                                                      bool force_validate) {
  // Record the flush ids that we will be validating, for all streams.
  base::hash_map<int32_t, uint32_t> validate_flushes;
  uint32_t flushed_stream_flush_id = 0;
  uint32_t verified_stream_flush_id = 0;
  {
    AutoLock lock(context_lock_);
    for (const auto& iter : stream_flush_info_) {
      const int32_t iter_stream_id = iter.first;
      const StreamFlushInfo& flush_info = iter.second;
      if (iter_stream_id == stream_id) {
        flushed_stream_flush_id = flush_info.flushed_stream_flush_id;
        verified_stream_flush_id = flush_info.verified_stream_flush_id;
      }

      if (flush_info.flushed_stream_flush_id >
          flush_info.verified_stream_flush_id) {
        validate_flushes.insert(std::make_pair(
            iter_stream_id, flush_info.flushed_stream_flush_id));
      }
    }
  }

  if (!force_validate && flushed_stream_flush_id == verified_stream_flush_id) {
    // The current stream has no unverified flushes.
    return verified_stream_flush_id;
  }

  if (Send(new GpuChannelMsg_Nop())) {
    // The Nop round-tripped, so update the verified flush id for all streams.
    uint32_t highest_flush_id = 0;
    AutoLock lock(context_lock_);
    for (const auto& iter : validate_flushes) {
      const int32_t validated_stream_id = iter.first;
      const uint32_t validated_flush_id = iter.second;
      StreamFlushInfo& flush_info = stream_flush_info_[validated_stream_id];
      if (flush_info.verified_stream_flush_id < validated_flush_id) {
        flush_info.verified_stream_flush_id = validated_flush_id;
      }

      if (validated_stream_id == stream_id)
        highest_flush_id = flush_info.verified_stream_flush_id;
    }

    return highest_flush_id;
  }

  return 0;
}

uint32_t GpuChannelHost::GetHighestValidatedFlushID(int32_t stream_id) {
  AutoLock lock(context_lock_);
  StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
  return flush_info.verified_stream_flush_id;
}

GpuChannelHost::~GpuChannelHost() {
#if DCHECK_IS_ON()
  AutoLock lock(context_lock_);
  DCHECK(!channel_)
      << "GpuChannelHost::DestroyChannel must be called before destruction.";
#endif
}

GpuChannelHost::MessageFilter::ListenerInfo::ListenerInfo() {}

GpuChannelHost::MessageFilter::ListenerInfo::ListenerInfo(
    const ListenerInfo& other) = default;

GpuChannelHost::MessageFilter::ListenerInfo::~ListenerInfo() {}

GpuChannelHost::MessageFilter::MessageFilter() : lost_(false) {}

GpuChannelHost::MessageFilter::~MessageFilter() {}

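// The route map below is only touched on the IO thread; AddRoute and
// RemoveRoute are posted there from the GpuChannelHost methods above.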
void GpuChannelHost::MessageFilter::AddRoute(
    int32_t route_id,
    base::WeakPtr<IPC::Listener> listener,
    scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
  DCHECK(listeners_.find(route_id) == listeners_.end());
  DCHECK(task_runner);
  ListenerInfo info;
  info.listener = listener;
  info.task_runner = task_runner;
  listeners_[route_id] = info;
}

void GpuChannelHost::MessageFilter::RemoveRoute(int32_t route_id) {
  listeners_.erase(route_id);
}

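// Called on the IO thread. Forwards |message| to the listener registered for
// its routing id, on that listener's task runner.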
bool GpuChannelHost::MessageFilter::OnMessageReceived(
    const IPC::Message& message) {
  // Never handle sync message replies or we will deadlock here.
  if (message.is_reply())
    return false;

  auto it = listeners_.find(message.routing_id());
  if (it == listeners_.end())
    return false;

  const ListenerInfo& info = it->second;
  info.task_runner->PostTask(
      FROM_HERE,
      base::Bind(base::IgnoreResult(&IPC::Listener::OnMessageReceived),
                 info.listener, message));
  return true;
}

void GpuChannelHost::MessageFilter::OnChannelError() {
  // Set the lost state before signaling the proxies. That way, if they
  // themselves post a task to recreate the context, they will not try to
  // reuse this channel host.
  {
    AutoLock lock(lock_);
    lost_ = true;
  }

  // Inform all the proxies that an error has occurred. This will be reported
  // via OpenGL as a lost context.
  for (const auto& kv : listeners_) {
    const ListenerInfo& info = kv.second;
    info.task_runner->PostTask(
        FROM_HERE, base::Bind(&IPC::Listener::OnChannelError, info.listener));
  }

  listeners_.clear();
}

bool GpuChannelHost::MessageFilter::IsLost() const {
  AutoLock lock(lock_);
  return lost_;
}

}  // namespace content