Chromium Code Reviews

Unified Diff: content/common/gpu/client/gpu_channel_host.cc (removed lines are prefixed with "-", added lines with "+")

Issue 165393003: gpu: Generate mailboxes on client side (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: rebase Created 6 years, 10 months ago
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/client/gpu_channel_host.h"

#include <algorithm>

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/posix/eintr_wrapper.h"
#include "base/threading/thread_restrictions.h"
#include "content/common/gpu/client/command_buffer_proxy_impl.h"
#include "content/common/gpu/client/gpu_video_encode_accelerator_host.h"
#include "content/common/gpu/gpu_messages.h"
-#include "gpu/command_buffer/common/mailbox.h"
#include "ipc/ipc_sync_message_filter.h"
#include "url/gurl.h"

#if defined(OS_WIN)
#include "content/public/common/sandbox_init.h"
#endif

using base::AutoLock;
using base::MessageLoopProxy;

(...skipping 226 matching lines...)
  return target_handle;
#else
  int duped_handle = HANDLE_EINTR(dup(source_handle.fd));
  if (duped_handle < 0)
    return base::SharedMemory::NULLHandle();

  return base::FileDescriptor(duped_handle, true);
#endif
}

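Note on the POSIX branch above: HANDLE_EINTR (from base/posix/eintr_wrapper.h) retries the wrapped syscall while it fails with EINTR, so a signal delivered during dup() is not misreported as a lost handle. A minimal standalone sketch of that retry pattern (plain POSIX, not the Chromium macro itself):

#include <errno.h>
#include <unistd.h>

// Duplicate a file descriptor, retrying if the call is interrupted by a
// signal (EINTR). Returns the new descriptor, or -1 on a real error.
int DupWithRetry(int fd) {
  int duped;
  do {
    duped = dup(fd);
  } while (duped < 0 && errno == EINTR);
  return duped;
}
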
-bool GpuChannelHost::GenerateMailboxNames(unsigned num,
-                                          std::vector<gpu::Mailbox>* names) {
-  DCHECK(names->empty());
-  TRACE_EVENT0("gpu", "GenerateMailboxName");
-  size_t generate_count = channel_filter_->GetMailboxNames(num, names);
-
-  if (names->size() < num) {
-    std::vector<gpu::Mailbox> new_names;
-    if (!Send(new GpuChannelMsg_GenerateMailboxNames(num - names->size(),
-                                                     &new_names)))
-      return false;
-    names->insert(names->end(), new_names.begin(), new_names.end());
-  }
-
-  if (generate_count > 0)
-    Send(new GpuChannelMsg_GenerateMailboxNamesAsync(generate_count));
-
-  return true;
-}
-
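Note: the removed GenerateMailboxNames() was the client half of the old scheme, in which mailbox names were minted in the GPU process and fetched (or prefetched) over IPC. With this change the client can generate names itself, so the synchronous GpuChannelMsg_GenerateMailboxNames round trip disappears. A hedged sketch of what purely local generation can look like; the struct and helper names below are illustrative, not the exact API added by this CL:

#include <stdint.h>

#include "base/rand_util.h"  // base::RandBytes

// Illustrative stand-in for gpu::Mailbox: a fixed-size, unguessable name.
struct MailboxName {
  int8_t name[64];
};

// Generate a mailbox name locally from cryptographically strong random
// bytes; no round trip to the GPU process is required.
MailboxName GenerateMailboxNameLocally() {
  MailboxName mailbox;
  base::RandBytes(mailbox.name, sizeof(mailbox.name));
  return mailbox;
}
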
int32 GpuChannelHost::ReserveTransferBufferId() {
  return next_transfer_buffer_id_.GetNext();
}

gfx::GpuMemoryBufferHandle GpuChannelHost::ShareGpuMemoryBufferToGpuProcess(
    gfx::GpuMemoryBufferHandle source_handle) {
  switch (source_handle.type) {
    case gfx::SHARED_MEMORY_BUFFER: {
      gfx::GpuMemoryBufferHandle handle;
      handle.type = gfx::SHARED_MEMORY_BUFFER;
(...skipping 15 matching lines...)
}

GpuChannelHost::~GpuChannelHost() {
  // channel_ must be destroyed on the main thread.
  if (!factory_->IsMainThread())
    factory_->GetMainLoop()->DeleteSoon(FROM_HERE, channel_.release());
}


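Note on ~GpuChannelHost(): the IPC channel object must die on the main thread, so when the host is destroyed elsewhere the raw pointer is handed to the main loop via DeleteSoon. A minimal sketch of that delete-on-a-specific-thread pattern, assuming a base::MessageLoopProxy for the target thread (the helper name is illustrative):

#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop_proxy.h"

// Ensure |object| (heap-allocated) is deleted on the thread behind
// |target_loop|, no matter which thread this helper runs on. Takes ownership.
template <typename T>
void DeleteOnLoop(const scoped_refptr<base::MessageLoopProxy>& target_loop,
                  T* object) {
  if (target_loop->BelongsToCurrentThread()) {
    delete object;  // Already on the right thread; delete in place.
    return;
  }
  // DeleteSoon takes ownership of the pointer and deletes it on
  // |target_loop| as a posted task.
  target_loop->DeleteSoon(FROM_HERE, object);
}
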
GpuChannelHost::MessageFilter::MessageFilter()
-    : lost_(false),
-      requested_mailboxes_(0) {
+    : lost_(false) {
}

GpuChannelHost::MessageFilter::~MessageFilter() {}

void GpuChannelHost::MessageFilter::AddRoute(
    int route_id,
    base::WeakPtr<IPC::Listener> listener,
    scoped_refptr<MessageLoopProxy> loop) {
  DCHECK(listeners_.find(route_id) == listeners_.end());
  GpuListenerInfo info;
  info.listener = listener;
  info.loop = loop;
  listeners_[route_id] = info;
}

void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) {
  ListenerMap::iterator it = listeners_.find(route_id);
  if (it != listeners_.end())
    listeners_.erase(it);
}

bool GpuChannelHost::MessageFilter::OnMessageReceived(
    const IPC::Message& message) {
  // Never handle sync message replies or we will deadlock here.
  if (message.is_reply())
    return false;

-  if (message.routing_id() == MSG_ROUTING_CONTROL)
-    return OnControlMessageReceived(message);
-
-  ListenerMap::iterator it = listeners_.find(message.routing_id());
-
-  if (it != listeners_.end()) {
-    const GpuListenerInfo& info = it->second;
-    info.loop->PostTask(
-        FROM_HERE,
-        base::Bind(
-            base::IgnoreResult(&IPC::Listener::OnMessageReceived),
-            info.listener,
-            message));
-  }
-
+  ListenerMap::iterator it = listeners_.find(message.routing_id());
+  if (it == listeners_.end())
+    return false;
+
+  const GpuListenerInfo& info = it->second;
+  info.loop->PostTask(
+      FROM_HERE,
+      base::Bind(
+          base::IgnoreResult(&IPC::Listener::OnMessageReceived),
+          info.listener,
+          message));
  return true;
}
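Note: with the mailbox IPC gone, the filter no longer needs a control-message path, so any message that is not addressed to a registered route is simply left for other filters (return false). The forwarding idiom itself is unchanged: base::IgnoreResult adapts the bool-returning OnMessageReceived to the void signature PostTask expects, and binding a WeakPtr makes the posted task a no-op if the listener is already gone. A standalone sketch of that idiom (the helper name is illustrative):

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/location.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop_proxy.h"
#include "ipc/ipc_listener.h"
#include "ipc/ipc_message.h"

// Hop |message| over to |listener|'s thread. The task silently does nothing
// if |listener| has been destroyed by the time it runs.
void ForwardToListener(const scoped_refptr<base::MessageLoopProxy>& loop,
                       base::WeakPtr<IPC::Listener> listener,
                       const IPC::Message& message) {
  loop->PostTask(
      FROM_HERE,
      base::Bind(base::IgnoreResult(&IPC::Listener::OnMessageReceived),
                 listener, message));
}
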

void GpuChannelHost::MessageFilter::OnChannelError() {
  // Set the lost state before signalling the proxies. That way, if they
  // themselves post a task to recreate the context, they will not try to re-use
  // this channel host.
  {
    AutoLock lock(lock_);
    lost_ = true;
(...skipping 11 matching lines...)
  }

  listeners_.clear();
}

bool GpuChannelHost::MessageFilter::IsLost() const {
  AutoLock lock(lock_);
  return lost_;
}

-size_t GpuChannelHost::MessageFilter::GetMailboxNames(
-    size_t num, std::vector<gpu::Mailbox>* names) {
-  AutoLock lock(lock_);
-  size_t count = std::min(num, mailbox_name_pool_.size());
-  names->insert(names->begin(),
-                mailbox_name_pool_.end() - count,
-                mailbox_name_pool_.end());
-  mailbox_name_pool_.erase(mailbox_name_pool_.end() - count,
-                           mailbox_name_pool_.end());
-
-  const size_t ideal_mailbox_pool_size = 100;
-  size_t total = mailbox_name_pool_.size() + requested_mailboxes_;
-  DCHECK_LE(total, ideal_mailbox_pool_size);
-  if (total >= ideal_mailbox_pool_size / 2)
-    return 0;
-  size_t request = ideal_mailbox_pool_size - total;
-  requested_mailboxes_ += request;
-  return request;
-}
-
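Note: the removed GetMailboxNames() implemented a simple prefetch heuristic: serve requests from a local pool, and only when pool-plus-in-flight drops below half of the ideal size (100) ask the GPU process to top it back up. Stripped of the mailbox specifics, the rule is just:

#include <cstddef>

// Generic "keep the cache topped up" rule used by the removed pool: request
// more only when pool + in-flight falls below half of |ideal_size|, and then
// request enough to refill completely.
size_t ComputeRefillRequest(size_t pool_size,
                            size_t in_flight,
                            size_t ideal_size) {
  size_t total = pool_size + in_flight;
  if (total >= ideal_size / 2)
    return 0;                 // Still comfortably stocked; nothing to request.
  return ideal_size - total;  // Top the pool back up to |ideal_size|.
}

For example, with 20 names pooled, none in flight, and an ideal size of 100, this asks for 80 more. None of this bookkeeping is needed once names are generated client-side.
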
-bool GpuChannelHost::MessageFilter::OnControlMessageReceived(
-    const IPC::Message& message) {
-  bool handled = true;
-
-  IPC_BEGIN_MESSAGE_MAP(GpuChannelHost::MessageFilter, message)
-    IPC_MESSAGE_HANDLER(GpuChannelMsg_GenerateMailboxNamesReply,
-                        OnGenerateMailboxNamesReply)
-    IPC_MESSAGE_UNHANDLED(handled = false)
-  IPC_END_MESSAGE_MAP()
-
-  DCHECK(handled);
-  return handled;
-}
-
-void GpuChannelHost::MessageFilter::OnGenerateMailboxNamesReply(
-    const std::vector<gpu::Mailbox>& names) {
-  TRACE_EVENT0("gpu", "OnGenerateMailboxNamesReply");
-  AutoLock lock(lock_);
-  DCHECK_LE(names.size(), requested_mailboxes_);
-  requested_mailboxes_ -= names.size();
-  mailbox_name_pool_.insert(mailbox_name_pool_.end(),
-                            names.begin(),
-                            names.end());
-}
-
-
}  // namespace content