Chromium Code Reviews

Unified Diff: content/common/gpu/gpu_channel.cc

Issue 299003004: Fix leak in GpuChannel (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 6 years, 7 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if defined(OS_WIN)
 #include <windows.h>
 #endif

 #include "content/common/gpu/gpu_channel.h"

 #include <queue>
 #include <vector>

 #include "base/bind.h"
 #include "base/command_line.h"
 #include "base/debug/trace_event.h"
 #include "base/message_loop/message_loop_proxy.h"
+#include "base/stl_util.h"
 #include "base/strings/string_util.h"
 #include "base/timer/timer.h"
 #include "content/common/gpu/devtools_gpu_agent.h"
 #include "content/common/gpu/gpu_channel_manager.h"
 #include "content/common/gpu/gpu_messages.h"
 #include "content/common/gpu/sync_point_manager.h"
 #include "content/public/common/content_switches.h"
 #include "gpu/command_buffer/common/mailbox.h"
 #include "gpu/command_buffer/service/gpu_scheduler.h"
 #include "gpu/command_buffer/service/image_manager.h"
(...skipping 36 matching lines...)
 // process. To guarantee fairness, we must wait a minimum amount of time
 // before preempting and we limit the amount of time that we can preempt in
 // one shot (see constants above).
 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
 //   thread, generating the sync point ID and responding immediately, and then
 //   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
 //   into the channel's queue.
 // - it generates mailbox names for clients of the GPU process on the IO thread.
 class GpuChannelMessageFilter : public IPC::MessageFilter {
  public:
-  // Takes ownership of gpu_channel (see below).
-  GpuChannelMessageFilter(base::WeakPtr<GpuChannel>* gpu_channel,
+  GpuChannelMessageFilter(base::WeakPtr<GpuChannel> gpu_channel,
                           scoped_refptr<SyncPointManager> sync_point_manager,
                           scoped_refptr<base::MessageLoopProxy> message_loop)
       : preemption_state_(IDLE),
         gpu_channel_(gpu_channel),
         channel_(NULL),
         sync_point_manager_(sync_point_manager),
         message_loop_(message_loop),
         messages_forwarded_to_channel_(0),
-        a_stub_is_descheduled_(false) {
-  }
+        a_stub_is_descheduled_(false) {}

   virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
     DCHECK(!channel_);
     channel_ = channel;
   }

   virtual void OnFilterRemoved() OVERRIDE {
     DCHECK(channel_);
     channel_ = NULL;
   }
(...skipping 50 matching lines...)
   void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
     a_stub_is_descheduled_ = a_stub_is_descheduled;
     UpdatePreemptionState();
   }

   bool Send(IPC::Message* message) {
     return channel_->Send(message);
   }

  protected:
-  virtual ~GpuChannelMessageFilter() {
-    message_loop_->PostTask(FROM_HERE, base::Bind(
-        &GpuChannelMessageFilter::DeleteWeakPtrOnMainThread, gpu_channel_));
-  }
+  virtual ~GpuChannelMessageFilter() {}

  private:
   enum PreemptionState {
     // Either there's no other channel to preempt, there are no messages
     // pending processing, or we just finished preempting and have to wait
     // before preempting again.
     IDLE,
     // We are waiting kPreemptWaitTimeMs before checking if we should preempt.
     WAITING,
     // We can preempt whenever any IPC processing takes more than
(...skipping 160 matching lines...)
     }

     preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
     preempting_flag_->Reset();
     TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

     UpdatePreemptionState();
   }

   static void InsertSyncPointOnMainThread(
-      base::WeakPtr<GpuChannel>* gpu_channel,
+      base::WeakPtr<GpuChannel> gpu_channel,
       scoped_refptr<SyncPointManager> manager,
       int32 routing_id,
       uint32 sync_point) {
     // This function must ensure that the sync point will be retired. Normally
     // we'll find the stub based on the routing ID, and associate the sync point
     // with it, but if that fails for any reason (channel or stub already
     // deleted, invalid routing id), we need to retire the sync point
     // immediately.
-    if (gpu_channel->get()) {
-      GpuCommandBufferStub* stub = gpu_channel->get()->LookupCommandBuffer(
-          routing_id);
+    if (gpu_channel) {
+      GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id);
       if (stub) {
         stub->AddSyncPoint(sync_point);
         GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
-        gpu_channel->get()->OnMessageReceived(message);
+        gpu_channel->OnMessageReceived(message);
         return;
       } else {
-        gpu_channel->get()->MessageProcessed();
+        gpu_channel->MessageProcessed();
       }
     }
     manager->RetireSyncPoint(sync_point);
   }

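Note on this hunk: the comment above InsertSyncPointOnMainThread carries the key invariant — the sync point must always be retired, either later by the stub it gets attached to or immediately as a fallback. Below is a minimal sketch of that check-then-fallback shape. It uses std::weak_ptr and made-up stand-in types (ChannelLike, SyncPointManagerLike, AttachSyncPointToStub) rather than Chromium's base::WeakPtr and real classes, and it folds the MessageProcessed() bookkeeping of the real code into the fallback path.

    #include <cstdint>
    #include <cstdio>
    #include <memory>

    // Stand-in types; these are NOT the Chromium classes, just enough to show
    // the control flow of the new InsertSyncPointOnMainThread.
    struct SyncPointManagerLike {
      void RetireSyncPoint(uint32_t sync_point) {
        std::printf("retired sync point %u immediately\n",
                    static_cast<unsigned>(sync_point));
      }
    };
    struct ChannelLike {
      // Pretend a routing id of 0 means "no such stub".
      bool AttachSyncPointToStub(int32_t routing_id, uint32_t /*sync_point*/) {
        return routing_id != 0;
      }
    };

    // Try the normal path through the (possibly already destroyed) channel;
    // otherwise retire right away, so the sync point is retired exactly once.
    void InsertSyncPointSketch(std::weak_ptr<ChannelLike> channel,
                               SyncPointManagerLike* manager,
                               int32_t routing_id,
                               uint32_t sync_point) {
      if (std::shared_ptr<ChannelLike> locked = channel.lock()) {
        if (locked->AttachSyncPointToStub(routing_id, sync_point))
          return;  // the stub is now responsible for retiring it later
      }
      manager->RetireSyncPoint(sync_point);  // channel or stub gone: retire now
    }

    int main() {
      SyncPointManagerLike manager;
      std::shared_ptr<ChannelLike> channel = std::make_shared<ChannelLike>();
      InsertSyncPointSketch(channel, &manager, /*routing_id=*/0, /*sync_point=*/7);
      return 0;
    }

One difference from the real code: base::WeakPtr has no lock(); it is only valid to test and dereference on the thread that owns its WeakPtrFactory, which is exactly why this work is posted to the main thread.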
-  static void DeleteWeakPtrOnMainThread(
-      base::WeakPtr<GpuChannel>* gpu_channel) {
-    delete gpu_channel;
-  }
-
-  // NOTE: this is a pointer to a weak pointer. It is never dereferenced on the
-  // IO thread, it's only passed through - therefore the WeakPtr assumptions are
-  // respected.
-  base::WeakPtr<GpuChannel>* gpu_channel_;
+  // NOTE: this weak pointer is never dereferenced on the IO thread, it's only
+  // passed through - therefore the WeakPtr assumptions are respected.
+  base::WeakPtr<GpuChannel> gpu_channel_;
   IPC::Channel* channel_;
   scoped_refptr<SyncPointManager> sync_point_manager_;
   scoped_refptr<base::MessageLoopProxy> message_loop_;
   scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

   std::queue<PendingMessage> pending_messages_;

   // Count of the number of IPCs forwarded to the GpuChannel.
   uint64 messages_forwarded_to_channel_;

(...skipping 21 matching lines...)
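Note on this hunk: this is the heart of the weak-pointer leak fix. The old code heap-allocated a base::WeakPtr<GpuChannel> in GpuChannel::Init(), handed the raw pointer to the filter, and relied on the filter's destructor posting DeleteWeakPtrOnMainThread back to the main thread; if that task never ran, the WeakPtr object leaked. Holding the WeakPtr by value ties its lifetime to the filter's, so nothing has to be deleted manually. A sketch of the two ownership shapes, using std::weak_ptr as a stand-in for base::WeakPtr (the thread-affinity rules differ, but the ownership point is the same; all names here are illustrative):

    #include <memory>
    #include <utility>

    struct ChannelLike {};  // stand-in for GpuChannel

    // Old shape: the filter holds a heap-allocated weak handle through a raw
    // pointer, so something must remember to delete it on the right thread.
    // If that cleanup task never runs, the handle leaks.
    class OldFilter {
     public:
      explicit OldFilter(std::weak_ptr<ChannelLike>* channel)
          : channel_(channel) {}
      ~OldFilter() { /* relied on a posted cleanup task to delete channel_ */ }

     private:
      std::weak_ptr<ChannelLike>* channel_;  // owned raw pointer: easy to leak
    };

    // New shape: the weak handle is copied by value; it is destroyed together
    // with the filter and no manual deletion is needed anywhere.
    class NewFilter {
     public:
      explicit NewFilter(std::weak_ptr<ChannelLike> channel)
          : channel_(std::move(channel)) {}

     private:
      std::weak_ptr<ChannelLike> channel_;  // member by value: nothing to delete
    };

    int main() {
      std::shared_ptr<ChannelLike> channel = std::make_shared<ChannelLike>();
      NewFilter filter{std::weak_ptr<ChannelLike>(channel)};  // cleans itself up
      return 0;
    }

The same change is why GpuChannel::Init() further down can pass weak_factory_.GetWeakPtr() to the filter directly, and why DeleteWeakPtrOnMainThread disappears entirely.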
       weak_factory_(this),
       num_stubs_descheduled_(0) {
   DCHECK(gpu_channel_manager);
   DCHECK(client_id);

   channel_id_ = IPC::Channel::GenerateVerifiedChannelID("gpu");
   const CommandLine* command_line = CommandLine::ForCurrentProcess();
   log_messages_ = command_line->HasSwitch(switches::kLogPluginMessages);
 }

+GpuChannel::~GpuChannel() {
+  STLDeleteElements(&deferred_messages_);
+  if (preempting_flag_.get())
+    preempting_flag_->Reset();
+}

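Note on this hunk: the other half of the fix. The destructor (moved up next to the constructor; the old copy near the bottom of the file is deleted below) now frees the IPC messages still queued in deferred_messages_ when the channel goes away, instead of leaking them; the preempting_flag_ reset is unchanged behaviour. As I understand base/stl_util.h, STLDeleteElements deletes every pointer held by a container and then clears it; a rough equivalent with a made-up message type:

    #include <deque>

    struct MessageLike { int payload; };  // stand-in for IPC::Message

    // Rough equivalent of STLDeleteElements (base/stl_util.h): delete every
    // pointer in the container, then clear it so nothing dangles.
    template <typename Container>
    void DeleteElementsSketch(Container* container) {
      for (typename Container::iterator it = container->begin();
           it != container->end(); ++it) {
        delete *it;
      }
      container->clear();
    }

    int main() {
      std::deque<MessageLike*> deferred;
      deferred.push_back(new MessageLike);
      deferred.push_back(new MessageLike);
      DeleteElementsSketch(&deferred);  // frees both messages, empties the deque
      return 0;
    }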
 void GpuChannel::Init(base::MessageLoopProxy* io_message_loop,
                       base::WaitableEvent* shutdown_event) {
   DCHECK(!channel_.get());

   // Map renderer ID to a (single) channel to that process.
   channel_.reset(new IPC::SyncChannel(
       channel_id_,
       IPC::Channel::MODE_SERVER,
       this,
       io_message_loop,
       false,
       shutdown_event));

-  base::WeakPtr<GpuChannel>* weak_ptr(new base::WeakPtr<GpuChannel>(
-      weak_factory_.GetWeakPtr()));
-
-  filter_ = new GpuChannelMessageFilter(
-      weak_ptr,
-      gpu_channel_manager_->sync_point_manager(),
-      base::MessageLoopProxy::current());
+  filter_ =
+      new GpuChannelMessageFilter(weak_factory_.GetWeakPtr(),
+                                  gpu_channel_manager_->sync_point_manager(),
+                                  base::MessageLoopProxy::current());
   io_message_loop_ = io_message_loop;
   channel_->AddFilter(filter_.get());

   devtools_gpu_agent_.reset(new DevToolsGpuAgent(this));
 }

 std::string GpuChannel::GetChannelName() {
   return channel_id_;
 }

(...skipping 180 matching lines...)
   gpu_channel_manager_->LoseAllContexts();
 }

 void GpuChannel::MarkAllContextsLost() {
   for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
        !it.IsAtEnd(); it.Advance()) {
     it.GetCurrentValue()->MarkContextLost();
   }
 }

-void GpuChannel::DestroySoon() {
-  base::MessageLoop::current()->PostTask(
-      FROM_HERE, base::Bind(&GpuChannel::OnDestroy, this));
-}
-
 bool GpuChannel::AddRoute(int32 route_id, IPC::Listener* listener) {
   return router_.AddRoute(route_id, listener);
 }

 void GpuChannel::RemoveRoute(int32 route_id) {
   router_.RemoveRoute(route_id);
 }

 gpu::PreemptionFlag* GpuChannel::GetPreemptionFlag() {
   if (!preempting_flag_.get()) {
     preempting_flag_ = new gpu::PreemptionFlag;
     io_message_loop_->PostTask(
         FROM_HERE, base::Bind(
             &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState,
             filter_, preempting_flag_, num_stubs_descheduled_ > 0));
   }
   return preempting_flag_.get();
 }

 void GpuChannel::SetPreemptByFlag(
     scoped_refptr<gpu::PreemptionFlag> preempted_flag) {
   preempted_flag_ = preempted_flag;

   for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
        !it.IsAtEnd(); it.Advance()) {
     it.GetCurrentValue()->SetPreemptByFlag(preempted_flag_);
   }
 }

-GpuChannel::~GpuChannel() {
-  if (preempting_flag_.get())
-    preempting_flag_->Reset();
-}
-
 void GpuChannel::OnDestroy() {
   TRACE_EVENT0("gpu", "GpuChannel::OnDestroy");
   gpu_channel_manager_->RemoveChannel(client_id_);
 }

 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
   bool handled = true;
   IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg)
     IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer,
                         OnCreateOffscreenCommandBuffer)
(...skipping 172 matching lines...)
 uint64 GpuChannel::GetMemoryUsage() {
   uint64 size = 0;
   for (StubMap::Iterator<GpuCommandBufferStub> it(&stubs_);
        !it.IsAtEnd(); it.Advance()) {
     size += it.GetCurrentValue()->GetMemoryUsage();
   }
   return size;
 }

 }  // namespace content