Chromium Code Reviews

Unified Diff: content/common/gpu/client/gpu_channel_host.cc

Issue 896723008: Add OrderingBarrierCHROMIUM API. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Update unit test classes. Created 5 years, 10 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/common/gpu/client/gpu_channel_host.h"

 #include <algorithm>

 #include "base/bind.h"
 #include "base/message_loop/message_loop.h"
(...skipping 12 matching lines...)

 using base::AutoLock;
 using base::MessageLoopProxy;

 namespace content {

 GpuListenerInfo::GpuListenerInfo() {}

 GpuListenerInfo::~GpuListenerInfo() {}

+ProxyFlushInfo::ProxyFlushInfo()
+    : flush_pending(false), route_id(0), put_offset(0), flush_count(0) {
+}
+
+ProxyFlushInfo::~ProxyFlushInfo() {
+}
+
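ProxyFlushInfo is the small record this patch uses to buffer the parameters of a deferred flush. Its declaration is not part of this file; below is a sketch of what it must contain, inferred from the uses in GpuChannelHost::Flush() and InternalFlush() later in this diff (field order and exact types are an assumption):

    // Sketch only; the real declaration lives in gpu_channel_host.h.
    struct ProxyFlushInfo {
      ProxyFlushInfo();
      ~ProxyFlushInfo();

      bool flush_pending;
      int route_id;
      int32 put_offset;
      uint32 flush_count;
      std::vector<ui::LatencyInfo> latency_info;
    };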
 // static
 scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
     GpuChannelHostFactory* factory,
     const gpu::GPUInfo& gpu_info,
     const IPC::ChannelHandle& channel_handle,
     base::WaitableEvent* shutdown_event,
     gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) {
   DCHECK(factory->IsMainThread());
   scoped_refptr<GpuChannelHost> host =
       new GpuChannelHost(factory, gpu_info, gpu_memory_buffer_manager);
(...skipping 30 matching lines...)
   channel_->AddFilter(sync_filter_.get());

   channel_filter_ = new MessageFilter();

   // Install the filter last, because we intercept all leftover
   // messages.
   channel_->AddFilter(channel_filter_.get());
 }

 bool GpuChannelHost::Send(IPC::Message* msg) {
+  AutoLock lock(context_lock_);
+  InternalFlush();
+  return InternalSend(msg);
piman 2015/02/05 01:14:15  The main thing is that some of the messages are synchronous…
vmiura 2015/02/05 01:20:48  Makes sense. I think doing InternalFlush on every…
+}
+
+bool GpuChannelHost::InternalSend(IPC::Message* msg) {
   // Callee takes ownership of message, regardless of whether Send is
   // successful. See IPC::Sender.
   scoped_ptr<IPC::Message> message(msg);
   // The GPU process never sends synchronous IPCs so clear the unblock flag to
   // preserve order.
   message->set_unblock(false);

   // Currently we need to choose between two different mechanisms for sending.
   // On the main thread we use the regular channel Send() method, on another
   // thread we use SyncMessageFilter. We also have to be careful interpreting
(...skipping 12 matching lines...)
   } else if (base::MessageLoop::current()) {
     bool result = sync_filter_->Send(message.release());
     if (!result)
       DVLOG(1) << "GpuChannelHost::Send failed: SyncMessageFilter::Send failed";
     return result;
   }

   return false;
 }

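The inline exchange above (piman, vmiura) is about ordering: some messages pushed through Send() are synchronous, so a shallow flush that is still queued must reach the GPU process before any later message on the same channel, which is why Send() now drains the pending flush unconditionally. A minimal standalone sketch of that drain-before-send rule; every name in it is illustrative, not part of the patch:

    #include <cstdio>

    // Illustrative stand-in for the real IPC message type.
    struct Message { const char* what; };

    class DeferredFlushChannel {
     public:
      // Queue a flush instead of sending it (the "shallow" path).
      void QueueFlush(int route_id) {
        flush_pending_ = true;
        flush_route_id_ = route_id;
      }

      // Every send drains the queued flush first, preserving channel order.
      void Send(const Message& msg) {
        DrainPendingFlush();
        std::printf("send: %s\n", msg.what);
      }

     private:
      void DrainPendingFlush() {
        if (flush_pending_) {
          std::printf("send: AsyncFlush(route %d)\n", flush_route_id_);
          flush_pending_ = false;
        }
      }

      bool flush_pending_ = false;
      int flush_route_id_ = 0;
    };

    int main() {
      DeferredFlushChannel channel;
      channel.QueueFlush(7);                 // shallow flush: nothing sent yet
      channel.Send(Message{"CreateImage"});  // AsyncFlush(route 7) goes out first
    }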
+void GpuChannelHost::Flush(int route_id,
+                           int32 put_offset,
+                           uint32 flush_count,
+                           const std::vector<ui::LatencyInfo>& latency_info,
+                           bool shallow_flush) {
+  AutoLock lock(context_lock_);
+  if (flush_info_.flush_pending && flush_info_.route_id != route_id)
+    InternalFlush();
+
+  flush_info_.flush_pending = true;
+  flush_info_.route_id = route_id;
+  flush_info_.put_offset = put_offset;
+  flush_info_.flush_count = flush_count;
+  flush_info_.latency_info.insert(flush_info_.latency_info.end(),
+                                  latency_info.begin(), latency_info.end());
+
+  if (!shallow_flush)
+    InternalFlush();
+}
+
+void GpuChannelHost::InternalFlush() {
+  if (flush_info_.flush_pending) {
+    InternalSend(new GpuCommandBufferMsg_AsyncFlush(
+        flush_info_.route_id, flush_info_.put_offset, flush_info_.flush_count,
+        flush_info_.latency_info));
+    flush_info_.latency_info.clear();
+    flush_info_.flush_pending = false;
+  }
+}
+
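Taken together, Flush() with shallow_flush == true is the ordering barrier: it only records the flush parameters, and InternalFlush() sends them later, when a different route flushes, a deep flush is requested, or any other message goes out on the channel. A hypothetical caller-side sequence (the host pointer, route ids, offsets, and counts are illustrative, not from this patch):

    std::vector<ui::LatencyInfo> no_latency;

    // Ordering barrier: queue route 5's flush without sending anything yet.
    host->Flush(5 /* route_id */, put_offset_5, flush_count_5, no_latency,
                true /* shallow_flush */);

    // Deep-flushing a different route drains the pending flush for route 5
    // first, keeping route 5's commands ordered ahead of route 6's.
    host->Flush(6 /* route_id */, put_offset_6, flush_count_6, no_latency,
                false /* shallow_flush */);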
 CommandBufferProxyImpl* GpuChannelHost::CreateViewCommandBuffer(
     int32 surface_id,
     CommandBufferProxyImpl* share_group,
     const std::vector<int32>& attribs,
     const GURL& active_url,
     gfx::GpuPreference gpu_preference) {
   TRACE_EVENT1("gpu",
                "GpuChannelHost::CreateViewCommandBuffer",
                "surface_id",
                surface_id);
(...skipping 97 matching lines...)
 void GpuChannelHost::DestroyCommandBuffer(
     CommandBufferProxyImpl* command_buffer) {
   TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");

   int route_id = command_buffer->GetRouteID();
   Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
   RemoveRoute(route_id);

   AutoLock lock(context_lock_);
   proxies_.erase(route_id);
+  if (flush_info_.flush_pending && flush_info_.route_id == route_id)
+    flush_info_.flush_pending = false;
+
   delete command_buffer;
 }

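A note on the cancellation just added to DestroyCommandBuffer(): the Send() above already drains whatever flush was pending, but a shallow flush for this route could be queued again before context_lock_ is taken here; without the check, that stale flush would later be sent for a route the GPU process has already torn down. A plausible interleaving (thread labels are hypothetical):

    // thread A: Send(GpuChannelMsg_DestroyCommandBuffer(5));  // drains flush
    // thread B: Flush(5, ..., true /* shallow_flush */);      // re-queues route 5
    // thread A: AutoLock lock(context_lock_);
    //           proxies_.erase(5);
    //           // The new check cancels the stale flush; otherwise the next
    //           // Send() would emit AsyncFlush for the destroyed route 5.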
 void GpuChannelHost::DestroyChannel() {
   // channel_ must be destroyed on the main thread.
   if (channel_.get() && !factory_->IsMainThread())
     factory_->GetMainLoop()->DeleteSoon(FROM_HERE, channel_.release());
   channel_.reset();
 }

(...skipping 144 matching lines...)

   listeners_.clear();
 }

 bool GpuChannelHost::MessageFilter::IsLost() const {
   AutoLock lock(lock_);
   return lost_;
 }

 }  // namespace content