Chromium Code Reviews

Diff: content/common/gpu/client/gpu_channel_host.cc

Issue 1331843005: Implemented new fence syncs which replace the old sync points. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Forgot to remove cmd buffer helper functions and replace with gpu control ones. Created 5 years, 2 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "content/common/gpu/client/gpu_channel_host.h"

 #include <algorithm>

 #include "base/atomic_sequence_num.h"
 #include "base/bind.h"
(...skipping 17 matching lines...)

 namespace content {
 namespace {

 // Global atomic to generate unique transfer buffer IDs.
 base::StaticAtomicSequenceNumber g_next_transfer_buffer_id;

 }  // namespace

 GpuChannelHost::StreamFlushInfo::StreamFlushInfo()
-    : flush_pending(false),
+    : next_stream_flush_id(1),
+      flushed_stream_flush_id(0),
+      verified_stream_flush_id(0),
+      flush_pending(false),
       route_id(MSG_ROUTING_NONE),
       put_offset(0),
-      flush_count(0) {}
+      flush_count(0),
+      flush_id(0) {}

 GpuChannelHost::StreamFlushInfo::~StreamFlushInfo() {}

 // static
 scoped_refptr<GpuChannelHost> GpuChannelHost::Create(
     GpuChannelHostFactory* factory,
     int channel_id,
     const gpu::GPUInfo& gpu_info,
     const IPC::ChannelHandle& channel_handle,
     base::WaitableEvent* shutdown_event,
(...skipping 68 matching lines...)
     bool result = channel_->Send(message.release());
     if (!result)
       DVLOG(1) << "GpuChannelHost::Send failed: Channel::Send failed";
     return result;
   }

   bool result = sync_filter_->Send(message.release());
   return result;
 }

-void GpuChannelHost::OrderingBarrier(
+uint32_t GpuChannelHost::OrderingBarrier(
     int32 route_id,
     int32 stream_id,
     int32 put_offset,
     uint32 flush_count,
     const std::vector<ui::LatencyInfo>& latency_info,
     bool put_offset_changed,
     bool do_flush) {
   AutoLock lock(context_lock_);
   StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
   if (flush_info.flush_pending && flush_info.route_id != route_id)
-    InternalFlush(stream_id);
+    InternalFlush(flush_info);

   if (put_offset_changed) {
+    const uint32_t flush_id = flush_info.next_stream_flush_id++;
     flush_info.flush_pending = true;
     flush_info.route_id = route_id;
     flush_info.put_offset = put_offset;
     flush_info.flush_count = flush_count;
+    flush_info.flush_id = flush_id;
     flush_info.latency_info.insert(flush_info.latency_info.end(),
                                    latency_info.begin(), latency_info.end());

     if (do_flush)
-      InternalFlush(stream_id);
+      InternalFlush(flush_info);
+
+    return flush_id;
   }
+  return 0;
 }

-void GpuChannelHost::InternalFlush(int32 stream_id) {
+void GpuChannelHost::InternalFlush(StreamFlushInfo& flush_info) {
   context_lock_.AssertAcquired();
-  StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
   DCHECK(flush_info.flush_pending);
+  DCHECK(flush_info.flushed_stream_flush_id < flush_info.flush_id);
   Send(new GpuCommandBufferMsg_AsyncFlush(
       flush_info.route_id, flush_info.put_offset, flush_info.flush_count,
       flush_info.latency_info));
   flush_info.latency_info.clear();
   flush_info.flush_pending = false;
+
+  flush_info.flushed_stream_flush_id = flush_info.flush_id;
 }

 scoped_ptr<CommandBufferProxyImpl> GpuChannelHost::CreateViewCommandBuffer(
     int32 surface_id,
     CommandBufferProxyImpl* share_group,
     int32 stream_id,
     GpuStreamPriority stream_priority,
     const std::vector<int32>& attribs,
     const GURL& active_url,
     gfx::GpuPreference gpu_preference) {
(...skipping 108 matching lines...)
 void GpuChannelHost::DestroyCommandBuffer(
     CommandBufferProxyImpl* command_buffer) {
   TRACE_EVENT0("gpu", "GpuChannelHost::DestroyCommandBuffer");

   int32 route_id = command_buffer->route_id();
   int32 stream_id = command_buffer->stream_id();
   Send(new GpuChannelMsg_DestroyCommandBuffer(route_id));
   RemoveRoute(route_id);

   AutoLock lock(context_lock_);
-  if (stream_flush_info_[stream_id].route_id == route_id)
-    stream_flush_info_.erase(stream_id);
+  StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
+  if (flush_info.flush_pending && flush_info.route_id == route_id)
+    flush_info.flush_pending = false;
 }

 void GpuChannelHost::DestroyChannel() {
   DCHECK(factory_->IsMainThread());
   AutoLock lock(context_lock_);
   channel_.reset();
 }

 void GpuChannelHost::AddRoute(
     int route_id, base::WeakPtr<IPC::Listener> listener) {
(...skipping 78 matching lines...)
 }

 int32 GpuChannelHost::GenerateRouteID() {
   return next_route_id_.GetNext();
 }

 int32 GpuChannelHost::GenerateStreamID() {
   return next_stream_id_.GetNext();
 }

+uint32_t GpuChannelHost::ValidateFlushIDReachedServer(int32 stream_id) {
+  // Store what flush ids we will be validating for all streams.
+  base::hash_map<int32, uint32_t> validate_flushes;
+  {
+    AutoLock lock(context_lock_);
+    for (const auto& iter : stream_flush_info_) {
+      const int32 iter_stream_id = iter.first;
+      const StreamFlushInfo& flush_info = iter.second;
+      if (flush_info.flushed_stream_flush_id >
+          flush_info.verified_stream_flush_id) {
+        validate_flushes[iter_stream_id] = flush_info.flushed_stream_flush_id;
piman 2015/09/26 00:09:51 If there is a pending flush on that stream, you al
David Yen 2015/09/26 00:24:56 The pending flushes here are ordering barriers whi
piman 2015/09/26 00:34:51 Oh, right. flushed_stream_flush_id is only set aft
+      }
+    }
+  }
+
+  if (Send(new GpuChannelMsg_Nop())) {
piman 2015/09/26 00:09:51 nit: you can skip if validate_flushes is empty.
David Yen 2015/09/28 17:38:18 Even better, I made it skip the validation if the
+    // Update verified flush id for all streams.
+    uint32_t highest_flush_id = 0;
+    AutoLock lock(context_lock_);
+    for (const auto& iter : validate_flushes) {
+      const int32_t validated_stream_id = iter.first;
+      const uint32_t validated_flush_id = iter.second;
+      StreamFlushInfo& flush_info = stream_flush_info_[validated_stream_id];
+      if (flush_info.verified_stream_flush_id < validated_flush_id) {
+        flush_info.verified_stream_flush_id = validated_flush_id;
+      }
+
+      if (validated_stream_id == stream_id)
+        highest_flush_id = flush_info.verified_stream_flush_id;
+    }
+
+    return highest_flush_id;
+  }
+
+  return 0;
+}
+
+uint32_t GpuChannelHost::GetHighestValidatedFlushID(int32 stream_id) {
+  AutoLock lock(context_lock_);
+  StreamFlushInfo& flush_info = stream_flush_info_[stream_id];
+  return flush_info.verified_stream_flush_id;
+}
+
 GpuChannelHost::~GpuChannelHost() {
 #if DCHECK_IS_ON()
   AutoLock lock(context_lock_);
   DCHECK(!channel_)
       << "GpuChannelHost::DestroyChannel must be called before destruction.";
 #endif
 }

 GpuChannelHost::MessageFilter::ListenerInfo::ListenerInfo() {}

(...skipping 58 matching lines...)

   listeners_.clear();
 }

 bool GpuChannelHost::MessageFilter::IsLost() const {
   AutoLock lock(lock_);
   return lost_;
 }

 }  // namespace content
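
To make the new bookkeeping easier to follow, here is a rough usage sketch (not part of this patch) of the three per-stream IDs this CL introduces: next_stream_flush_id hands out the ID returned by OrderingBarrier, flushed_stream_flush_id records the last ID actually sent in an AsyncFlush, and verified_stream_flush_id records the last ID known to have reached the GPU process. FlushAndVerify below is a hypothetical helper; only OrderingBarrier, GetHighestValidatedFlushID, and ValidateFlushIDReachedServer come from this change, and real callers (e.g. CommandBufferProxyImpl) may wire this up differently.

// Hypothetical illustration only -- not part of this patch. Assumes a
// GpuChannelHost* |channel| and the route/stream IDs of an existing command
// buffer.
#include <vector>

#include "content/common/gpu/client/gpu_channel_host.h"

namespace content {

// Issues a real flush and, if needed, performs the synchronous Nop round trip
// so that the returned flush ID is known to have reached the GPU process.
uint32_t FlushAndVerify(GpuChannelHost* channel,
                        int32_t route_id,
                        int32_t stream_id,
                        int32_t put_offset,
                        uint32_t flush_count) {
  // do_flush = true sends the AsyncFlush immediately, so the stream's
  // flushed_stream_flush_id is bumped to the ID returned here.
  uint32_t flush_id = channel->OrderingBarrier(
      route_id, stream_id, put_offset, flush_count,
      std::vector<ui::LatencyInfo>(), true /* put_offset_changed */,
      true /* do_flush */);

  // Already covered by an earlier Nop? Nothing to wait for (ignoring uint32
  // wrap-around for brevity).
  if (channel->GetHighestValidatedFlushID(stream_id) >= flush_id)
    return flush_id;

  // Otherwise issue the synchronous Nop; on success every stream whose
  // flushed ID is ahead of its verified ID gets promoted to verified.
  channel->ValidateFlushIDReachedServer(stream_id);
  return flush_id;
}

}  // namespace content

As the review thread above notes, a pending ordering barrier (do_flush = false) does not advance flushed_stream_flush_id until it is actually flushed, so it is deliberately left out of the validation pass.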