Chromium Code Reviews

Diff: gpu/ipc/client/gpu_channel_host.cc

Issue 2881813002: Revert of gpu: GPU service scheduler. (Closed)
Patch Set: Created 3 years, 7 months ago
  // Copyright (c) 2012 The Chromium Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style license that can be
  // found in the LICENSE file.

  #include "gpu/ipc/client/gpu_channel_host.h"

  #include <algorithm>
  #include <utility>

  #include "base/atomic_sequence_num.h"
(... skipping 52 matching lines ...)
      GpuChannelHostFactory* factory,
      int channel_id,
      const gpu::GPUInfo& gpu_info,
      gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager)
      : factory_(factory),
        channel_id_(channel_id),
        gpu_info_(gpu_info),
        gpu_memory_buffer_manager_(gpu_memory_buffer_manager) {
    next_image_id_.GetNext();
    next_route_id_.GetNext();
+   next_stream_id_.GetNext();
  }

  void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
                               base::WaitableEvent* shutdown_event) {
    DCHECK(factory_->IsMainThread());
    // Open a channel to the GPU process. We pass nullptr as the main listener
    // here since we need to filter everything to route it to the right thread.
    scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
        factory_->GetIOThreadTaskRunner();
    channel_ = IPC::SyncChannel::Create(channel_handle, IPC::Channel::MODE_CLIENT,
(... skipping 154 matching lines ...)
  }

  int32_t GpuChannelHost::ReserveImageId() {
    return next_image_id_.GetNext();
  }

  int32_t GpuChannelHost::GenerateRouteID() {
    return next_route_id_.GetNext();
  }

+ int32_t GpuChannelHost::GenerateStreamID() {
+   const int32_t stream_id = next_stream_id_.GetNext();
+   DCHECK_NE(gpu::GPU_STREAM_INVALID, stream_id);
+   DCHECK_NE(gpu::GPU_STREAM_DEFAULT, stream_id);
+   return stream_id;
+ }
+
  uint32_t GpuChannelHost::ValidateFlushIDReachedServer(int32_t stream_id,
                                                        bool force_validate) {
    // Store what flush ids we will be validating for all streams.
    base::hash_map<int32_t, uint32_t> validate_flushes;
    uint32_t flushed_stream_flush_id = 0;
    uint32_t verified_stream_flush_id = 0;
    {
      AutoLock lock(context_lock_);
      for (const auto& iter : stream_flush_info_) {
        const int32_t iter_stream_id = iter.first;
(... skipping 116 matching lines ...)

    listeners_.clear();
  }

  bool GpuChannelHost::MessageFilter::IsLost() const {
    AutoLock lock(lock_);
    return lost_;
  }

  }  // namespace gpu
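The restored GenerateStreamID() depends on base::AtomicSequenceNumber starting at zero and on the constructor consuming the first value of each counter, so ids handed to callers never collide with the default stream. The snippet below is a minimal standalone sketch of that allocation pattern, not the Chromium implementation: StreamIdAllocator, kGpuStreamInvalid (-1), and kGpuStreamDefault (0) are illustrative stand-ins for GpuChannelHost, gpu::GPU_STREAM_INVALID, and gpu::GPU_STREAM_DEFAULT (values assumed), and std::atomic stands in for base::AtomicSequenceNumber.

// Minimal sketch (not Chromium code) of the stream-id allocation pattern
// restored by this patch. Assumes GPU_STREAM_INVALID == -1 and
// GPU_STREAM_DEFAULT == 0.
#include <atomic>
#include <cassert>
#include <cstdint>
#include <iostream>

namespace {

constexpr int32_t kGpuStreamInvalid = -1;  // assumed value of gpu::GPU_STREAM_INVALID
constexpr int32_t kGpuStreamDefault = 0;   // assumed value of gpu::GPU_STREAM_DEFAULT

class StreamIdAllocator {
 public:
  // Consuming the first sequence value (0) here mirrors the
  // next_stream_id_.GetNext() call added to the GpuChannelHost constructor:
  // it reserves 0 for the default stream, so GenerateStreamID() starts at 1.
  StreamIdAllocator() { next_stream_id_.fetch_add(1); }

  int32_t GenerateStreamID() {
    // fetch_add(1) returns the previous value, like AtomicSequenceNumber::GetNext().
    const int32_t stream_id = next_stream_id_.fetch_add(1);
    assert(stream_id != kGpuStreamInvalid);
    assert(stream_id != kGpuStreamDefault);
    return stream_id;
  }

 private:
  std::atomic<int32_t> next_stream_id_{0};  // stand-in for base::AtomicSequenceNumber
};

}  // namespace

int main() {
  StreamIdAllocator allocator;
  std::cout << allocator.GenerateStreamID() << "\n";  // prints 1
  std::cout << allocator.GenerateStreamID() << "\n";  // prints 2
  return 0;
}

Because each counter is primed once in the constructor, the DCHECKs in the patched GenerateStreamID() should only ever fire if the 32-bit counter wraps around.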
