Chromium Code Reviews

Unified diff: gpu/ipc/service/gpu_command_buffer_stub.cc

Issue 2752393002: gpu: Add SequenceId for identifying sync point sequences. (Closed)
Patch Set: piman's review 3 (created 3 years, 9 months ago)
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "gpu/ipc/service/gpu_command_buffer_stub.h"

 #include <utility>

 #include "base/bind.h"
 #include "base/bind_helpers.h"
(...skipping 99 matching lines...)
            base::Bind(&GpuCommandBufferMemoryTracker::LogMemoryStatsPressure,
                       base::Unretained(this)))) {
     // Set up |memory_stats_timer_| to call LogMemoryPeriodic periodically
     // via the provided |task_runner|.
     memory_stats_timer_.SetTaskRunner(std::move(task_runner));
     memory_stats_timer_.Start(
         FROM_HERE, base::TimeDelta::FromSeconds(30), this,
         &GpuCommandBufferMemoryTracker::LogMemoryStatsPeriodic);
   }

-  void TrackMemoryAllocatedChange(
-      size_t old_size, size_t new_size) override {
-    tracking_group_->TrackMemoryAllocatedChange(
-        old_size, new_size);
+  void TrackMemoryAllocatedChange(size_t old_size, size_t new_size) override {
+    tracking_group_->TrackMemoryAllocatedChange(old_size, new_size);
   }

   bool EnsureGPUMemoryAvailable(size_t size_needed) override {
     return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
   }

   uint64_t ClientTracingId() const override { return client_tracing_id_; }
   int ClientId() const override { return client_id_; }
   uint64_t ShareGroupTracingGUID() const override {
     return share_group_tracing_guid_;
(...skipping 72 matching lines...)
 };

 std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
 DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
   std::unique_ptr<base::DictionaryValue> res(new base::DictionaryValue);
   res->SetInteger("renderer_pid", channel->GetClientPID());
   res->SetDouble("used_bytes", channel->GetMemoryUsage());
   return base::WrapUnique(new DevToolsChannelData(res.release()));
 }

-CommandBufferId GetCommandBufferID(int channel_id, int32_t route_id) {
-  return CommandBufferId::FromUnsafeValue(
-      (static_cast<uint64_t>(channel_id) << 32) | route_id);
-}
-
 }  // namespace
222 215
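Note: the helper removed above packed the channel's client id into the high 32 bits of the 64-bit CommandBufferId and the route id into the low 32 bits; after this CL the caller computes the id and passes it into Create() below. A minimal standalone sketch of that packing, with hypothetical names (the real code wraps the value via CommandBufferId::FromUnsafeValue):

    #include <cassert>
    #include <cstdint>

    // Hypothetical standalone model of the removed helper's packing:
    // high 32 bits = channel (client) id, low 32 bits = route id.
    uint64_t PackCommandBufferId(int32_t channel_id, int32_t route_id) {
      // Mask the route id before OR-ing so a (theoretical) negative route id
      // cannot sign-extend into the channel id bits.
      return (static_cast<uint64_t>(static_cast<uint32_t>(channel_id)) << 32) |
             static_cast<uint32_t>(route_id);
    }

    int32_t ChannelIdFromCommandBufferId(uint64_t command_buffer_id) {
      return static_cast<int32_t>(command_buffer_id >> 32);
    }

    int32_t RouteIdFromCommandBufferId(uint64_t command_buffer_id) {
      return static_cast<int32_t>(command_buffer_id & 0xffffffffu);
    }

    int main() {
      uint64_t id = PackCommandBufferId(7, 42);
      assert(ChannelIdFromCommandBufferId(id) == 7);
      assert(RouteIdFromCommandBufferId(id) == 42);
      return 0;
    }
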
 std::unique_ptr<GpuCommandBufferStub> GpuCommandBufferStub::Create(
     GpuChannel* channel,
     GpuCommandBufferStub* share_command_buffer_stub,
     const GPUCreateCommandBufferConfig& init_params,
+    CommandBufferId command_buffer_id,
+    SequenceId sequence_id,
+    int32_t stream_id,
     int32_t route_id,
     std::unique_ptr<base::SharedMemory> shared_state_shm) {
   std::unique_ptr<GpuCommandBufferStub> stub(
-      new GpuCommandBufferStub(channel, init_params, route_id));
+      new GpuCommandBufferStub(channel, init_params, command_buffer_id,
+                               sequence_id, stream_id, route_id));
   if (!stub->Initialize(share_command_buffer_stub, init_params,
                         std::move(shared_state_shm)))
     return nullptr;
   return stub;
 }

 GpuCommandBufferStub::GpuCommandBufferStub(
     GpuChannel* channel,
     const GPUCreateCommandBufferConfig& init_params,
+    CommandBufferId command_buffer_id,
+    SequenceId sequence_id,
+    int32_t stream_id,
     int32_t route_id)
     : channel_(channel),
       initialized_(false),
       surface_handle_(init_params.surface_handle),
       use_virtualized_gl_context_(false),
-      command_buffer_id_(GetCommandBufferID(channel->client_id(), route_id)),
-      stream_id_(init_params.stream_id),
+      command_buffer_id_(command_buffer_id),
+      sequence_id_(sequence_id),
+      stream_id_(stream_id),
       route_id_(route_id),
       last_flush_count_(0),
       waiting_for_sync_point_(false),
       previous_processed_num_(0),
       active_url_(init_params.active_url),
       active_url_hash_(base::Hash(active_url_.possibly_invalid_spec())) {}

 GpuCommandBufferStub::~GpuCommandBufferStub() {
   Destroy();
 }

 GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
   return channel()->gpu_channel_manager()->gpu_memory_manager();
 }

 bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
-  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"),
-               "GPUTask",
-               "data",
-               DevToolsChannelData::CreateForChannel(channel()));
+  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"), "GPUTask",
+               "data", DevToolsChannelData::CreateForChannel(channel()));
   FastSetActiveURL(active_url_, active_url_hash_, channel_);

   bool have_context = false;
   // Ensure the appropriate GL context is current before handling any IPC
   // messages directed at the command buffer. This ensures that the message
   // handler can assume that the context is current (not necessary for
   // RetireSyncPoint or WaitSyncPoint).
   if (decoder_.get() &&
       message.type() != GpuCommandBufferMsg_SetGetBuffer::ID &&
       message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
(...skipping 19 matching lines...)
                         OnReturnFrontBuffer);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
                                     OnWaitForTokenInRange);
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
                                     OnWaitForGetOffsetInRange);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                         OnRegisterTransferBuffer);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                         OnDestroyTransferBuffer);
-    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_WaitSyncToken,
-                        OnWaitSyncToken)
-    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncToken,
-                        OnSignalSyncToken)
-    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery,
-                        OnSignalQuery)
+    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_WaitSyncToken, OnWaitSyncToken)
+    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncToken, OnSignalSyncToken)
+    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery, OnSignalQuery)
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage);
     IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                         OnCreateStreamTexture)
     IPC_MESSAGE_UNHANDLED(handled = false)
   IPC_END_MESSAGE_MAP()

   CheckCompleteWaits();

   // Ensure that any delayed work that was created will be handled.
(...skipping 85 matching lines...)

 void GpuCommandBufferStub::PerformWork() {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::PerformWork");

   FastSetActiveURL(active_url_, active_url_hash_, channel_);
   if (decoder_.get() && !MakeCurrent())
     return;

   if (executor_) {
     uint32_t current_unprocessed_num =
-        channel()->gpu_channel_manager()->GetUnprocessedOrderNum();
+        channel()->sync_point_manager()->GetUnprocessedOrderNum();
     // We're idle when no messages were processed or scheduled.
     bool is_idle = (previous_processed_num_ == current_unprocessed_num);
     if (!is_idle && !last_idle_time_.is_null()) {
       base::TimeDelta time_since_idle =
           base::TimeTicks::Now() - last_idle_time_;
       base::TimeDelta max_time_since_idle =
           base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

       // Force idle when it's been too long since last time we were idle.
       if (time_since_idle > max_time_since_idle)
(...skipping 10 matching lines...)
   }

   ScheduleDelayedWork(
       base::TimeDelta::FromMilliseconds(kHandleMoreWorkPeriodBusyMs));
 }

 bool GpuCommandBufferStub::HasUnprocessedCommands() {
   if (command_buffer_) {
     CommandBuffer::State state = command_buffer_->GetLastState();
     return command_buffer_->GetPutOffset() != state.get_offset &&
            !error::IsError(state.error);
   }
   return false;
 }

 void GpuCommandBufferStub::ScheduleDelayedWork(base::TimeDelta delay) {
   bool has_more_work = executor_.get() && (executor_->HasPendingQueries() ||
                                            executor_->HasMoreIdleWork() ||
                                            executor_->HasPollingWork());
   if (!has_more_work) {
     last_idle_time_ = base::TimeTicks();
     return;
   }

   base::TimeTicks current_time = base::TimeTicks::Now();
   // |process_delayed_work_time_| is set if processing of delayed work is
   // already scheduled. Just update the time if already scheduled.
   if (!process_delayed_work_time_.is_null()) {
     process_delayed_work_time_ = current_time + delay;
     return;
   }

   // Idle when no messages are processed between now and when
   // PollWork is called.
   previous_processed_num_ =
-      channel()->gpu_channel_manager()->GetProcessedOrderNum();
+      channel()->sync_point_manager()->GetProcessedOrderNum();
   if (last_idle_time_.is_null())
     last_idle_time_ = current_time;

   // IsScheduled() returns true after passing all unschedule fences
   // and this is when we can start performing idle work. Idle work
   // is done synchronously so we can set delay to 0 and instead poll
   // for more work at the rate idle work is performed. This also ensures
   // that idle work is done as efficiently as possible without any
   // unnecessary delays.
   if (executor_.get() && executor_->scheduled() &&
(...skipping 26 matching lines...)
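
Note: the two order-number hunks above switch the queries from GpuChannelManager to SyncPointManager, which now hands out the global message order numbers for all sequences. A minimal standalone model of the idle heuristic those numbers drive, with hypothetical names (the real SyncPointManager is thread-safe and far more involved):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    // Hypothetical model: a global counter numbers incoming messages; the
    // service records the highest number it has finished processing.
    class OrderNumberTracker {
     public:
      uint32_t GenerateOrderNumber() { return ++unprocessed_; }
      void MarkProcessed(uint32_t order_num) { processed_ = order_num; }
      uint32_t GetUnprocessedOrderNum() const { return unprocessed_; }
      uint32_t GetProcessedOrderNum() const { return processed_; }

     private:
      std::atomic<uint32_t> unprocessed_{0};
      std::atomic<uint32_t> processed_{0};
    };

    int main() {
      OrderNumberTracker tracker;
      // Snapshot taken when delayed work is scheduled (ScheduleDelayedWork).
      uint32_t previous_processed = tracker.GetProcessedOrderNum();
      // Later, in PerformWork: idle means nothing new arrived in between.
      bool is_idle = (previous_processed == tracker.GetUnprocessedOrderNum());
      assert(is_idle);
      uint32_t n = tracker.GenerateOrderNumber();  // A message arrives...
      is_idle = (previous_processed == tracker.GetUnprocessedOrderNum());
      assert(!is_idle);  // ...so the stub is no longer idle.
      tracker.MarkProcessed(n);
      return 0;
    }
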
     Send(wait_for_get_offset_->reply.release());
     wait_for_get_offset_.reset();
   }

   if (initialized_) {
     GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
     // If we are currently shutting down the GPU process to help with recovery
     // (exit_on_context_lost workaround), then don't tell the browser about
     // offscreen context destruction here since it's not client-invoked, and
     // might bypass the 3D API blocking logic.
-    if ((surface_handle_ == gpu::kNullSurfaceHandle) && !active_url_.is_empty()
-        && !gpu_channel_manager->is_exiting_for_lost_context()) {
+    if ((surface_handle_ == gpu::kNullSurfaceHandle) &&
+        !active_url_.is_empty() &&
+        !gpu_channel_manager->is_exiting_for_lost_context()) {
       gpu_channel_manager->delegate()->DidDestroyOffscreenContext(active_url_);
     }
   }

   if (decoder_)
     decoder_->set_engine(NULL);

   // The scheduler has raw references to the decoder and the command buffer so
   // destroy it before those.
   executor_.reset();

-  sync_point_client_.reset();
+  if (sync_point_client_state_) {
+    sync_point_client_state_->Destroy();
+    sync_point_client_state_ = nullptr;
+  }

   bool have_context = false;
   if (decoder_ && decoder_->GetGLContext()) {
     // Try to make the context current regardless of whether it was lost, so we
     // don't leak resources.
     have_context = decoder_->GetGLContext()->MakeCurrent(surface_.get());
   }
   for (auto& observer : destruction_observers_)
     observer.OnWillDestroyStub();

(...skipping 88 matching lines...)
 }

 // We can only use virtualized contexts for onscreen command buffers if their
 // config is compatible with the offscreen ones - otherwise MakeCurrent fails.
 // Example use case is a client requesting an onscreen RGBA8888 buffer for
 // fullscreen video on a low-spec device with RGB565 default format.
 if (!surface_format.IsCompatible(default_surface->GetFormat()) && !offscreen)
   use_virtualized_gl_context_ = false;
 #endif

-  command_buffer_.reset(new CommandBufferService(
-      context_group_->transfer_buffer_manager()));
+  command_buffer_.reset(
+      new CommandBufferService(context_group_->transfer_buffer_manager()));

   decoder_.reset(gles2::GLES2Decoder::Create(context_group_.get()));
   executor_.reset(new CommandExecutor(command_buffer_.get(), decoder_.get(),
                                       decoder_.get()));
-  sync_point_client_ = base::MakeUnique<SyncPointClient>(
-      channel_->sync_point_manager(),
-      channel_->GetSyncPointOrderData(stream_id_),
-      CommandBufferNamespace::GPU_IO, command_buffer_id_);
+
+  sync_point_client_state_ =
+      channel_->sync_point_manager()->CreateSyncPointClientState(
+          CommandBufferNamespace::GPU_IO, command_buffer_id_, sequence_id_);
652 652
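Note: together with the Destroy() hunk earlier, the stub's sync point bookkeeping moves from an owned SyncPointClient to a SyncPointClientState created by the SyncPointManager under the new sequence_id_, torn down with an explicit Destroy() call rather than a unique_ptr reset. A rough standalone sketch of that create/destroy lifetime under those assumptions (hypothetical types; only the two calls visible in this diff are taken from the real API):

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <utility>

    class ClientState;

    // Hypothetical model of a manager that registers per-command-buffer state.
    // Explicit Destroy() lets the manager drop its registry entry even while
    // other threads may still hold references to the state object.
    class Manager {
     public:
      std::shared_ptr<ClientState> CreateClientState(int namespace_id,
                                                     uint64_t command_buffer_id,
                                                     uint32_t sequence_id);
      void Unregister(int namespace_id, uint64_t command_buffer_id);

     private:
      std::map<std::pair<int, uint64_t>, std::weak_ptr<ClientState>> states_;
    };

    class ClientState {
     public:
      ClientState(Manager* manager, int ns, uint64_t id, uint32_t sequence_id)
          : manager_(manager), ns_(ns), id_(id), sequence_id_(sequence_id) {}
      // Called exactly once by the owner (cf. GpuCommandBufferStub::Destroy);
      // in-flight waiters holding a reference keep the object alive past this.
      void Destroy() { manager_->Unregister(ns_, id_); }

     private:
      Manager* manager_;
      int ns_;
      uint64_t id_;
      uint32_t sequence_id_;
    };

    std::shared_ptr<ClientState> Manager::CreateClientState(
        int namespace_id, uint64_t command_buffer_id, uint32_t sequence_id) {
      auto state = std::make_shared<ClientState>(this, namespace_id,
                                                 command_buffer_id, sequence_id);
      states_[{namespace_id, command_buffer_id}] = state;
      return state;
    }

    void Manager::Unregister(int namespace_id, uint64_t command_buffer_id) {
      states_.erase({namespace_id, command_buffer_id});
    }

    int main() {
      Manager manager;
      auto state = manager.CreateClientState(/*namespace_id=*/1,
                                             /*command_buffer_id=*/42,
                                             /*sequence_id=*/7);
      state->Destroy();  // Unregisters; |state| stays valid until released.
      return 0;
    }
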
   executor_->SetPreemptByFlag(channel_->preempted_flag());

   decoder_->set_engine(executor_.get());

   if (offscreen) {
     if (init_params.attribs.depth_size > 0) {
       surface_format.SetDepthBits(init_params.attribs.depth_size);
     }
     if (init_params.attribs.samples > 0) {
(...skipping 89 matching lines...)
     DLOG(ERROR) << "Failed to create context.";
     return false;
   }

   if (!context->MakeCurrent(surface_.get())) {
     LOG(ERROR) << "Failed to make context current.";
     return false;
   }

   if (!context->GetGLStateRestorer()) {
-    context->SetGLStateRestorer(
-        new GLStateRestorerImpl(decoder_->AsWeakPtr()));
+    context->SetGLStateRestorer(new GLStateRestorerImpl(decoder_->AsWeakPtr()));
   }

   if (!context_group_->has_program_cache() &&
       !context_group_->feature_info()->workarounds().disable_program_cache) {
     context_group_->set_program_cache(manager->program_cache());
   }

   // Initialize the decoder with either the view or pbuffer GLContext.
   if (!decoder_->Initialize(surface_, context, offscreen,
                             gpu::gles2::DisallowedFeatures(),
                             init_params.attribs)) {
     DLOG(ERROR) << "Failed to initialize decoder.";
     return false;
   }

   if (manager->gpu_preferences().enable_gpu_service_logging) {
     decoder_->set_log_commands(true);
   }

-  decoder_->GetLogger()->SetMsgCallback(
-      base::Bind(&GpuCommandBufferStub::SendConsoleMessage,
-                 base::Unretained(this)));
-  decoder_->SetShaderCacheCallback(
-      base::Bind(&GpuCommandBufferStub::SendCachedShader,
-                 base::Unretained(this)));
+  decoder_->GetLogger()->SetMsgCallback(base::Bind(
+      &GpuCommandBufferStub::SendConsoleMessage, base::Unretained(this)));
+  decoder_->SetShaderCacheCallback(base::Bind(
+      &GpuCommandBufferStub::SendCachedShader, base::Unretained(this)));
   decoder_->SetFenceSyncReleaseCallback(base::Bind(
       &GpuCommandBufferStub::OnFenceSyncRelease, base::Unretained(this)));
   decoder_->SetWaitSyncTokenCallback(base::Bind(
       &GpuCommandBufferStub::OnWaitSyncToken, base::Unretained(this)));
   decoder_->SetDescheduleUntilFinishedCallback(
       base::Bind(&GpuCommandBufferStub::OnDescheduleUntilFinished,
                  base::Unretained(this)));
   decoder_->SetRescheduleAfterFinishedCallback(
       base::Bind(&GpuCommandBufferStub::OnRescheduleAfterFinished,
                  base::Unretained(this)));
(...skipping 104 matching lines...)
   }
   wait_for_get_offset_ =
       base::MakeUnique<WaitForCommandState>(start, end, reply_message);
   CheckCompleteWaits();
 }

 void GpuCommandBufferStub::CheckCompleteWaits() {
   if (wait_for_token_ || wait_for_get_offset_) {
     CommandBuffer::State state = command_buffer_->GetLastState();
     if (wait_for_token_ &&
-        (CommandBuffer::InRange(
-             wait_for_token_->start, wait_for_token_->end, state.token) ||
+        (CommandBuffer::InRange(wait_for_token_->start, wait_for_token_->end,
+                                state.token) ||
          state.error != error::kNoError)) {
       ReportState();
       GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
           wait_for_token_->reply.get(), state);
       Send(wait_for_token_->reply.release());
       wait_for_token_.reset();
     }
     if (wait_for_get_offset_ &&
         (CommandBuffer::InRange(wait_for_get_offset_->start,
-                                wait_for_get_offset_->end,
-                                state.get_offset) ||
+                                wait_for_get_offset_->end, state.get_offset) ||
          state.error != error::kNoError)) {
       ReportState();
       GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
           wait_for_get_offset_->reply.get(), state);
       Send(wait_for_get_offset_->reply.release());
       wait_for_get_offset_.reset();
     }
   }
 }

(...skipping 58 matching lines...)

   if (command_buffer_)
     command_buffer_->DestroyTransferBuffer(id);
 }

 void GpuCommandBufferStub::OnCommandProcessed() {
   DCHECK(channel_->watchdog());
   channel_->watchdog()->CheckArmed();
 }

-void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }
+void GpuCommandBufferStub::ReportState() {
+  command_buffer_->UpdateState();
+}

 void GpuCommandBufferStub::PutChanged() {
   FastSetActiveURL(active_url_, active_url_hash_, channel_);
   executor_->PutChanged();
 }

 void GpuCommandBufferStub::OnSignalSyncToken(const SyncToken& sync_token,
                                              uint32_t id) {
-  if (!sync_point_client_->WaitNonThreadSafe(
+  if (!sync_point_client_state_->WaitNonThreadSafe(
           sync_token, channel_->task_runner(),
           base::Bind(&GpuCommandBufferStub::OnSignalAck, this->AsWeakPtr(),
                      id))) {
     OnSignalAck(id);
   }
 }

 void GpuCommandBufferStub::OnSignalAck(uint32_t id) {
   Send(new GpuCommandBufferMsg_SignalAck(route_id_, id));
 }

 void GpuCommandBufferStub::OnSignalQuery(uint32_t query_id, uint32_t id) {
   if (decoder_) {
     gles2::QueryManager* query_manager = decoder_->GetQueryManager();
     if (query_manager) {
-      gles2::QueryManager::Query* query =
-          query_manager->GetQuery(query_id);
+      gles2::QueryManager::Query* query = query_manager->GetQuery(query_id);
       if (query) {
-        query->AddCallback(
-            base::Bind(&GpuCommandBufferStub::OnSignalAck,
-                       this->AsWeakPtr(),
-                       id));
+        query->AddCallback(base::Bind(&GpuCommandBufferStub::OnSignalAck,
+                                      this->AsWeakPtr(), id));
         return;
       }
     }
   }
   // Something went wrong, run callback immediately.
   OnSignalAck(id);
 }

 void GpuCommandBufferStub::OnFenceSyncRelease(uint64_t release) {
   SyncToken sync_token(CommandBufferNamespace::GPU_IO, 0, command_buffer_id_,
                        release);
   gles2::MailboxManager* mailbox_manager = context_group_->mailbox_manager();
   if (mailbox_manager->UsesSync() && MakeCurrent())
     mailbox_manager->PushTextureUpdates(sync_token);

   command_buffer_->SetReleaseCount(release);
-  sync_point_client_->ReleaseFenceSync(release);
+  sync_point_client_state_->ReleaseFenceSync(release);
 }
1053 1048
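Note: OnFenceSyncRelease() above is the producer side of sync tokens: it bumps the release count and releases the fence on the client state, which is what fires callbacks registered through WaitNonThreadSafe() (used by OnSignalSyncToken above and OnWaitSyncToken below). A minimal single-threaded sketch of that release/wait bookkeeping, with hypothetical names:

    #include <cassert>
    #include <cstdint>
    #include <functional>
    #include <map>

    // Hypothetical model: waiters register a callback against a future release
    // count; ReleaseFenceSync fires every callback at or below the new count.
    class FenceSyncTracker {
     public:
      // Returns false if the release already happened (the caller then runs
      // the callback itself, mirroring the WaitNonThreadSafe usage here).
      bool Wait(uint64_t release, std::function<void()> callback) {
        if (release <= released_)
          return false;
        waiters_.emplace(release, std::move(callback));
        return true;
      }

      void ReleaseFenceSync(uint64_t release) {
        released_ = release;
        auto end = waiters_.upper_bound(release);
        for (auto it = waiters_.begin(); it != end; ++it)
          it->second();
        waiters_.erase(waiters_.begin(), end);
      }

     private:
      uint64_t released_ = 0;
      std::multimap<uint64_t, std::function<void()>> waiters_;
    };

    int main() {
      FenceSyncTracker tracker;
      int signaled = 0;
      bool pending = tracker.Wait(2, [&] { ++signaled; });
      assert(pending && signaled == 0);
      tracker.ReleaseFenceSync(1);  // Not far enough yet.
      assert(signaled == 0);
      tracker.ReleaseFenceSync(2);  // Satisfies the wait.
      assert(signaled == 1);
      assert(!tracker.Wait(2, [] {}));  // Already released: ack directly.
      return 0;
    }
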
 void GpuCommandBufferStub::OnDescheduleUntilFinished() {
   DCHECK(executor_->scheduled());
   DCHECK(executor_->HasPollingWork());

   executor_->SetScheduled(false);
-  channel_->OnStreamRescheduled(stream_id_, false);
+  channel_->OnCommandBufferDescheduled(this);
 }

 void GpuCommandBufferStub::OnRescheduleAfterFinished() {
   DCHECK(!executor_->scheduled());

   executor_->SetScheduled(true);
-  channel_->OnStreamRescheduled(stream_id_, true);
+  channel_->OnCommandBufferScheduled(this);
 }

 bool GpuCommandBufferStub::OnWaitSyncToken(const SyncToken& sync_token) {
   DCHECK(!waiting_for_sync_point_);
   DCHECK(executor_->scheduled());
   TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitSyncToken", this, "GpuCommandBufferStub",
                            this);

-  waiting_for_sync_point_ = sync_point_client_->WaitNonThreadSafe(
+  waiting_for_sync_point_ = sync_point_client_state_->WaitNonThreadSafe(
       sync_token, channel_->task_runner(),
       base::Bind(&GpuCommandBufferStub::OnWaitSyncTokenCompleted, AsWeakPtr(),
                  sync_token));

   if (waiting_for_sync_point_) {
     executor_->SetScheduled(false);
-    channel_->OnStreamRescheduled(stream_id_, false);
+    channel_->OnCommandBufferDescheduled(this);
     return true;
   }

   gles2::MailboxManager* mailbox_manager = context_group_->mailbox_manager();
   if (mailbox_manager->UsesSync() && MakeCurrent())
     mailbox_manager->PullTextureUpdates(sync_token);
   return false;
 }

 void GpuCommandBufferStub::OnWaitSyncTokenCompleted(
     const SyncToken& sync_token) {
   DCHECK(waiting_for_sync_point_);
   TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncTokenCompleted", this,
                          "GpuCommandBufferStub", this);
   // Don't call PullTextureUpdates here because we can't MakeCurrent if we're
   // executing commands on another context. The WaitSyncToken command will run
   // again and call PullTextureUpdates once this command buffer gets scheduled.
   waiting_for_sync_point_ = false;
   executor_->SetScheduled(true);
-  channel_->OnStreamRescheduled(stream_id_, true);
+  channel_->OnCommandBufferScheduled(this);
 }
1104 1099
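Note: the hunks above replace the per-stream notification OnStreamRescheduled(stream_id_, scheduled) with per-command-buffer calls, so the channel is told exactly which stub became runnable or blocked. A rough standalone sketch of the new shape, with a hypothetical channel class (the real scheduling logic lives in GpuChannel and is not part of this file):

    #include <cassert>
    #include <set>

    // Hypothetical stand-ins for the stub and channel.
    class Stub {};

    class Channel {
     public:
      // New shape: the channel learns which specific command buffer changed
      // state, instead of a (stream_id, bool) pair it must interpret itself.
      void OnCommandBufferDescheduled(Stub* stub) { descheduled_.insert(stub); }
      void OnCommandBufferScheduled(Stub* stub) { descheduled_.erase(stub); }

      // A sequence can only run when none of its stubs are blocked on a wait.
      bool AnyDescheduled() const { return !descheduled_.empty(); }

     private:
      std::set<Stub*> descheduled_;
    };

    int main() {
      Channel channel;
      Stub a, b;
      channel.OnCommandBufferDescheduled(&a);  // e.g. OnWaitSyncToken pending
      channel.OnCommandBufferDescheduled(&b);
      channel.OnCommandBufferScheduled(&a);    // a's wait completed
      assert(channel.AnyDescheduled());        // b is still blocked
      channel.OnCommandBufferScheduled(&b);
      assert(!channel.AnyDescheduled());
      return 0;
    }
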
 void GpuCommandBufferStub::OnCreateImage(
     const GpuCommandBufferMsg_CreateImage_Params& params) {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage");
   const int32_t id = params.id;
   const gfx::GpuMemoryBufferHandle& handle = params.gpu_memory_buffer;
   const gfx::Size& size = params.size;
   const gfx::BufferFormat& format = params.format;
   const uint32_t internalformat = params.internal_format;
(...skipping 26 matching lines...)
     return;
   }

   scoped_refptr<gl::GLImage> image = channel()->CreateImageForGpuMemoryBuffer(
       handle, size, format, internalformat, surface_handle_);
   if (!image.get())
     return;

   image_manager->AddImage(image.get(), id);
   if (image_release_count)
-    sync_point_client_->ReleaseFenceSync(image_release_count);
+    sync_point_client_state_->ReleaseFenceSync(image_release_count);
 }

 void GpuCommandBufferStub::OnDestroyImage(int32_t id) {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyImage");

   if (!decoder_)
     return;

   gles2::ImageManager* image_manager = decoder_->GetImageManager();
   DCHECK(image_manager);
(...skipping 68 matching lines...)
       command_buffer_->GetLastState().error == error::kLostContext)
     return;

   command_buffer_->SetContextLostReason(error::kUnknown);
   if (decoder_)
     decoder_->MarkContextLost(error::kUnknown);
   command_buffer_->SetParseError(error::kLostContext);
 }

 }  // namespace gpu