| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "gpu/ipc/service/gpu_command_buffer_stub.h" | 5 #include "gpu/ipc/service/gpu_command_buffer_stub.h" |
| 6 | 6 |
| 7 #include <utility> | 7 #include <utility> |
| 8 | 8 |
| 9 #include "base/bind.h" | 9 #include "base/bind.h" |
| 10 #include "base/bind_helpers.h" | 10 #include "base/bind_helpers.h" |
| 11 #include "base/hash.h" | 11 #include "base/hash.h" |
| 12 #include "base/json/json_writer.h" | 12 #include "base/json/json_writer.h" |
| 13 #include "base/macros.h" | 13 #include "base/macros.h" |
| 14 #include "base/memory/ptr_util.h" | 14 #include "base/memory/ptr_util.h" |
| 15 #include "base/memory/shared_memory.h" | 15 #include "base/memory/shared_memory.h" |
| 16 #include "base/time/time.h" | 16 #include "base/time/time.h" |
| 17 #include "base/trace_event/trace_event.h" | 17 #include "base/trace_event/trace_event.h" |
| 18 #include "build/build_config.h" | 18 #include "build/build_config.h" |
| 19 #include "gpu/command_buffer/common/constants.h" | 19 #include "gpu/command_buffer/common/constants.h" |
| 20 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h" | 20 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h" |
| 21 #include "gpu/command_buffer/common/mailbox.h" | 21 #include "gpu/command_buffer/common/mailbox.h" |
| 22 #include "gpu/command_buffer/common/sync_token.h" | 22 #include "gpu/command_buffer/common/sync_token.h" |
| 23 #include "gpu/command_buffer/service/command_executor.h" |
| 23 #include "gpu/command_buffer/service/gl_context_virtual.h" | 24 #include "gpu/command_buffer/service/gl_context_virtual.h" |
| 24 #include "gpu/command_buffer/service/gl_state_restorer_impl.h" | 25 #include "gpu/command_buffer/service/gl_state_restorer_impl.h" |
| 25 #include "gpu/command_buffer/service/image_manager.h" | 26 #include "gpu/command_buffer/service/image_manager.h" |
| 26 #include "gpu/command_buffer/service/logger.h" | 27 #include "gpu/command_buffer/service/logger.h" |
| 27 #include "gpu/command_buffer/service/mailbox_manager.h" | 28 #include "gpu/command_buffer/service/mailbox_manager.h" |
| 28 #include "gpu/command_buffer/service/memory_tracking.h" | 29 #include "gpu/command_buffer/service/memory_tracking.h" |
| 29 #include "gpu/command_buffer/service/query_manager.h" | 30 #include "gpu/command_buffer/service/query_manager.h" |
| 30 #include "gpu/command_buffer/service/service_utils.h" | 31 #include "gpu/command_buffer/service/service_utils.h" |
| 31 #include "gpu/command_buffer/service/sync_point_manager.h" | 32 #include "gpu/command_buffer/service/sync_point_manager.h" |
| 32 #include "gpu/command_buffer/service/transfer_buffer_manager.h" | 33 #include "gpu/command_buffer/service/transfer_buffer_manager.h" |
| (...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 73 explicit GpuCommandBufferMemoryTracker(GpuChannel* channel, | 74 explicit GpuCommandBufferMemoryTracker(GpuChannel* channel, |
| 74 uint64_t share_group_tracing_guid) | 75 uint64_t share_group_tracing_guid) |
| 75 : tracking_group_( | 76 : tracking_group_( |
| 76 channel->gpu_channel_manager() | 77 channel->gpu_channel_manager() |
| 77 ->gpu_memory_manager() | 78 ->gpu_memory_manager() |
| 78 ->CreateTrackingGroup(channel->GetClientPID(), this)), | 79 ->CreateTrackingGroup(channel->GetClientPID(), this)), |
| 79 client_tracing_id_(channel->client_tracing_id()), | 80 client_tracing_id_(channel->client_tracing_id()), |
| 80 client_id_(channel->client_id()), | 81 client_id_(channel->client_id()), |
| 81 share_group_tracing_guid_(share_group_tracing_guid) {} | 82 share_group_tracing_guid_(share_group_tracing_guid) {} |
| 82 | 83 |
| 83 void TrackMemoryAllocatedChange( | 84 void TrackMemoryAllocatedChange(size_t old_size, size_t new_size) override { |
| 84 size_t old_size, size_t new_size) override { | 85 tracking_group_->TrackMemoryAllocatedChange(old_size, new_size); |
| 85 tracking_group_->TrackMemoryAllocatedChange( | |
| 86 old_size, new_size); | |
| 87 } | 86 } |
| 88 | 87 |
| 89 bool EnsureGPUMemoryAvailable(size_t size_needed) override { | 88 bool EnsureGPUMemoryAvailable(size_t size_needed) override { |
| 90 return tracking_group_->EnsureGPUMemoryAvailable(size_needed); | 89 return tracking_group_->EnsureGPUMemoryAvailable(size_needed); |
| 91 }; | 90 }; |
| 92 | 91 |
| 93 uint64_t ClientTracingId() const override { return client_tracing_id_; } | 92 uint64_t ClientTracingId() const override { return client_tracing_id_; } |
| 94 int ClientId() const override { return client_id_; } | 93 int ClientId() const override { return client_id_; } |
| 95 uint64_t ShareGroupTracingGUID() const override { | 94 uint64_t ShareGroupTracingGUID() const override { |
| 96 return share_group_tracing_guid_; | 95 return share_group_tracing_guid_; |
| (...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 152 }; | 151 }; |
| 153 | 152 |
| 154 std::unique_ptr<base::trace_event::ConvertableToTraceFormat> | 153 std::unique_ptr<base::trace_event::ConvertableToTraceFormat> |
| 155 DevToolsChannelData::CreateForChannel(GpuChannel* channel) { | 154 DevToolsChannelData::CreateForChannel(GpuChannel* channel) { |
| 156 std::unique_ptr<base::DictionaryValue> res(new base::DictionaryValue); | 155 std::unique_ptr<base::DictionaryValue> res(new base::DictionaryValue); |
| 157 res->SetInteger("renderer_pid", channel->GetClientPID()); | 156 res->SetInteger("renderer_pid", channel->GetClientPID()); |
| 158 res->SetDouble("used_bytes", channel->GetMemoryUsage()); | 157 res->SetDouble("used_bytes", channel->GetMemoryUsage()); |
| 159 return base::WrapUnique(new DevToolsChannelData(res.release())); | 158 return base::WrapUnique(new DevToolsChannelData(res.release())); |
| 160 } | 159 } |
| 161 | 160 |
| 162 CommandBufferId GetCommandBufferID(int channel_id, int32_t route_id) { | 161 } // anonymous namespace |
| 162 |
| 163 CommandBufferId GpuCommandBufferStub::GetCommandBufferID(int32_t channel_id, |
| 164 int32_t route_id) { |
| 163 return CommandBufferId::FromUnsafeValue( | 165 return CommandBufferId::FromUnsafeValue( |
| 164 (static_cast<uint64_t>(channel_id) << 32) | route_id); | 166 (static_cast<uint64_t>(channel_id) << 32) | route_id); |
| 165 } | 167 } |
| 166 | 168 |
| 167 } // namespace | 169 int32_t GpuCommandBufferStub::GetChannelID(gpu::CommandBufferId cmd_buffer_id) { |
| 170 return static_cast<int32_t>(cmd_buffer_id.GetUnsafeValue() >> 32); |
| 171 } |
| 172 |
| 173 int32_t GpuCommandBufferStub::GetRouteID(gpu::CommandBufferId cmd_buffer_id) { |
| 174 return static_cast<int32_t>(cmd_buffer_id.GetUnsafeValue()); |
| 175 } |
| 168 | 176 |
| 169 std::unique_ptr<GpuCommandBufferStub> GpuCommandBufferStub::Create( | 177 std::unique_ptr<GpuCommandBufferStub> GpuCommandBufferStub::Create( |
| 170 GpuChannel* channel, | 178 GpuChannel* channel, |
| 171 GpuCommandBufferStub* share_command_buffer_stub, | 179 GpuCommandBufferStub* share_command_buffer_stub, |
| 172 const GPUCreateCommandBufferConfig& init_params, | 180 const GPUCreateCommandBufferConfig& init_params, |
| 173 int32_t route_id, | 181 int32_t route_id, |
| 174 std::unique_ptr<base::SharedMemory> shared_state_shm) { | 182 std::unique_ptr<base::SharedMemory> shared_state_shm) { |
| 175 std::unique_ptr<GpuCommandBufferStub> stub( | 183 std::unique_ptr<GpuCommandBufferStub> stub( |
| 176 new GpuCommandBufferStub(channel, init_params, route_id)); | 184 new GpuCommandBufferStub(channel, init_params, route_id)); |
| 177 if (!stub->Initialize(share_command_buffer_stub, init_params, | 185 if (!stub->Initialize(share_command_buffer_stub, init_params, |
| (...skipping 17 matching lines...) Expand all Loading... |
| 195 waiting_for_sync_point_(false), | 203 waiting_for_sync_point_(false), |
| 196 previous_processed_num_(0), | 204 previous_processed_num_(0), |
| 197 active_url_(init_params.active_url), | 205 active_url_(init_params.active_url), |
| 198 active_url_hash_(base::Hash(active_url_.possibly_invalid_spec())) {} | 206 active_url_hash_(base::Hash(active_url_.possibly_invalid_spec())) {} |
| 199 | 207 |
| 200 GpuCommandBufferStub::~GpuCommandBufferStub() { | 208 GpuCommandBufferStub::~GpuCommandBufferStub() { |
| 201 Destroy(); | 209 Destroy(); |
| 202 } | 210 } |
| 203 | 211 |
| 204 GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const { | 212 GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const { |
| 205 return channel()->gpu_channel_manager()->gpu_memory_manager(); | 213 return channel()->gpu_channel_manager()->gpu_memory_manager(); |
| 206 } | 214 } |
| 207 | 215 |
| 208 bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) { | 216 bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) { |
| 209 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"), | 217 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"), "GPUTask", |
| 210 "GPUTask", | 218 "data", DevToolsChannelData::CreateForChannel(channel())); |
| 211 "data", | |
| 212 DevToolsChannelData::CreateForChannel(channel())); | |
| 213 FastSetActiveURL(active_url_, active_url_hash_, channel_); | 219 FastSetActiveURL(active_url_, active_url_hash_, channel_); |
| 214 | 220 |
| 215 bool have_context = false; | 221 bool have_context = false; |
| 216 // Ensure the appropriate GL context is current before handling any IPC | 222 // Ensure the appropriate GL context is current before handling any IPC |
| 217 // messages directed at the command buffer. This ensures that the message | 223 // messages directed at the command buffer. This ensures that the message |
| 218 // handler can assume that the context is current (not necessary for | 224 // handler can assume that the context is current (not necessary for |
| 219 // RetireSyncPoint or WaitSyncPoint). | 225 // RetireSyncPoint or WaitSyncPoint). |
| 220 if (decoder_.get() && | 226 if (decoder_.get() && |
| 221 message.type() != GpuCommandBufferMsg_SetGetBuffer::ID && | 227 message.type() != GpuCommandBufferMsg_SetGetBuffer::ID && |
| 222 message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID && | 228 message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID && |
| (...skipping 16 matching lines...) Expand all Loading... |
| 239 OnReturnFrontBuffer); | 245 OnReturnFrontBuffer); |
| 240 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange, | 246 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange, |
| 241 OnWaitForTokenInRange); | 247 OnWaitForTokenInRange); |
| 242 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange, | 248 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange, |
| 243 OnWaitForGetOffsetInRange); | 249 OnWaitForGetOffsetInRange); |
| 244 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush); | 250 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush); |
| 245 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer, | 251 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer, |
| 246 OnRegisterTransferBuffer); | 252 OnRegisterTransferBuffer); |
| 247 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer, | 253 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer, |
| 248 OnDestroyTransferBuffer); | 254 OnDestroyTransferBuffer); |
| 249 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_WaitSyncToken, | 255 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_WaitSyncToken, OnWaitSyncToken) |
| 250 OnWaitSyncToken) | 256 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncToken, OnSignalSyncToken) |
| 251 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncToken, | 257 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery, OnSignalQuery) |
| 252 OnSignalSyncToken) | |
| 253 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery, | |
| 254 OnSignalQuery) | |
| 255 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage); | 258 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage); |
| 256 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage); | 259 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage); |
| 257 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture, | 260 IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture, |
| 258 OnCreateStreamTexture) | 261 OnCreateStreamTexture) |
| 259 IPC_MESSAGE_UNHANDLED(handled = false) | 262 IPC_MESSAGE_UNHANDLED(handled = false) |
| 260 IPC_END_MESSAGE_MAP() | 263 IPC_END_MESSAGE_MAP() |
| 261 | 264 |
| 262 CheckCompleteWaits(); | 265 CheckCompleteWaits(); |
| 263 | 266 |
| 264 // Ensure that any delayed work that was created will be handled. | 267 // Ensure that any delayed work that was created will be handled. |
| (...skipping 108 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 373 } | 376 } |
| 374 | 377 |
| 375 ScheduleDelayedWork( | 378 ScheduleDelayedWork( |
| 376 base::TimeDelta::FromMilliseconds(kHandleMoreWorkPeriodBusyMs)); | 379 base::TimeDelta::FromMilliseconds(kHandleMoreWorkPeriodBusyMs)); |
| 377 } | 380 } |
| 378 | 381 |
| 379 bool GpuCommandBufferStub::HasUnprocessedCommands() { | 382 bool GpuCommandBufferStub::HasUnprocessedCommands() { |
| 380 if (command_buffer_) { | 383 if (command_buffer_) { |
| 381 CommandBuffer::State state = command_buffer_->GetLastState(); | 384 CommandBuffer::State state = command_buffer_->GetLastState(); |
| 382 return command_buffer_->GetPutOffset() != state.get_offset && | 385 return command_buffer_->GetPutOffset() != state.get_offset && |
| 383 !error::IsError(state.error); | 386 !error::IsError(state.error); |
| 384 } | 387 } |
| 385 return false; | 388 return false; |
| 386 } | 389 } |
| 387 | 390 |
| 388 void GpuCommandBufferStub::ScheduleDelayedWork(base::TimeDelta delay) { | 391 void GpuCommandBufferStub::ScheduleDelayedWork(base::TimeDelta delay) { |
| 389 bool has_more_work = executor_.get() && (executor_->HasPendingQueries() || | 392 bool has_more_work = executor_.get() && (executor_->HasPendingQueries() || |
| 390 executor_->HasMoreIdleWork() || | 393 executor_->HasMoreIdleWork() || |
| 391 executor_->HasPollingWork()); | 394 executor_->HasPollingWork()); |
| 392 if (!has_more_work) { | 395 if (!has_more_work) { |
| 393 last_idle_time_ = base::TimeTicks(); | 396 last_idle_time_ = base::TimeTicks(); |
| (...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 445 Send(wait_for_get_offset_->reply.release()); | 448 Send(wait_for_get_offset_->reply.release()); |
| 446 wait_for_get_offset_.reset(); | 449 wait_for_get_offset_.reset(); |
| 447 } | 450 } |
| 448 | 451 |
| 449 if (initialized_) { | 452 if (initialized_) { |
| 450 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager(); | 453 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager(); |
| 451 // If we are currently shutting down the GPU process to help with recovery | 454 // If we are currently shutting down the GPU process to help with recovery |
| 452 // (exit_on_context_lost workaround), then don't tell the browser about | 455 // (exit_on_context_lost workaround), then don't tell the browser about |
| 453 // offscreen context destruction here since it's not client-invoked, and | 456 // offscreen context destruction here since it's not client-invoked, and |
| 454 // might bypass the 3D API blocking logic. | 457 // might bypass the 3D API blocking logic. |
| 455 if ((surface_handle_ == gpu::kNullSurfaceHandle) && !active_url_.is_empty() | 458 if ((surface_handle_ == gpu::kNullSurfaceHandle) && |
| 456 && !gpu_channel_manager->is_exiting_for_lost_context()) { | 459 !active_url_.is_empty() && |
| 460 !gpu_channel_manager->is_exiting_for_lost_context()) { |
| 457 gpu_channel_manager->delegate()->DidDestroyOffscreenContext(active_url_); | 461 gpu_channel_manager->delegate()->DidDestroyOffscreenContext(active_url_); |
| 458 } | 462 } |
| 459 } | 463 } |
| 460 | 464 |
| 461 if (decoder_) | 465 if (decoder_) |
| 462 decoder_->set_engine(NULL); | 466 decoder_->set_engine(NULL); |
| 463 | 467 |
| 464 // The scheduler has raw references to the decoder and the command buffer so | 468 // The scheduler has raw references to the decoder and the command buffer so |
| 465 // destroy it before those. | 469 // destroy it before those. |
| 466 executor_.reset(); | 470 executor_.reset(); |
| (...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 535 gl::GLSurface::Format surface_format = gl::GLSurface::SURFACE_DEFAULT; | 539 gl::GLSurface::Format surface_format = gl::GLSurface::SURFACE_DEFAULT; |
| 536 bool offscreen = (surface_handle_ == kNullSurfaceHandle); | 540 bool offscreen = (surface_handle_ == kNullSurfaceHandle); |
| 537 gl::GLSurface* default_surface = manager->GetDefaultOffscreenSurface(); | 541 gl::GLSurface* default_surface = manager->GetDefaultOffscreenSurface(); |
| 538 if (!default_surface) { | 542 if (!default_surface) { |
| 539 DLOG(ERROR) << "Failed to create default offscreen surface."; | 543 DLOG(ERROR) << "Failed to create default offscreen surface."; |
| 540 return false; | 544 return false; |
| 541 } | 545 } |
| 542 #if defined(OS_ANDROID) | 546 #if defined(OS_ANDROID) |
| 543 if (init_params.attribs.red_size <= 5 && | 547 if (init_params.attribs.red_size <= 5 && |
| 544 init_params.attribs.green_size <= 6 && | 548 init_params.attribs.green_size <= 6 && |
| 545 init_params.attribs.blue_size <= 5 && | 549 init_params.attribs.blue_size <= 5 && init_params.attribs.alpha_size == 0) |
| 546 init_params.attribs.alpha_size == 0) | |
| 547 surface_format = gl::GLSurface::SURFACE_RGB565; | 550 surface_format = gl::GLSurface::SURFACE_RGB565; |
| 548 // We can only use virtualized contexts for onscreen command buffers if their | 551 // We can only use virtualized contexts for onscreen command buffers if their |
| 549 // config is compatible with the offscreen ones - otherwise MakeCurrent fails. | 552 // config is compatible with the offscreen ones - otherwise MakeCurrent fails. |
| 550 if (surface_format != default_surface->GetFormat() && !offscreen) | 553 if (surface_format != default_surface->GetFormat() && !offscreen) |
| 551 use_virtualized_gl_context_ = false; | 554 use_virtualized_gl_context_ = false; |
| 552 #endif | 555 #endif |
| 553 | 556 |
| 554 command_buffer_.reset(new CommandBufferService( | 557 command_buffer_.reset( |
| 555 context_group_->transfer_buffer_manager())); | 558 new CommandBufferService(context_group_->transfer_buffer_manager())); |
| 556 | 559 |
| 557 decoder_.reset(gles2::GLES2Decoder::Create(context_group_.get())); | 560 decoder_.reset(gles2::GLES2Decoder::Create(context_group_.get())); |
| 558 executor_.reset(new CommandExecutor(command_buffer_.get(), decoder_.get(), | 561 executor_.reset(new CommandExecutor(command_buffer_.get(), decoder_.get(), |
| 559 decoder_.get())); | 562 decoder_.get())); |
| 560 sync_point_client_ = channel_->sync_point_manager()->CreateSyncPointClient( | 563 sync_point_client_ = channel_->sync_point_manager()->CreateSyncPointClient( |
| 561 channel_->GetSyncPointOrderData(stream_id_), | 564 channel_->GetSyncPointOrderData(stream_id_), |
| 562 CommandBufferNamespace::GPU_IO, command_buffer_id_); | 565 CommandBufferNamespace::GPU_IO, command_buffer_id_); |
| 563 | 566 |
| 564 executor_->SetPreemptByFlag(channel_->preempted_flag()); | 567 executor_->SetPreemptionCallback(channel_->GetPreemptionCallback()); |
| 565 | 568 |
| 566 decoder_->set_engine(executor_.get()); | 569 decoder_->set_engine(executor_.get()); |
| 567 | 570 |
| 568 if (offscreen) { | 571 if (offscreen) { |
| 569 surface_ = default_surface; | 572 surface_ = default_surface; |
| 570 } else { | 573 } else { |
| 571 surface_ = ImageTransportSurface::CreateNativeSurface( | 574 surface_ = ImageTransportSurface::CreateNativeSurface( |
| 572 AsWeakPtr(), surface_handle_, surface_format); | 575 AsWeakPtr(), surface_handle_, surface_format); |
| 573 if (!surface_ || !surface_->Initialize(surface_format)) { | 576 if (!surface_ || !surface_->Initialize(surface_format)) { |
| 574 surface_ = nullptr; | 577 surface_ = nullptr; |
| (...skipping 18 matching lines...) Expand all Loading... |
| 593 // Ensure that context creation did not lose track of the intended | 596 // Ensure that context creation did not lose track of the intended |
| 594 // gl_share_group. | 597 // gl_share_group. |
| 595 DCHECK(context->share_group() == gl_share_group); | 598 DCHECK(context->share_group() == gl_share_group); |
| 596 gl_share_group->SetSharedContext(surface_.get(), context.get()); | 599 gl_share_group->SetSharedContext(surface_.get(), context.get()); |
| 597 } | 600 } |
| 598 // This should be either: | 601 // This should be either: |
| 599 // (1) a non-virtual GL context, or | 602 // (1) a non-virtual GL context, or |
| 600 // (2) a mock context. | 603 // (2) a mock context. |
| 601 DCHECK(context->GetHandle() || | 604 DCHECK(context->GetHandle() || |
| 602 gl::GetGLImplementation() == gl::kGLImplementationMockGL); | 605 gl::GetGLImplementation() == gl::kGLImplementationMockGL); |
| 603 context = new GLContextVirtual( | 606 context = new GLContextVirtual(gl_share_group, context.get(), |
| 604 gl_share_group, context.get(), decoder_->AsWeakPtr()); | 607 decoder_->AsWeakPtr()); |
| 605 if (!context->Initialize( | 608 if (!context->Initialize( |
| 606 surface_.get(), | 609 surface_.get(), |
| 607 GenerateGLContextAttribs(init_params.attribs, | 610 GenerateGLContextAttribs(init_params.attribs, |
| 608 context_group_->gpu_preferences()))) { | 611 context_group_->gpu_preferences()))) { |
| 609 // The real context created above for the default offscreen surface | 612 // The real context created above for the default offscreen surface |
| 610 // might not be compatible with this surface. | 613 // might not be compatible with this surface. |
| 611 context = NULL; | 614 context = NULL; |
| 612 DLOG(ERROR) << "Failed to initialize virtual GL context."; | 615 DLOG(ERROR) << "Failed to initialize virtual GL context."; |
| 613 return false; | 616 return false; |
| 614 } | 617 } |
| 615 } | 618 } |
| 616 if (!context.get()) { | 619 if (!context.get()) { |
| 617 context = gl::init::CreateGLContext( | 620 context = gl::init::CreateGLContext( |
| 618 gl_share_group, surface_.get(), | 621 gl_share_group, surface_.get(), |
| 619 GenerateGLContextAttribs(init_params.attribs, | 622 GenerateGLContextAttribs(init_params.attribs, |
| 620 context_group_->gpu_preferences())); | 623 context_group_->gpu_preferences())); |
| 621 } | 624 } |
| 622 if (!context.get()) { | 625 if (!context.get()) { |
| 623 DLOG(ERROR) << "Failed to create context."; | 626 DLOG(ERROR) << "Failed to create context."; |
| 624 return false; | 627 return false; |
| 625 } | 628 } |
| 626 | 629 |
| 627 if (!context->MakeCurrent(surface_.get())) { | 630 if (!context->MakeCurrent(surface_.get())) { |
| 628 LOG(ERROR) << "Failed to make context current."; | 631 LOG(ERROR) << "Failed to make context current."; |
| 629 return false; | 632 return false; |
| 630 } | 633 } |
| 631 | 634 |
| 632 if (!context->GetGLStateRestorer()) { | 635 if (!context->GetGLStateRestorer()) { |
| 633 context->SetGLStateRestorer( | 636 context->SetGLStateRestorer(new GLStateRestorerImpl(decoder_->AsWeakPtr())); |
| 634 new GLStateRestorerImpl(decoder_->AsWeakPtr())); | |
| 635 } | 637 } |
| 636 | 638 |
| 637 if (!context_group_->has_program_cache() && | 639 if (!context_group_->has_program_cache() && |
| 638 !context_group_->feature_info()->workarounds().disable_program_cache) { | 640 !context_group_->feature_info()->workarounds().disable_program_cache) { |
| 639 context_group_->set_program_cache(manager->program_cache()); | 641 context_group_->set_program_cache(manager->program_cache()); |
| 640 } | 642 } |
| 641 | 643 |
| 642 // Initialize the decoder with either the view or pbuffer GLContext. | 644 // Initialize the decoder with either the view or pbuffer GLContext. |
| 643 if (!decoder_->Initialize(surface_, context, offscreen, | 645 if (!decoder_->Initialize(surface_, context, offscreen, |
| 644 gpu::gles2::DisallowedFeatures(), | 646 gpu::gles2::DisallowedFeatures(), |
| 645 init_params.attribs)) { | 647 init_params.attribs)) { |
| 646 DLOG(ERROR) << "Failed to initialize decoder."; | 648 DLOG(ERROR) << "Failed to initialize decoder."; |
| 647 return false; | 649 return false; |
| 648 } | 650 } |
| 649 | 651 |
| 650 if (manager->gpu_preferences().enable_gpu_service_logging) { | 652 if (manager->gpu_preferences().enable_gpu_service_logging) { |
| 651 decoder_->set_log_commands(true); | 653 decoder_->set_log_commands(true); |
| 652 } | 654 } |
| 653 | 655 |
| 654 decoder_->GetLogger()->SetMsgCallback( | 656 decoder_->GetLogger()->SetMsgCallback(base::Bind( |
| 655 base::Bind(&GpuCommandBufferStub::SendConsoleMessage, | 657 &GpuCommandBufferStub::SendConsoleMessage, base::Unretained(this))); |
| 656 base::Unretained(this))); | 658 decoder_->SetShaderCacheCallback(base::Bind( |
| 657 decoder_->SetShaderCacheCallback( | 659 &GpuCommandBufferStub::SendCachedShader, base::Unretained(this))); |
| 658 base::Bind(&GpuCommandBufferStub::SendCachedShader, | |
| 659 base::Unretained(this))); | |
| 660 decoder_->SetFenceSyncReleaseCallback(base::Bind( | 660 decoder_->SetFenceSyncReleaseCallback(base::Bind( |
| 661 &GpuCommandBufferStub::OnFenceSyncRelease, base::Unretained(this))); | 661 &GpuCommandBufferStub::OnFenceSyncRelease, base::Unretained(this))); |
| 662 decoder_->SetWaitFenceSyncCallback(base::Bind( | 662 decoder_->SetWaitFenceSyncCallback(base::Bind( |
| 663 &GpuCommandBufferStub::OnWaitFenceSync, base::Unretained(this))); | 663 &GpuCommandBufferStub::OnWaitFenceSync, base::Unretained(this))); |
| 664 decoder_->SetDescheduleUntilFinishedCallback( | 664 decoder_->SetDescheduleUntilFinishedCallback( |
| 665 base::Bind(&GpuCommandBufferStub::OnDescheduleUntilFinished, | 665 base::Bind(&GpuCommandBufferStub::OnDescheduleUntilFinished, |
| 666 base::Unretained(this))); | 666 base::Unretained(this))); |
| 667 decoder_->SetRescheduleAfterFinishedCallback( | 667 decoder_->SetRescheduleAfterFinishedCallback( |
| 668 base::Bind(&GpuCommandBufferStub::OnRescheduleAfterFinished, | 668 base::Bind(&GpuCommandBufferStub::OnRescheduleAfterFinished, |
| 669 base::Unretained(this))); | 669 base::Unretained(this))); |
| (...skipping 104 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 774 } | 774 } |
| 775 wait_for_get_offset_ = | 775 wait_for_get_offset_ = |
| 776 base::MakeUnique<WaitForCommandState>(start, end, reply_message); | 776 base::MakeUnique<WaitForCommandState>(start, end, reply_message); |
| 777 CheckCompleteWaits(); | 777 CheckCompleteWaits(); |
| 778 } | 778 } |
| 779 | 779 |
| 780 void GpuCommandBufferStub::CheckCompleteWaits() { | 780 void GpuCommandBufferStub::CheckCompleteWaits() { |
| 781 if (wait_for_token_ || wait_for_get_offset_) { | 781 if (wait_for_token_ || wait_for_get_offset_) { |
| 782 CommandBuffer::State state = command_buffer_->GetLastState(); | 782 CommandBuffer::State state = command_buffer_->GetLastState(); |
| 783 if (wait_for_token_ && | 783 if (wait_for_token_ && |
| 784 (CommandBuffer::InRange( | 784 (CommandBuffer::InRange(wait_for_token_->start, wait_for_token_->end, |
| 785 wait_for_token_->start, wait_for_token_->end, state.token) || | 785 state.token) || |
| 786 state.error != error::kNoError)) { | 786 state.error != error::kNoError)) { |
| 787 ReportState(); | 787 ReportState(); |
| 788 GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams( | 788 GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams( |
| 789 wait_for_token_->reply.get(), state); | 789 wait_for_token_->reply.get(), state); |
| 790 Send(wait_for_token_->reply.release()); | 790 Send(wait_for_token_->reply.release()); |
| 791 wait_for_token_.reset(); | 791 wait_for_token_.reset(); |
| 792 } | 792 } |
| 793 if (wait_for_get_offset_ && | 793 if (wait_for_get_offset_ && |
| 794 (CommandBuffer::InRange(wait_for_get_offset_->start, | 794 (CommandBuffer::InRange(wait_for_get_offset_->start, |
| 795 wait_for_get_offset_->end, | 795 wait_for_get_offset_->end, state.get_offset) || |
| 796 state.get_offset) || | |
| 797 state.error != error::kNoError)) { | 796 state.error != error::kNoError)) { |
| 798 ReportState(); | 797 ReportState(); |
| 799 GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams( | 798 GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams( |
| 800 wait_for_get_offset_->reply.get(), state); | 799 wait_for_get_offset_->reply.get(), state); |
| 801 Send(wait_for_get_offset_->reply.release()); | 800 Send(wait_for_get_offset_->reply.release()); |
| 802 wait_for_get_offset_.reset(); | 801 wait_for_get_offset_.reset(); |
| 803 } | 802 } |
| 804 } | 803 } |
| 805 } | 804 } |
| 806 | 805 |
| 807 void GpuCommandBufferStub::OnAsyncFlush( | 806 void GpuCommandBufferStub::OnAsyncFlush( |
| 808 int32_t put_offset, | 807 int32_t put_offset, |
| 809 uint32_t flush_count, | 808 uint32_t flush_count, |
| 810 const std::vector<ui::LatencyInfo>& latency_info) { | 809 const std::vector<ui::LatencyInfo>& latency_info) { |
| 811 TRACE_EVENT1( | 810 TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", |
| 812 "gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset", put_offset); | 811 put_offset); |
| 813 DCHECK(command_buffer_); | 812 DCHECK(command_buffer_); |
| 814 | 813 |
| 815 // We received this message out-of-order. This should not happen but is here | 814 // We received this message out-of-order. This should not happen but is here |
| 816 // to catch regressions. Ignore the message. | 815 // to catch regressions. Ignore the message. |
| 817 DVLOG_IF(0, flush_count - last_flush_count_ >= 0x8000000U) | 816 DVLOG_IF(0, flush_count - last_flush_count_ >= 0x8000000U) |
| 818 << "Received a Flush message out-of-order"; | 817 << "Received a Flush message out-of-order"; |
| 819 | 818 |
| 820 if (flush_count > last_flush_count_ && | 819 if (flush_count > last_flush_count_ && |
| 821 ui::LatencyInfo::Verify(latency_info, | 820 ui::LatencyInfo::Verify(latency_info, |
| 822 "GpuCommandBufferStub::OnAsyncFlush") && | 821 "GpuCommandBufferStub::OnAsyncFlush") && |
| (...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 864 | 863 |
| 865 if (command_buffer_) | 864 if (command_buffer_) |
| 866 command_buffer_->DestroyTransferBuffer(id); | 865 command_buffer_->DestroyTransferBuffer(id); |
| 867 } | 866 } |
| 868 | 867 |
| 869 void GpuCommandBufferStub::OnCommandProcessed() { | 868 void GpuCommandBufferStub::OnCommandProcessed() { |
| 870 DCHECK(channel_->watchdog()); | 869 DCHECK(channel_->watchdog()); |
| 871 channel_->watchdog()->CheckArmed(); | 870 channel_->watchdog()->CheckArmed(); |
| 872 } | 871 } |
| 873 | 872 |
| 874 void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); } | 873 void GpuCommandBufferStub::ReportState() { |
| 874 command_buffer_->UpdateState(); |
| 875 } |
| 875 | 876 |
| 876 void GpuCommandBufferStub::PutChanged() { | 877 void GpuCommandBufferStub::PutChanged() { |
| 877 FastSetActiveURL(active_url_, active_url_hash_, channel_); | 878 FastSetActiveURL(active_url_, active_url_hash_, channel_); |
| 878 executor_->PutChanged(); | 879 executor_->PutChanged(); |
| 879 } | 880 } |
| 880 | 881 |
| 881 void GpuCommandBufferStub::PullTextureUpdates( | 882 void GpuCommandBufferStub::PullTextureUpdates( |
| 882 CommandBufferNamespace namespace_id, | 883 CommandBufferNamespace namespace_id, |
| 883 CommandBufferId command_buffer_id, | 884 CommandBufferId command_buffer_id, |
| 884 uint32_t release) { | 885 uint32_t release) { |
| 885 gles2::MailboxManager* mailbox_manager = | 886 gles2::MailboxManager* mailbox_manager = context_group_->mailbox_manager(); |
| 886 context_group_->mailbox_manager(); | |
| 887 if (mailbox_manager->UsesSync() && MakeCurrent()) { | 887 if (mailbox_manager->UsesSync() && MakeCurrent()) { |
| 888 SyncToken sync_token(namespace_id, 0, command_buffer_id, release); | 888 SyncToken sync_token(namespace_id, 0, command_buffer_id, release); |
| 889 mailbox_manager->PullTextureUpdates(sync_token); | 889 mailbox_manager->PullTextureUpdates(sync_token); |
| 890 } | 890 } |
| 891 } | 891 } |
| 892 | 892 |
| 893 void GpuCommandBufferStub::OnWaitSyncToken(const SyncToken& sync_token) { | 893 void GpuCommandBufferStub::OnWaitSyncToken(const SyncToken& sync_token) { |
| 894 OnWaitFenceSync(sync_token.namespace_id(), sync_token.command_buffer_id(), | 894 OnWaitFenceSync(sync_token.namespace_id(), sync_token.command_buffer_id(), |
| 895 sync_token.release_count()); | 895 sync_token.release_count()); |
| 896 } | 896 } |
| 897 | 897 |
| 898 void GpuCommandBufferStub::OnSignalSyncToken(const SyncToken& sync_token, | 898 void GpuCommandBufferStub::OnSignalSyncToken(const SyncToken& sync_token, |
| 899 uint32_t id) { | 899 uint32_t id) { |
| 900 scoped_refptr<SyncPointClientState> release_state = | 900 scoped_refptr<SyncPointClientState> release_state = |
| 901 channel_->sync_point_manager()->GetSyncPointClientState( | 901 channel_->sync_point_manager()->GetSyncPointClientState( |
| 902 sync_token.namespace_id(), sync_token.command_buffer_id()); | 902 sync_token.namespace_id(), sync_token.command_buffer_id()); |
| 903 | 903 |
| 904 if (release_state) { | 904 if (release_state) { |
| 905 sync_point_client_->Wait(release_state.get(), sync_token.release_count(), | 905 sync_point_client_->Wait( |
| 906 base::Bind(&GpuCommandBufferStub::OnSignalAck, | 906 release_state.get(), sync_token.release_count(), |
| 907 this->AsWeakPtr(), id)); | 907 base::Bind(&GpuCommandBufferStub::OnSignalAck, this->AsWeakPtr(), id)); |
| 908 } else { | 908 } else { |
| 909 OnSignalAck(id); | 909 OnSignalAck(id); |
| 910 } | 910 } |
| 911 } | 911 } |
| 912 | 912 |
| 913 void GpuCommandBufferStub::OnSignalAck(uint32_t id) { | 913 void GpuCommandBufferStub::OnSignalAck(uint32_t id) { |
| 914 Send(new GpuCommandBufferMsg_SignalAck(route_id_, id)); | 914 Send(new GpuCommandBufferMsg_SignalAck(route_id_, id)); |
| 915 } | 915 } |
| 916 | 916 |
| 917 void GpuCommandBufferStub::OnSignalQuery(uint32_t query_id, uint32_t id) { | 917 void GpuCommandBufferStub::OnSignalQuery(uint32_t query_id, uint32_t id) { |
| 918 if (decoder_) { | 918 if (decoder_) { |
| 919 gles2::QueryManager* query_manager = decoder_->GetQueryManager(); | 919 gles2::QueryManager* query_manager = decoder_->GetQueryManager(); |
| 920 if (query_manager) { | 920 if (query_manager) { |
| 921 gles2::QueryManager::Query* query = | 921 gles2::QueryManager::Query* query = query_manager->GetQuery(query_id); |
| 922 query_manager->GetQuery(query_id); | |
| 923 if (query) { | 922 if (query) { |
| 924 query->AddCallback( | 923 query->AddCallback(base::Bind(&GpuCommandBufferStub::OnSignalAck, |
| 925 base::Bind(&GpuCommandBufferStub::OnSignalAck, | 924 this->AsWeakPtr(), id)); |
| 926 this->AsWeakPtr(), | |
| 927 id)); | |
| 928 return; | 925 return; |
| 929 } | 926 } |
| 930 } | 927 } |
| 931 } | 928 } |
| 932 // Something went wrong, run callback immediately. | 929 // Something went wrong, run callback immediately. |
| 933 OnSignalAck(id); | 930 OnSignalAck(id); |
| 934 } | 931 } |
| 935 | 932 |
| 936 void GpuCommandBufferStub::OnFenceSyncRelease(uint64_t release) { | 933 void GpuCommandBufferStub::OnFenceSyncRelease(uint64_t release) { |
| 937 if (sync_point_client_->client_state()->IsFenceSyncReleased(release)) { | 934 if (sync_point_client_->client_state()->IsFenceSyncReleased(release)) { |
| 938 DLOG(ERROR) << "Fence Sync has already been released."; | 935 DLOG(ERROR) << "Fence Sync has already been released."; |
| 939 return; | 936 return; |
| 940 } | 937 } |
| 941 | 938 |
| 942 gles2::MailboxManager* mailbox_manager = | 939 gles2::MailboxManager* mailbox_manager = context_group_->mailbox_manager(); |
| 943 context_group_->mailbox_manager(); | |
| 944 if (mailbox_manager->UsesSync() && MakeCurrent()) { | 940 if (mailbox_manager->UsesSync() && MakeCurrent()) { |
| 945 SyncToken sync_token(CommandBufferNamespace::GPU_IO, 0, | 941 SyncToken sync_token(CommandBufferNamespace::GPU_IO, 0, command_buffer_id_, |
| 946 command_buffer_id_, release); | 942 release); |
| 947 mailbox_manager->PushTextureUpdates(sync_token); | 943 mailbox_manager->PushTextureUpdates(sync_token); |
| 948 } | 944 } |
| 949 | 945 |
| 950 sync_point_client_->ReleaseFenceSync(release); | 946 sync_point_client_->ReleaseFenceSync(release); |
| 947 command_buffer_->SetReleaseCount(release); |
| 951 } | 948 } |
| 952 | 949 |
// Deschedules this command buffer until its pending polling work finishes.
void GpuCommandBufferStub::OnDescheduleUntilFinished() {
  // Only a currently-scheduled executor with outstanding polling work may
  // deschedule itself; that polling work is what later triggers
  // OnRescheduleAfterFinished.
  DCHECK(executor_->scheduled());
  DCHECK(executor_->HasPollingWork());
  executor_->SetScheduled(false);
  channel_->DescheduleCommandBuffer(this);
}
| 960 | 956 |
// Counterpart to OnDescheduleUntilFinished: re-schedules this command buffer
// once the work it was waiting on has finished.
void GpuCommandBufferStub::OnRescheduleAfterFinished() {
  DCHECK(!executor_->scheduled());
  executor_->SetScheduled(true);
  channel_->ScheduleCommandBuffer(this);
}
| 967 | 962 |
// Waits for fence sync |release| on (namespace_id, command_buffer_id).
// Returns true if execution may continue immediately (already released, or
// release target unknown); returns false after descheduling this command
// buffer to wait for the release.
bool GpuCommandBufferStub::OnWaitFenceSync(CommandBufferNamespace namespace_id,
                                           CommandBufferId command_buffer_id,
                                           uint64_t release) {
  DCHECK(!waiting_for_sync_point_);
  DCHECK(executor_->scheduled());

  scoped_refptr<SyncPointClientState> release_state =
      channel_->sync_point_manager()->GetSyncPointClientState(
          namespace_id, command_buffer_id);

  // Unknown release target: nothing to wait on, proceed.
  if (!release_state)
    return true;

  if (release_state->IsFenceSyncReleased(release)) {
    // Already released; just pull any texture updates published with it.
    PullTextureUpdates(namespace_id, command_buffer_id, release);
    return true;
  }

  TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitFenceSync", this, "GpuCommandBufferStub",
                           this);
  waiting_for_sync_point_ = true;
  sync_point_client_->WaitNonThreadSafe(
      release_state.get(), release, channel_->task_runner(),
      base::Bind(&GpuCommandBufferStub::OnWaitFenceSyncCompleted,
                 this->AsWeakPtr(), namespace_id, command_buffer_id, release));

  // WaitNonThreadSafe can apparently invoke the completion callback
  // synchronously (OnWaitFenceSyncCompleted clears waiting_for_sync_point_);
  // in that case the wait is already over and we must not deschedule.
  if (!waiting_for_sync_point_)
    return true;

  executor_->SetScheduled(false);
  channel_->DescheduleCommandBuffer(this);
  return false;
}
| 1002 | 996 |
// Completion callback for the wait started in OnWaitFenceSync: pulls the
// texture updates published with the release, then re-schedules this
// command buffer.
void GpuCommandBufferStub::OnWaitFenceSyncCompleted(
    CommandBufferNamespace namespace_id,
    CommandBufferId command_buffer_id,
    uint64_t release) {
  DCHECK(waiting_for_sync_point_);
  TRACE_EVENT_ASYNC_END1("gpu", "WaitFenceSync", this, "GpuCommandBufferStub",
                         this);
  PullTextureUpdates(namespace_id, command_buffer_id, release);
  // Clearing this flag also signals OnWaitFenceSync (when the callback runs
  // synchronously) that no deschedule is needed.
  waiting_for_sync_point_ = false;
  executor_->SetScheduled(true);
  channel_->ScheduleCommandBuffer(this);
}
| 1015 | 1009 |
| 1016 void GpuCommandBufferStub::OnCreateImage( | 1010 void GpuCommandBufferStub::OnCreateImage( |
| 1017 const GpuCommandBufferMsg_CreateImage_Params& params) { | 1011 const GpuCommandBufferMsg_CreateImage_Params& params) { |
| 1018 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage"); | 1012 TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage"); |
| 1019 const int32_t id = params.id; | 1013 const int32_t id = params.id; |
| 1020 const gfx::GpuMemoryBufferHandle& handle = params.gpu_memory_buffer; | 1014 const gfx::GpuMemoryBufferHandle& handle = params.gpu_memory_buffer; |
| 1021 const gfx::Size& size = params.size; | 1015 const gfx::Size& size = params.size; |
| 1022 const gfx::BufferFormat& format = params.format; | 1016 const gfx::BufferFormat& format = params.format; |
| 1023 const uint32_t internalformat = params.internal_format; | 1017 const uint32_t internalformat = params.internal_format; |
| (...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1079 } | 1073 } |
| 1080 | 1074 |
| 1081 image_manager->RemoveImage(id); | 1075 image_manager->RemoveImage(id); |
| 1082 } | 1076 } |
| 1083 | 1077 |
| 1084 void GpuCommandBufferStub::SendConsoleMessage(int32_t id, | 1078 void GpuCommandBufferStub::SendConsoleMessage(int32_t id, |
| 1085 const std::string& message) { | 1079 const std::string& message) { |
| 1086 GPUCommandBufferConsoleMessage console_message; | 1080 GPUCommandBufferConsoleMessage console_message; |
| 1087 console_message.id = id; | 1081 console_message.id = id; |
| 1088 console_message.message = message; | 1082 console_message.message = message; |
| 1089 IPC::Message* msg = new GpuCommandBufferMsg_ConsoleMsg( | 1083 IPC::Message* msg = |
| 1090 route_id_, console_message); | 1084 new GpuCommandBufferMsg_ConsoleMsg(route_id_, console_message); |
| 1091 msg->set_unblock(true); | 1085 msg->set_unblock(true); |
| 1092 Send(msg); | 1086 Send(msg); |
| 1093 } | 1087 } |
| 1094 | 1088 |
// Delegates caching of a compiled shader (keyed by |key|) to the channel.
void GpuCommandBufferStub::SendCachedShader(const std::string& key,
                                            const std::string& shader) {
  channel_->CacheShader(key, shader);
}
| 1099 | 1093 |
// Registers |observer| to be notified when this stub is destroyed.
void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}
| 1104 | 1098 |
| 1105 void GpuCommandBufferStub::RemoveDestructionObserver( | 1099 void GpuCommandBufferStub::RemoveDestructionObserver( |
| 1106 DestructionObserver* observer) { | 1100 DestructionObserver* observer) { |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1144 command_buffer_->GetLastState().error == error::kLostContext) | 1138 command_buffer_->GetLastState().error == error::kLostContext) |
| 1145 return; | 1139 return; |
| 1146 | 1140 |
| 1147 command_buffer_->SetContextLostReason(error::kUnknown); | 1141 command_buffer_->SetContextLostReason(error::kUnknown); |
| 1148 if (decoder_) | 1142 if (decoder_) |
| 1149 decoder_->MarkContextLost(error::kUnknown); | 1143 decoder_->MarkContextLost(error::kUnknown); |
| 1150 command_buffer_->SetParseError(error::kLostContext); | 1144 command_buffer_->SetParseError(error::kLostContext); |
| 1151 } | 1145 } |
| 1152 | 1146 |
| 1153 } // namespace gpu | 1147 } // namespace gpu |
| OLD | NEW |