// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/ipc/service/gpu_command_buffer_stub.h"

#include <utility>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/hash.h"
#include "base/json/json_writer.h"
#include "base/macros.h"
#include "base/memory/shared_memory.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/gl_context_virtual.h"
#include "gpu/command_buffer/service/gl_state_restorer_impl.h"
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/image_manager.h"
#include "gpu/command_buffer/service/logger.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/query_manager.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/command_buffer/service/transfer_buffer_manager.h"
#include "gpu/command_buffer/service/valuebuffer_manager.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
#include "gpu/ipc/service/gpu_memory_manager.h"
#include "gpu/ipc/service/gpu_memory_tracking.h"
#include "gpu/ipc/service/gpu_watchdog.h"
#include "gpu/ipc/service/image_transport_surface.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_switches.h"

#if defined(OS_WIN)
#include "base/win/win_util.h"
#endif

#if defined(OS_ANDROID)
#include "gpu/ipc/service/stream_texture_android.h"
#endif

namespace gpu {
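// Bookkeeping for an in-flight WaitForTokenInRange / WaitForGetOffsetInRange
// request; |reply| is sent once the command buffer state enters
// [start, end] or reports an error. See CheckCompleteWaits().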
struct WaitForCommandState {
  WaitForCommandState(int32_t start, int32_t end, IPC::Message* reply)
      : start(start), end(end), reply(reply) {}

  int32_t start;
  int32_t end;
  scoped_ptr<IPC::Message> reply;
};

namespace {

// The GpuCommandBufferMemoryTracker class provides a bridge between the
// ContextGroup's memory type managers and the GpuMemoryManager class.
class GpuCommandBufferMemoryTracker : public gles2::MemoryTracker {
 public:
  explicit GpuCommandBufferMemoryTracker(GpuChannel* channel,
                                         uint64_t share_group_tracing_guid)
      : tracking_group_(
            channel->gpu_channel_manager()
                ->gpu_memory_manager()
                ->CreateTrackingGroup(channel->GetClientPID(), this)),
        client_tracing_id_(channel->client_tracing_id()),
        client_id_(channel->client_id()),
        share_group_tracing_guid_(share_group_tracing_guid) {}

  void TrackMemoryAllocatedChange(size_t old_size, size_t new_size) override {
    tracking_group_->TrackMemoryAllocatedChange(old_size, new_size);
  }

  bool EnsureGPUMemoryAvailable(size_t size_needed) override {
    return tracking_group_->EnsureGPUMemoryAvailable(size_needed);
  }

  uint64_t ClientTracingId() const override { return client_tracing_id_; }
  int ClientId() const override { return client_id_; }
  uint64_t ShareGroupTracingGUID() const override {
    return share_group_tracing_guid_;
  }

 private:
  ~GpuCommandBufferMemoryTracker() override {}
  scoped_ptr<GpuMemoryTrackingGroup> tracking_group_;
  const uint64_t client_tracing_id_;
  const int client_id_;
  const uint64_t share_group_tracing_guid_;

  DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
};

// FastSetActiveURL will shortcut the expensive call to SetActiveURL when the
// url_hash matches.
void FastSetActiveURL(const GURL& url, size_t url_hash, GpuChannel* channel) {
  // Leave the previously set URL in the empty case -- empty URLs are given by
  // BlinkPlatformImpl::createOffscreenGraphicsContext3DProvider. Hopefully the
  // onscreen context URL was set previously and will show up even when a crash
  // occurs during offscreen command processing.
  if (url.is_empty())
    return;
  static size_t g_last_url_hash = 0;
  if (url_hash != g_last_url_hash) {
    g_last_url_hash = url_hash;
    DCHECK(channel && channel->gpu_channel_manager() &&
           channel->gpu_channel_manager()->delegate());
    channel->gpu_channel_manager()->delegate()->SetActiveURL(url);
  }
}

// The first time polling a fence, delay some extra time to allow other
// stubs to process some work, or else the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64_t kHandleMoreWorkPeriodMs = 2;
const int64_t kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved.
const int64_t kMaxTimeSinceIdleMs = 10;

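// Wraps per-channel statistics (renderer PID, GPU memory usage) so they can
// be attached as JSON to devtools.timeline trace events; the data is only
// serialized, via AppendAsTraceFormat(), when the trace is consumed.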
class DevToolsChannelData : public base::trace_event::ConvertableToTraceFormat {
 public:
  static scoped_ptr<base::trace_event::ConvertableToTraceFormat>
  CreateForChannel(GpuChannel* channel);
  ~DevToolsChannelData() override {}

  void AppendAsTraceFormat(std::string* out) const override {
    std::string tmp;
    base::JSONWriter::Write(*value_, &tmp);
    *out += tmp;
  }

 private:
  explicit DevToolsChannelData(base::Value* value) : value_(value) {}
  scoped_ptr<base::Value> value_;
  DISALLOW_COPY_AND_ASSIGN(DevToolsChannelData);
};

scoped_ptr<base::trace_event::ConvertableToTraceFormat>
DevToolsChannelData::CreateForChannel(GpuChannel* channel) {
  scoped_ptr<base::DictionaryValue> res(new base::DictionaryValue);
  res->SetInteger("renderer_pid", channel->GetClientPID());
  res->SetDouble("used_bytes", channel->GetMemoryUsage());
  return make_scoped_ptr(new DevToolsChannelData(res.release()));
}

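// Builds a 64-bit command buffer ID that is unique within the GPU process:
// the channel's client ID occupies the high 32 bits and the IPC route ID the
// low 32 bits.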
CommandBufferId GetCommandBufferID(int channel_id, int32_t route_id) {
  return CommandBufferId::FromUnsafeValue(
      (static_cast<uint64_t>(channel_id) << 32) | route_id);
}

}  // namespace

GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    SyncPointManager* sync_point_manager,
    base::SingleThreadTaskRunner* task_runner,
    GpuCommandBufferStub* share_group,
    SurfaceHandle surface_handle,
    gles2::MailboxManager* mailbox_manager,
    PreemptionFlag* preempt_by_flag,
    gles2::SubscriptionRefSet* subscription_ref_set,
    ValueStateMap* pending_valuebuffer_state,
    const gfx::Size& size,
    const gles2::DisallowedFeatures& disallowed_features,
    const std::vector<int32_t>& attribs,
    gfx::GpuPreference gpu_preference,
    int32_t stream_id,
    int32_t route_id,
    GpuWatchdog* watchdog,
    const GURL& active_url)
    : channel_(channel),
      sync_point_manager_(sync_point_manager),
      task_runner_(task_runner),
      initialized_(false),
      surface_handle_(surface_handle),
      initial_size_(size),
      disallowed_features_(disallowed_features),
      requested_attribs_(attribs),
      gpu_preference_(gpu_preference),
      use_virtualized_gl_context_(false),
      command_buffer_id_(GetCommandBufferID(channel->client_id(), route_id)),
      stream_id_(stream_id),
      route_id_(route_id),
      last_flush_count_(0),
      surface_format_(gfx::GLSurface::SURFACE_DEFAULT),
      watchdog_(watchdog),
      waiting_for_sync_point_(false),
      previous_processed_num_(0),
      preemption_flag_(preempt_by_flag),
      active_url_(active_url) {
  active_url_hash_ = base::Hash(active_url.possibly_invalid_spec());
  FastSetActiveURL(active_url_, active_url_hash_, channel_);

  gles2::ContextCreationAttribHelper attrib_parser;
  attrib_parser.Parse(requested_attribs_);

  if (share_group) {
    context_group_ = share_group->context_group_;
    DCHECK(context_group_->bind_generates_resource() ==
           attrib_parser.bind_generates_resource);
  } else {
    context_group_ = new gles2::ContextGroup(
        channel_->gpu_channel_manager()->gpu_preferences(), mailbox_manager,
        new GpuCommandBufferMemoryTracker(channel,
                                          command_buffer_id_.GetUnsafeValue()),
        channel_->gpu_channel_manager()->shader_translator_cache(),
        channel_->gpu_channel_manager()->framebuffer_completeness_cache(), NULL,
        subscription_ref_set, pending_valuebuffer_state,
        attrib_parser.bind_generates_resource);
  }

// Virtualize PreferIntegratedGpu contexts by default on OS X to prevent
// performance regressions when enabling FCM.
// http://crbug.com/180463
#if defined(OS_MACOSX)
  if (gpu_preference_ == gfx::PreferIntegratedGpu)
    use_virtualized_gl_context_ = true;
#endif

  use_virtualized_gl_context_ |=
      context_group_->feature_info()->workarounds().use_virtualized_gl_contexts;

  // MailboxManagerSync synchronization correctness currently depends on having
  // only a single context. See crbug.com/510243 for details.
  use_virtualized_gl_context_ |= mailbox_manager->UsesSync();

#if defined(OS_ANDROID)
  if (attrib_parser.red_size <= 5 && attrib_parser.green_size <= 6 &&
      attrib_parser.blue_size <= 5 && attrib_parser.alpha_size == 0)
    surface_format_ = gfx::GLSurface::SURFACE_RGB565;
  gfx::GLSurface* default_offscreen_surface =
      channel_->gpu_channel_manager()->GetDefaultOffscreenSurface();
  bool is_onscreen = (surface_handle_ != kNullSurfaceHandle);
  if (surface_format_ != default_offscreen_surface->GetFormat() && is_onscreen)
    use_virtualized_gl_context_ = false;
#endif

  if ((surface_handle_ == kNullSurfaceHandle) && initial_size_.IsEmpty()) {
    // If we're an offscreen surface with zero width and/or height, set to a
    // non-zero size so that we have a complete framebuffer for operations like
    // glClear.
    initial_size_ = gfx::Size(1, 1);
  }
}

GpuCommandBufferStub::~GpuCommandBufferStub() {
  Destroy();
}

GpuMemoryManager* GpuCommandBufferStub::GetMemoryManager() const {
  return channel()->gpu_channel_manager()->gpu_memory_manager();
}

bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"), "GPUTask",
               "data", DevToolsChannelData::CreateForChannel(channel()));
  FastSetActiveURL(active_url_, active_url_hash_, channel_);

  bool have_context = false;
  // Ensure the appropriate GL context is current before handling any IPC
  // messages directed at the command buffer. This ensures that the message
  // handler can assume that the context is current (not necessary for
  // RetireSyncPoint or WaitSyncPoint).
  if (decoder_.get() &&
      message.type() != GpuCommandBufferMsg_SetGetBuffer::ID &&
      message.type() != GpuCommandBufferMsg_WaitForTokenInRange::ID &&
      message.type() != GpuCommandBufferMsg_WaitForGetOffsetInRange::ID &&
      message.type() != GpuCommandBufferMsg_RegisterTransferBuffer::ID &&
      message.type() != GpuCommandBufferMsg_DestroyTransferBuffer::ID) {
    if (!MakeCurrent())
      return false;
    have_context = true;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message
  // handlers here. This is so the reply can be delayed if the scheduler is
  // unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_SetGetBuffer,
                                    OnSetGetBuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ProduceFrontBuffer,
                        OnProduceFrontBuffer)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForTokenInRange,
                                    OnWaitForTokenInRange)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_WaitForGetOffsetInRange,
                                    OnWaitForGetOffsetInRange)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_RegisterTransferBuffer,
                        OnRegisterTransferBuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyTransferBuffer,
                        OnDestroyTransferBuffer)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalSyncToken, OnSignalSyncToken)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SignalQuery, OnSignalQuery)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateImage, OnCreateImage)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_DestroyImage, OnDestroyImage)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_CreateStreamTexture,
                        OnCreateStreamTexture)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()

  CheckCompleteWaits();

  // Ensure that any delayed work that was created will be handled.
  if (have_context) {
    if (executor_)
      executor_->ProcessPendingQueries();
    ScheduleDelayedWork(
        base::TimeDelta::FromMilliseconds(kHandleMoreWorkPeriodMs));
  }

  return handled;
}

bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

bool GpuCommandBufferStub::IsScheduled() {
  return (!executor_.get() || executor_->scheduled());
}

void GpuCommandBufferStub::PollWork() {
  // Post another delayed task if we have not yet reached the time at which
  // we should process delayed work.
  base::TimeTicks current_time = base::TimeTicks::Now();
  DCHECK(!process_delayed_work_time_.is_null());
  if (process_delayed_work_time_ > current_time) {
    task_runner_->PostDelayedTask(
        FROM_HERE, base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
        process_delayed_work_time_ - current_time);
    return;
  }
  process_delayed_work_time_ = base::TimeTicks();

  PerformWork();
}

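// Runs pending queries and, when the channel appears idle (no new messages
// since the last poll) or idle work has been starved for longer than
// kMaxTimeSinceIdleMs, the decoder's idle work as well.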
void GpuCommandBufferStub::PerformWork() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::PerformWork");

  FastSetActiveURL(active_url_, active_url_hash_, channel_);
  if (decoder_.get() && !MakeCurrent())
    return;

  if (executor_) {
    uint32_t current_unprocessed_num =
        channel()->gpu_channel_manager()->GetUnprocessedOrderNum();
    // We're idle when no messages were processed or scheduled.
    bool is_idle = (previous_processed_num_ == current_unprocessed_num);
    if (!is_idle && !last_idle_time_.is_null()) {
      base::TimeDelta time_since_idle =
          base::TimeTicks::Now() - last_idle_time_;
      base::TimeDelta max_time_since_idle =
          base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);

      // Force idle when it's been too long since last time we were idle.
      if (time_since_idle > max_time_since_idle)
        is_idle = true;
    }

    if (is_idle) {
      last_idle_time_ = base::TimeTicks::Now();
      executor_->PerformIdleWork();
    }

    executor_->ProcessPendingQueries();
  }

  ScheduleDelayedWork(
      base::TimeDelta::FromMilliseconds(kHandleMoreWorkPeriodBusyMs));
}

bool GpuCommandBufferStub::HasUnprocessedCommands() {
  if (command_buffer_) {
    CommandBuffer::State state = command_buffer_->GetLastState();
    return command_buffer_->GetPutOffset() != state.get_offset &&
           !error::IsError(state.error);
  }
  return false;
}

void GpuCommandBufferStub::ScheduleDelayedWork(base::TimeDelta delay) {
  bool has_more_work = executor_.get() && (executor_->HasPendingQueries() ||
                                           executor_->HasMoreIdleWork());
  if (!has_more_work) {
    last_idle_time_ = base::TimeTicks();
    return;
  }

  base::TimeTicks current_time = base::TimeTicks::Now();
  // |process_delayed_work_time_| is set if processing of delayed work is
  // already scheduled. Just update the time if already scheduled.
  if (!process_delayed_work_time_.is_null()) {
    process_delayed_work_time_ = current_time + delay;
    return;
  }

  // Idle when no messages are processed between now and when
  // PollWork is called.
  previous_processed_num_ =
      channel()->gpu_channel_manager()->GetProcessedOrderNum();
  if (last_idle_time_.is_null())
    last_idle_time_ = current_time;

  // IsScheduled() returns true after passing all unschedule fences
  // and this is when we can start performing idle work. Idle work
  // is done synchronously so we can set delay to 0 and instead poll
  // for more work at the rate idle work is performed. This also ensures
  // that idle work is done as efficiently as possible without any
  // unnecessary delays.
  if (executor_.get() && executor_->scheduled() &&
      executor_->HasMoreIdleWork()) {
    delay = base::TimeDelta();
  }

  process_delayed_work_time_ = current_time + delay;
  task_runner_->PostDelayedTask(
      FROM_HERE, base::Bind(&GpuCommandBufferStub::PollWork, AsWeakPtr()),
      delay);
}

bool GpuCommandBufferStub::MakeCurrent() {
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
  command_buffer_->SetParseError(error::kLostContext);
  CheckContextLost();
  return false;
}

void GpuCommandBufferStub::Destroy() {
  if (wait_for_token_) {
    Send(wait_for_token_->reply.release());
    wait_for_token_.reset();
  }
  if (wait_for_get_offset_) {
    Send(wait_for_get_offset_->reply.release());
    wait_for_get_offset_.reset();
  }

  if (initialized_) {
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    if ((surface_handle_ == kNullSurfaceHandle) && !active_url_.is_empty())
      gpu_channel_manager->delegate()->DidDestroyOffscreenContext(active_url_);
  }

  if (decoder_)
    decoder_->set_engine(NULL);

  // The scheduler has raw references to the decoder and the command buffer so
  // destroy it before those.
  executor_.reset();

  sync_point_client_.reset();

  bool have_context = false;
  if (decoder_ && decoder_->GetGLContext()) {
    // Try to make the context current regardless of whether it was lost, so we
    // don't leak resources.
    have_context = decoder_->GetGLContext()->MakeCurrent(surface_.get());
  }
  FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
                    OnWillDestroyStub());

  if (decoder_) {
    decoder_->Destroy(have_context);
    decoder_.reset();
  }

  command_buffer_.reset();

  // Remove this after crbug.com/248395 is sorted out.
  surface_ = NULL;
}

void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) {
  Destroy();
  GpuCommandBufferMsg_Initialize::WriteReplyParams(reply_message, false,
                                                   Capabilities());
  Send(reply_message);
}

scoped_refptr<gfx::GLSurface> GpuCommandBufferStub::CreateSurface() {
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  scoped_refptr<gfx::GLSurface> surface;
  if (surface_handle_ != kNullSurfaceHandle) {
    surface = ImageTransportSurface::CreateNativeSurface(
        manager, this, surface_handle_, surface_format_);
    if (!surface || !surface->Initialize(surface_format_))
      return nullptr;
  } else {
    surface = manager->GetDefaultOffscreenSurface();
  }
  return surface;
}

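// Builds the service side of the command buffer: creates the
// CommandBufferService, decoder, executor and sync point client, sets up the
// GL surface and (possibly virtualized) context, wires the decoder and
// command buffer callbacks, and replies with the decoder's capabilities. Any
// failure funnels through OnInitializeFailed(), which tears the stub back
// down and replies with failure.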
void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle shared_state_handle,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnInitialize");
  DCHECK(!command_buffer_.get());

  scoped_ptr<base::SharedMemory> shared_state_shm(
      new base::SharedMemory(shared_state_handle, false));

  command_buffer_.reset(
      new CommandBufferService(context_group_->transfer_buffer_manager()));

  bool result = command_buffer_->Initialize();
  DCHECK(result);

  GpuChannelManager* manager = channel_->gpu_channel_manager();
  DCHECK(manager);

  decoder_.reset(gles2::GLES2Decoder::Create(context_group_.get()));
  executor_.reset(new CommandExecutor(command_buffer_.get(), decoder_.get(),
                                      decoder_.get()));
  sync_point_client_ = sync_point_manager_->CreateSyncPointClient(
      channel_->GetSyncPointOrderData(stream_id_),
      CommandBufferNamespace::GPU_IO, command_buffer_id_);

  if (preemption_flag_.get())
    executor_->SetPreemptByFlag(preemption_flag_);

  decoder_->set_engine(executor_.get());

  surface_ = CreateSurface();
  if (!surface_.get()) {
    DLOG(ERROR) << "Failed to create surface.";
    OnInitializeFailed(reply_message);
    return;
  }

  scoped_refptr<gfx::GLContext> context;
  gfx::GLShareGroup* share_group = channel_->share_group();
  if (use_virtualized_gl_context_ && share_group) {
    context = share_group->GetSharedContext();
    if (!context.get()) {
      context = gfx::GLContext::CreateGLContext(
          channel_->share_group(),
          channel_->gpu_channel_manager()->GetDefaultOffscreenSurface(),
          gpu_preference_);
      if (!context.get()) {
        DLOG(ERROR) << "Failed to create shared context for virtualization.";
        OnInitializeFailed(reply_message);
        return;
      }
      channel_->share_group()->SetSharedContext(context.get());
    }
    // This should be a non-virtual GL context.
    DCHECK(context->GetHandle());
    context =
        new GLContextVirtual(share_group, context.get(), decoder_->AsWeakPtr());
    if (!context->Initialize(surface_.get(), gpu_preference_)) {
      // The real context created above for the default offscreen surface
      // might not be compatible with this surface.
      context = NULL;

      DLOG(ERROR) << "Failed to initialize virtual GL context.";
      OnInitializeFailed(reply_message);
      return;
    }
  }
  if (!context.get()) {
    context = gfx::GLContext::CreateGLContext(share_group, surface_.get(),
                                              gpu_preference_);
  }
  if (!context.get()) {
    DLOG(ERROR) << "Failed to create context.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->MakeCurrent(surface_.get())) {
    LOG(ERROR) << "Failed to make context current.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (!context->GetGLStateRestorer()) {
    context->SetGLStateRestorer(new GLStateRestorerImpl(decoder_->AsWeakPtr()));
  }

  if (!context_group_->has_program_cache() &&
      !context_group_->feature_info()->workarounds().disable_program_cache) {
    context_group_->set_program_cache(
        channel_->gpu_channel_manager()->program_cache());
  }

  // Initialize the decoder with either the view or pbuffer GLContext.
  bool offscreen = (surface_handle_ == kNullSurfaceHandle);
  if (!decoder_->Initialize(surface_, context, offscreen, initial_size_,
                            disallowed_features_, requested_attribs_)) {
    DLOG(ERROR) << "Failed to initialize decoder.";
    OnInitializeFailed(reply_message);
    return;
  }

  if (channel_->gpu_channel_manager()
          ->gpu_preferences()
          .enable_gpu_service_logging) {
    decoder_->set_log_commands(true);
  }

  decoder_->GetLogger()->SetMsgCallback(base::Bind(
      &GpuCommandBufferStub::SendConsoleMessage, base::Unretained(this)));
  decoder_->SetShaderCacheCallback(base::Bind(
      &GpuCommandBufferStub::SendCachedShader, base::Unretained(this)));
  decoder_->SetFenceSyncReleaseCallback(base::Bind(
      &GpuCommandBufferStub::OnFenceSyncRelease, base::Unretained(this)));
  decoder_->SetWaitFenceSyncCallback(base::Bind(
      &GpuCommandBufferStub::OnWaitFenceSync, base::Unretained(this)));

  command_buffer_->SetPutOffsetChangeCallback(
      base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
  command_buffer_->SetGetBufferChangeCallback(base::Bind(
      &CommandExecutor::SetGetBuffer, base::Unretained(executor_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&GpuCommandBufferStub::OnParseError, base::Unretained(this)));
  executor_->SetSchedulingChangedCallback(base::Bind(
      &GpuCommandBufferStub::OnSchedulingChanged, base::Unretained(this)));

  if (watchdog_) {
    executor_->SetCommandProcessedCallback(base::Bind(
        &GpuCommandBufferStub::OnCommandProcessed, base::Unretained(this)));
  }

  const size_t kSharedStateSize = sizeof(CommandBufferSharedState);
  if (!shared_state_shm->Map(kSharedStateSize)) {
    DLOG(ERROR) << "Failed to map shared state buffer.";
    OnInitializeFailed(reply_message);
    return;
  }
  command_buffer_->SetSharedStateBuffer(MakeBackingFromSharedMemory(
      std::move(shared_state_shm), kSharedStateSize));

  Capabilities capabilities = decoder_->GetCapabilities();

  GpuCommandBufferMsg_Initialize::WriteReplyParams(reply_message, true,
                                                   capabilities);
  Send(reply_message);

  if ((surface_handle_ == kNullSurfaceHandle) && !active_url_.is_empty())
    manager->delegate()->DidCreateOffscreenContext(active_url_);

  initialized_ = true;
}

void GpuCommandBufferStub::OnCreateStreamTexture(uint32_t texture_id,
                                                 int32_t stream_id,
                                                 bool* succeeded) {
#if defined(OS_ANDROID)
  *succeeded = StreamTexture::Create(this, texture_id, stream_id);
#else
  *succeeded = false;
#endif
}

void GpuCommandBufferStub::SetLatencyInfoCallback(
    const LatencyInfoCallback& callback) {
  latency_info_callback_ = callback;
}

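// For illustration (hypothetical values): given
// requested_attribs_ = {A, 1, B, 2}, GetRequestedAttribute(B) returns 2,
// and any attribute not present in the list returns -1.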
int32_t GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
  // |requested_attribs_| holds flat (enum, value) pairs. Search for the
  // requested attribute and return its paired value, or -1 if it is absent.
  for (std::vector<int32_t>::const_iterator it = requested_attribs_.begin();
       it != requested_attribs_.end(); ++it) {
    if (*it++ == attr) {
      return *it;
    }
  }
  return -1;
}

void GpuCommandBufferStub::OnSetGetBuffer(int32_t shm_id,
                                          IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
  if (command_buffer_)
    command_buffer_->SetGetBuffer(shm_id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnProduceFrontBuffer(const Mailbox& mailbox) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnProduceFrontBuffer");
  if (!decoder_) {
    LOG(ERROR) << "Can't produce front buffer before initialization.";
    return;
  }

  decoder_->ProduceFrontBuffer(mailbox);
}

void GpuCommandBufferStub::OnParseError() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnParseError");
  DCHECK(command_buffer_.get());
  CommandBuffer::State state = command_buffer_->GetLastState();
  IPC::Message* msg = new GpuCommandBufferMsg_Destroyed(
      route_id_, state.context_lost_reason, state.error);
  msg->set_unblock(true);
  Send(msg);

  // Tell the browser about this context loss as well, so it can
  // determine whether client APIs like WebGL need to be immediately
  // blocked from automatically running.
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->delegate()->DidLoseContext(
      (surface_handle_ == kNullSurfaceHandle), state.context_lost_reason,
      active_url_);

  CheckContextLost();
}

void GpuCommandBufferStub::OnSchedulingChanged(bool scheduled) {
  TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnSchedulingChanged", "scheduled",
               scheduled);
  channel_->OnStreamRescheduled(stream_id_, scheduled);
}

void GpuCommandBufferStub::OnWaitForTokenInRange(int32_t start,
                                                 int32_t end,
                                                 IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForTokenInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_token_)
    LOG(ERROR) << "Got WaitForToken command while currently waiting for token.";
  wait_for_token_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

void GpuCommandBufferStub::OnWaitForGetOffsetInRange(
    int32_t start,
    int32_t end,
    IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnWaitForGetOffsetInRange");
  DCHECK(command_buffer_.get());
  CheckContextLost();
  if (wait_for_get_offset_) {
    LOG(ERROR)
        << "Got WaitForGetOffset command while currently waiting for offset.";
  }
  wait_for_get_offset_ =
      make_scoped_ptr(new WaitForCommandState(start, end, reply_message));
  CheckCompleteWaits();
}

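// Sends the delayed replies for any pending token / get-offset waits whose
// target range has been reached, or unconditionally once the command buffer
// reports an error.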
void GpuCommandBufferStub::CheckCompleteWaits() {
  if (wait_for_token_ || wait_for_get_offset_) {
    CommandBuffer::State state = command_buffer_->GetLastState();
    if (wait_for_token_ &&
        (CommandBuffer::InRange(wait_for_token_->start, wait_for_token_->end,
                                state.token) ||
         state.error != error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForTokenInRange::WriteReplyParams(
          wait_for_token_->reply.get(), state);
      Send(wait_for_token_->reply.release());
      wait_for_token_.reset();
    }
    if (wait_for_get_offset_ &&
        (CommandBuffer::InRange(wait_for_get_offset_->start,
                                wait_for_get_offset_->end,
                                state.get_offset) ||
         state.error != error::kNoError)) {
      ReportState();
      GpuCommandBufferMsg_WaitForGetOffsetInRange::WriteReplyParams(
          wait_for_get_offset_->reply.get(), state);
      Send(wait_for_get_offset_->reply.release());
      wait_for_get_offset_.reset();
    }
  }
}

void GpuCommandBufferStub::OnAsyncFlush(
    int32_t put_offset,
    uint32_t flush_count,
    const std::vector<ui::LatencyInfo>& latency_info) {
  TRACE_EVENT1("gpu", "GpuCommandBufferStub::OnAsyncFlush", "put_offset",
               put_offset);
  DCHECK(command_buffer_);

  // A large unsigned difference means |flush_count| went backwards, i.e. this
  // message arrived out-of-order. That should not happen, but log it to catch
  // regressions.
  DVLOG_IF(0, flush_count - last_flush_count_ >= 0x8000000U)
      << "Received a Flush message out-of-order";

  if (flush_count > last_flush_count_ &&
      ui::LatencyInfo::Verify(latency_info,
                              "GpuCommandBufferStub::OnAsyncFlush") &&
      !latency_info_callback_.is_null()) {
    latency_info_callback_.Run(latency_info);
  }

  last_flush_count_ = flush_count;
  CommandBuffer::State pre_state = command_buffer_->GetLastState();
  command_buffer_->Flush(put_offset);
  CommandBuffer::State post_state = command_buffer_->GetLastState();

  if (pre_state.get_offset != post_state.get_offset)
    ReportState();

#if defined(OS_ANDROID)
  GpuChannelManager* manager = channel_->gpu_channel_manager();
  manager->DidAccessGpu();
#endif
}

void GpuCommandBufferStub::OnRegisterTransferBuffer(
    int32_t id,
    base::SharedMemoryHandle transfer_buffer,
    uint32_t size) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");

  // Take ownership of the memory and map it into this process.
  // This validates the size.
  scoped_ptr<base::SharedMemory> shared_memory(
      new base::SharedMemory(transfer_buffer, false));
  if (!shared_memory->Map(size)) {
    DVLOG(0) << "Failed to map shared memory.";
    return;
  }

  if (command_buffer_) {
    command_buffer_->RegisterTransferBuffer(
        id, MakeBackingFromSharedMemory(std::move(shared_memory), size));
  }
}

void GpuCommandBufferStub::OnDestroyTransferBuffer(int32_t id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");

  if (command_buffer_)
    command_buffer_->DestroyTransferBuffer(id);
}

void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::ReportState() { command_buffer_->UpdateState(); }

void GpuCommandBufferStub::PutChanged() {
  FastSetActiveURL(active_url_, active_url_hash_, channel_);
  executor_->PutChanged();
}

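// When the mailbox manager synchronizes across contexts, pull texture
// definitions published by another command buffer, up to the given fence
// release, before this stub consumes them.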
void GpuCommandBufferStub::PullTextureUpdates(
    CommandBufferNamespace namespace_id,
    CommandBufferId command_buffer_id,
    uint32_t release) {
  gles2::MailboxManager* mailbox_manager = context_group_->mailbox_manager();
  if (mailbox_manager->UsesSync() && MakeCurrent()) {
    SyncToken sync_token(namespace_id, 0, command_buffer_id, release);
    mailbox_manager->PullTextureUpdates(sync_token);
  }
}

void GpuCommandBufferStub::OnSignalSyncToken(const SyncToken& sync_token,
                                             uint32_t id) {
  scoped_refptr<SyncPointClientState> release_state =
      sync_point_manager_->GetSyncPointClientState(
          sync_token.namespace_id(), sync_token.command_buffer_id());

  if (release_state) {
    sync_point_client_->Wait(release_state.get(), sync_token.release_count(),
                             base::Bind(&GpuCommandBufferStub::OnSignalAck,
                                        this->AsWeakPtr(), id));
  } else {
    OnSignalAck(id);
  }
}

void GpuCommandBufferStub::OnSignalAck(uint32_t id) {
  Send(new GpuCommandBufferMsg_SignalAck(route_id_, id));
}

void GpuCommandBufferStub::OnSignalQuery(uint32_t query_id, uint32_t id) {
  if (decoder_) {
    gles2::QueryManager* query_manager = decoder_->GetQueryManager();
    if (query_manager) {
      gles2::QueryManager::Query* query = query_manager->GetQuery(query_id);
      if (query) {
        query->AddCallback(base::Bind(&GpuCommandBufferStub::OnSignalAck,
                                      this->AsWeakPtr(), id));
        return;
      }
    }
  }
  // Something went wrong, run callback immediately.
  OnSignalAck(id);
}

void GpuCommandBufferStub::OnFenceSyncRelease(uint64_t release) {
  if (sync_point_client_->client_state()->IsFenceSyncReleased(release)) {
    DLOG(ERROR) << "Fence Sync has already been released.";
    return;
  }

  gles2::MailboxManager* mailbox_manager = context_group_->mailbox_manager();
  if (mailbox_manager->UsesSync() && MakeCurrent()) {
    SyncToken sync_token(CommandBufferNamespace::GPU_IO, 0, command_buffer_id_,
                         release);
    mailbox_manager->PushTextureUpdates(sync_token);
  }

  sync_point_client_->ReleaseFenceSync(release);
}

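// Returns true if the wait completed immediately (the fence was already
// released, or the releasing command buffer is unknown). Otherwise the stub
// deschedules itself and returns false; OnWaitFenceSyncCompleted reschedules
// it once the release arrives. Note that the wait callback may run
// synchronously from WaitNonThreadSafe, which is why
// |waiting_for_sync_point_| is re-checked before descheduling.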
bool GpuCommandBufferStub::OnWaitFenceSync(
    CommandBufferNamespace namespace_id,
    CommandBufferId command_buffer_id,
    uint64_t release) {
  DCHECK(!waiting_for_sync_point_);
  DCHECK(executor_->scheduled());

  scoped_refptr<SyncPointClientState> release_state =
      sync_point_manager_->GetSyncPointClientState(namespace_id,
                                                   command_buffer_id);

  if (!release_state)
    return true;

  if (release_state->IsFenceSyncReleased(release)) {
    PullTextureUpdates(namespace_id, command_buffer_id, release);
    return true;
  }

  TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitFenceSync", this, "GpuCommandBufferStub",
                           this);
  waiting_for_sync_point_ = true;
  sync_point_client_->WaitNonThreadSafe(
      release_state.get(), release, task_runner_,
      base::Bind(&GpuCommandBufferStub::OnWaitFenceSyncCompleted,
                 this->AsWeakPtr(), namespace_id, command_buffer_id, release));

  if (!waiting_for_sync_point_)
    return true;

  executor_->SetScheduled(false);
  return false;
}

void GpuCommandBufferStub::OnWaitFenceSyncCompleted(
    CommandBufferNamespace namespace_id,
    CommandBufferId command_buffer_id,
    uint64_t release) {
  DCHECK(waiting_for_sync_point_);
  TRACE_EVENT_ASYNC_END1("gpu", "WaitFenceSync", this, "GpuCommandBufferStub",
                         this);
  PullTextureUpdates(namespace_id, command_buffer_id, release);
  waiting_for_sync_point_ = false;
  executor_->SetScheduled(true);
}

void GpuCommandBufferStub::OnCreateImage(
    const GpuCommandBufferMsg_CreateImage_Params& params) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnCreateImage");
  const int32_t id = params.id;
  const gfx::GpuMemoryBufferHandle& handle = params.gpu_memory_buffer;
  const gfx::Size& size = params.size;
  const gfx::BufferFormat& format = params.format;
  const uint32_t internalformat = params.internal_format;
  const uint64_t image_release_count = params.image_release_count;

  if (!decoder_)
    return;

  gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  if (!ImageFactory::IsGpuMemoryBufferFormatSupported(
          format, decoder_->GetCapabilities())) {
    LOG(ERROR) << "Format is not supported.";
    return;
  }

  if (!ImageFactory::IsImageSizeValidForGpuMemoryBufferFormat(size, format)) {
    LOG(ERROR) << "Invalid image size for format.";
    return;
  }

  if (!ImageFactory::IsImageFormatCompatibleWithGpuMemoryBufferFormat(
          internalformat, format)) {
    LOG(ERROR) << "Incompatible image format.";
    return;
  }

  scoped_refptr<gl::GLImage> image = channel()->CreateImageForGpuMemoryBuffer(
      handle, size, format, internalformat);
  if (!image.get())
    return;

  image_manager->AddImage(image.get(), id);
  if (image_release_count) {
    sync_point_client_->ReleaseFenceSync(image_release_count);
  }
}

void GpuCommandBufferStub::OnDestroyImage(int32_t id) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyImage");

  if (!decoder_)
    return;

  gles2::ImageManager* image_manager = decoder_->GetImageManager();
  DCHECK(image_manager);
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }

  image_manager->RemoveImage(id);
}

void GpuCommandBufferStub::SendConsoleMessage(int32_t id,
                                              const std::string& message) {
  GPUCommandBufferConsoleMessage console_message;
  console_message.id = id;
  console_message.message = message;
  IPC::Message* msg =
      new GpuCommandBufferMsg_ConsoleMsg(route_id_, console_message);
  msg->set_unblock(true);
  Send(msg);
}

void GpuCommandBufferStub::SendCachedShader(const std::string& key,
                                            const std::string& shader) {
  channel_->CacheShader(key, shader);
}

void GpuCommandBufferStub::AddDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.AddObserver(observer);
}

void GpuCommandBufferStub::RemoveDestructionObserver(
    DestructionObserver* observer) {
  destruction_observers_.RemoveObserver(observer);
}

const gles2::FeatureInfo* GpuCommandBufferStub::GetFeatureInfo() const {
  return context_group_->feature_info();
}

gles2::MemoryTracker* GpuCommandBufferStub::GetMemoryTracker() const {
  return context_group_->memory_tracker();
}

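// Returns true if the command buffer is in the lost-context state. As a side
// effect this may shut down the GPU process entirely (for drivers that cannot
// recover from a reset) or lose all other contexts when the reset came from
// the GL robustness extension.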
bool GpuCommandBufferStub::CheckContextLost() {
  DCHECK(command_buffer_);
  CommandBuffer::State state = command_buffer_->GetLastState();
  bool was_lost = state.error == error::kLostContext;

  if (was_lost) {
    bool was_lost_by_robustness =
        decoder_ && decoder_->WasContextLostByRobustnessExtension();

    // Work around issues with recovery by allowing a new GPU process to
    // launch.
    if ((was_lost_by_robustness ||
         context_group_->feature_info()->workarounds().exit_on_context_lost) &&
        !channel_->gpu_channel_manager()->gpu_preferences().single_process &&
        !channel_->gpu_channel_manager()->gpu_preferences().in_process_gpu) {
      LOG(ERROR) << "Exiting GPU process because some drivers cannot recover"
                 << " from problems.";
      // Signal the message loop to quit to shut down other threads
      // gracefully.
      base::MessageLoop::current()->QuitNow();
    }

    // Lose all other contexts if the reset was triggered by the robustness
    // extension instead of being synthetic.
    if (was_lost_by_robustness &&
        (gfx::GLContext::LosesAllContextsOnContextLost() ||
         use_virtualized_gl_context_)) {
      channel_->LoseAllContexts();
    }
  }

  CheckCompleteWaits();
  return was_lost;
}

void GpuCommandBufferStub::MarkContextLost() {
  if (!command_buffer_ ||
      command_buffer_->GetLastState().error == error::kLostContext)
    return;

  command_buffer_->SetContextLostReason(error::kUnknown);
  if (decoder_)
    decoder_->MarkContextLost(error::kUnknown);
  command_buffer_->SetParseError(error::kLostContext);
}

void GpuCommandBufferStub::SendSwapBuffersCompleted(
    const std::vector<ui::LatencyInfo>& latency_info,
    gfx::SwapResult result) {
  Send(new GpuCommandBufferMsg_SwapBuffersCompleted(route_id_, latency_info,
                                                    result));
}

void GpuCommandBufferStub::SendUpdateVSyncParameters(base::TimeTicks timebase,
                                                     base::TimeDelta interval) {
  Send(new GpuCommandBufferMsg_UpdateVSyncParameters(route_id_, timebase,
                                                     interval));
}

}  // namespace gpu