| OLD | NEW |
| (Empty) |
| 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "content/common/gpu/client/context_provider_command_buffer.h" | |
| 6 | |
| 7 #include <stddef.h> | |
| 8 | |
| 9 #include <memory> | |
| 10 #include <set> | |
| 11 #include <utility> | |
| 12 #include <vector> | |
| 13 | |
| 14 #include "base/callback_helpers.h" | |
| 15 #include "base/command_line.h" | |
| 16 #include "base/optional.h" | |
| 17 #include "base/strings/stringprintf.h" | |
| 18 #include "base/threading/thread_task_runner_handle.h" | |
| 19 #include "base/trace_event/memory_dump_manager.h" | |
| 20 #include "cc/output/context_cache_controller.h" | |
| 21 #include "cc/output/managed_memory_policy.h" | |
| 22 #include "content/common/gpu/client/command_buffer_metrics.h" | |
| 23 #include "gpu/command_buffer/client/gles2_cmd_helper.h" | |
| 24 #include "gpu/command_buffer/client/gles2_implementation.h" | |
| 25 #include "gpu/command_buffer/client/gles2_trace_implementation.h" | |
| 26 #include "gpu/command_buffer/client/gpu_switches.h" | |
| 27 #include "gpu/command_buffer/client/transfer_buffer.h" | |
| 28 #include "gpu/command_buffer/common/constants.h" | |
| 29 #include "gpu/ipc/client/command_buffer_proxy_impl.h" | |
| 30 #include "gpu/ipc/client/gpu_channel_host.h" | |
| 31 #include "gpu/skia_bindings/grcontext_for_gles2_interface.h" | |
| 32 #include "third_party/skia/include/core/SkTraceMemoryDump.h" | |
| 33 #include "third_party/skia/include/gpu/GrContext.h" | |
| 34 #include "ui/gl/trace_util.h" | |
| 35 | |
| 36 class SkDiscardableMemory; | |
| 37 | |
| 38 namespace { | |
| 39 | |
// Works like base::AutoReset, except that the destructor *assigns* the
// requested value to the target instead of restoring a previous one.
// Call Reset() before destruction to cancel the pending assignment.
class AutoSet {
 public:
  AutoSet(bool* b, bool set) : target_(b), value_(set) {}
  ~AutoSet() {
    if (target_ != nullptr)
      *target_ = value_;
  }
  // Cancels the pending write, making the destructor a no-op.
  void Reset() { target_ = nullptr; }

 private:
  bool* target_;        // Variable written at destruction; null once Reset().
  const bool value_;    // Value written to |target_| at destruction.
};
| 56 | |
// Derives from SkTraceMemoryDump and implements graphics specific memory
// backing functionality: Skia reports its GPU object usage through this
// adapter, which forwards it into Chromium's ProcessMemoryDump.
class SkiaGpuTraceMemoryDump : public SkTraceMemoryDump {
 public:
  // This should never outlive the provided ProcessMemoryDump, as it should
  // always be scoped to a single OnMemoryDump function call.
  explicit SkiaGpuTraceMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
                                  uint64_t share_group_tracing_guid)
      : pmd_(pmd), share_group_tracing_guid_(share_group_tracing_guid) {}

  // Overridden from SkTraceMemoryDump:
  // Records a single scalar metric (e.g. a size in bytes) under |dump_name|.
  void dumpNumericValue(const char* dump_name,
                        const char* value_name,
                        const char* units,
                        uint64_t value) override {
    auto* dump = GetOrCreateAllocatorDump(dump_name);
    dump->AddScalar(value_name, units, value);
  }

  // Associates |dump_name| with the GL object (texture/buffer/renderbuffer)
  // that backs it, creating a shared global dump plus an ownership edge so
  // the memory is attributed correctly across dump providers.
  void setMemoryBacking(const char* dump_name,
                        const char* backing_type,
                        const char* backing_object_id) override {
    const uint64_t tracing_process_id =
        base::trace_event::MemoryDumpManager::GetInstance()
            ->GetTracingProcessId();

    // For uniformity, skia provides this value as a string. Convert back to a
    // uint32_t.
    uint32_t gl_id =
        std::strtoul(backing_object_id, nullptr /* str_end */, 10 /* base */);

    // Constants used by SkiaGpuTraceMemoryDump to identify different memory
    // types.
    const char* kGLTextureBackingType = "gl_texture";
    const char* kGLBufferBackingType = "gl_buffer";
    const char* kGLRenderbufferBackingType = "gl_renderbuffer";

    // Populated in if statements below. Left empty (and the backing ignored)
    // for any backing type not recognized above.
    base::trace_event::MemoryAllocatorDumpGuid guid;

    if (strcmp(backing_type, kGLTextureBackingType) == 0) {
      // Textures are keyed on the share group GUID rather than the process
      // id, since they can be shared across contexts in the group.
      guid = gl::GetGLTextureClientGUIDForTracing(share_group_tracing_guid_,
                                                  gl_id);
    } else if (strcmp(backing_type, kGLBufferBackingType) == 0) {
      guid = gl::GetGLBufferGUIDForTracing(tracing_process_id, gl_id);
    } else if (strcmp(backing_type, kGLRenderbufferBackingType) == 0) {
      guid = gl::GetGLRenderbufferGUIDForTracing(tracing_process_id, gl_id);
    }

    if (!guid.empty()) {
      pmd_->CreateSharedGlobalAllocatorDump(guid);

      auto* dump = GetOrCreateAllocatorDump(dump_name);

      const int kImportance = 2;
      pmd_->AddOwnershipEdge(dump->guid(), guid, kImportance);
    }
  }

  void setDiscardableMemoryBacking(
      const char* dump_name,
      const SkDiscardableMemory& discardable_memory_object) override {
    // We don't use this class for dumping discardable memory.
    NOTREACHED();
  }

  LevelOfDetail getRequestedDetails() const override {
    // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
    // (crbug.com/499731).
    return kObjectsBreakdowns_LevelOfDetail;
  }

 private:
  // Helper to create allocator dumps: returns the existing dump for
  // |dump_name| if one was already created during this dump cycle.
  base::trace_event::MemoryAllocatorDump* GetOrCreateAllocatorDump(
      const char* dump_name) {
    auto* dump = pmd_->GetAllocatorDump(dump_name);
    if (!dump)
      dump = pmd_->CreateAllocatorDump(dump_name);
    return dump;
  }

  base::trace_event::ProcessMemoryDump* pmd_;  // Not owned; outlives us.
  uint64_t share_group_tracing_guid_;

  DISALLOW_COPY_AND_ASSIGN(SkiaGpuTraceMemoryDump);
};
| 144 | |
| 145 } // namespace | |
| 146 | |
| 147 namespace content { | |
| 148 | |
// SharedProviders holds the list (and lock) of providers in one share group;
// its members need no custom construction or teardown.
ContextProviderCommandBuffer::SharedProviders::SharedProviders() = default;
ContextProviderCommandBuffer::SharedProviders::~SharedProviders() = default;
| 151 | |
// Constructs an unbound provider. All heavyweight work (creating the command
// buffer proxy and GL implementation) is deferred to BindToCurrentThread(),
// which may run on a different thread than this constructor.
ContextProviderCommandBuffer::ContextProviderCommandBuffer(
    scoped_refptr<gpu::GpuChannelHost> channel,
    int32_t stream_id,
    gpu::GpuStreamPriority stream_priority,
    gpu::SurfaceHandle surface_handle,
    const GURL& active_url,
    bool automatic_flushes,
    bool support_locking,
    const gpu::SharedMemoryLimits& memory_limits,
    const gpu::gles2::ContextCreationAttribHelper& attributes,
    ContextProviderCommandBuffer* shared_context_provider,
    command_buffer_metrics::ContextType type)
    : stream_id_(stream_id),
      stream_priority_(stream_priority),
      surface_handle_(surface_handle),
      active_url_(active_url),
      automatic_flushes_(automatic_flushes),
      support_locking_(support_locking),
      memory_limits_(memory_limits),
      attributes_(attributes),
      context_type_(type),
      // Join the share-group tracking list of the provider we share with,
      // or start a fresh list when there is no shared provider.
      shared_providers_(shared_context_provider
                            ? shared_context_provider->shared_providers_
                            : new SharedProviders),
      channel_(std::move(channel)) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  DCHECK(channel_);
  // The context thread becomes whichever thread later calls
  // BindToCurrentThread().
  context_thread_checker_.DetachFromThread();
}
| 181 | |
| 182 ContextProviderCommandBuffer::~ContextProviderCommandBuffer() { | |
| 183 DCHECK(main_thread_checker_.CalledOnValidThread() || | |
| 184 context_thread_checker_.CalledOnValidThread()); | |
| 185 | |
| 186 { | |
| 187 base::AutoLock hold(shared_providers_->lock); | |
| 188 auto it = std::find(shared_providers_->list.begin(), | |
| 189 shared_providers_->list.end(), this); | |
| 190 if (it != shared_providers_->list.end()) | |
| 191 shared_providers_->list.erase(it); | |
| 192 } | |
| 193 | |
| 194 if (bind_succeeded_) { | |
| 195 // Clear the lock to avoid DCHECKs that the lock is being held during | |
| 196 // shutdown. | |
| 197 command_buffer_->SetLock(nullptr); | |
| 198 // Disconnect lost callbacks during destruction. | |
| 199 gles2_impl_->SetLostContextCallback(base::Closure()); | |
| 200 // Unregister memory dump provider. | |
| 201 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider( | |
| 202 this); | |
| 203 } | |
| 204 } | |
| 205 | |
// Returns the client-side proxy for the command buffer living in the GPU
// process. Null until BindToCurrentThread() has created it.
gpu::CommandBufferProxyImpl*
ContextProviderCommandBuffer::GetCommandBufferProxy() {
  return command_buffer_.get();
}
| 210 | |
| 211 uint32_t ContextProviderCommandBuffer::GetCopyTextureInternalFormat() { | |
| 212 if (attributes_.alpha_size > 0) | |
| 213 return GL_RGBA; | |
| 214 DCHECK_NE(attributes_.red_size, 0); | |
| 215 DCHECK_NE(attributes_.green_size, 0); | |
| 216 DCHECK_NE(attributes_.blue_size, 0); | |
| 217 return GL_RGB; | |
| 218 } | |
| 219 | |
// Creates the command buffer proxy, GL bindings, and support objects on the
// calling thread, which becomes the context thread. Failure is sticky: once
// binding fails, all later calls return false without retrying.
bool ContextProviderCommandBuffer::BindToCurrentThread() {
  // This is called on the thread the context will be used.
  DCHECK(context_thread_checker_.CalledOnValidThread());

  // Binding is one-shot; report the earlier outcome if we already ran.
  if (bind_failed_)
    return false;
  if (bind_succeeded_)
    return true;

  // Early outs should report failure: any `return false` below leaves
  // bind_failed_ set via this scoped setter.
  AutoSet set_bind_failed(&bind_failed_, true);

  // Prefer the injected task runner; otherwise use the current thread's.
  scoped_refptr<base::SingleThreadTaskRunner> task_runner =
      default_task_runner_;
  if (!task_runner)
    task_runner = base::ThreadTaskRunnerHandle::Get();

  // It's possible to be running BindToCurrentThread on two contexts
  // on different threads at the same time, but which will be in the same share
  // group. To ensure they end up in the same group, hold the lock on the
  // shared_providers_ (which they will share) after querying the group, until
  // this context has been added to the list.
  {
    ContextProviderCommandBuffer* shared_context_provider = nullptr;
    gpu::CommandBufferProxyImpl* shared_command_buffer = nullptr;
    scoped_refptr<gpu::gles2::ShareGroup> share_group;

    base::AutoLock hold(shared_providers_->lock);

    // Share with the first provider already bound in this group, if any.
    if (!shared_providers_->list.empty()) {
      shared_context_provider = shared_providers_->list.front();
      shared_command_buffer = shared_context_provider->command_buffer_.get();
      share_group = shared_context_provider->gles2_impl_->share_group();
      DCHECK_EQ(!!shared_command_buffer, !!share_group);
    }

    // This command buffer is a client-side proxy to the command buffer in the
    // GPU process.
    command_buffer_ = gpu::CommandBufferProxyImpl::Create(
        std::move(channel_), surface_handle_, shared_command_buffer, stream_id_,
        stream_priority_, attributes_, active_url_, task_runner);
    if (!command_buffer_) {
      DLOG(ERROR) << "GpuChannelHost failed to create command buffer.";
      command_buffer_metrics::UmaRecordContextInitFailed(context_type_);
      return false;
    }

    // The GLES2 helper writes the command buffer protocol.
    gles2_helper_.reset(new gpu::gles2::GLES2CmdHelper(command_buffer_.get()));
    gles2_helper_->SetAutomaticFlushes(automatic_flushes_);
    if (!gles2_helper_->Initialize(memory_limits_.command_buffer_size)) {
      DLOG(ERROR) << "Failed to initialize GLES2CmdHelper.";
      return false;
    }

    // The transfer buffer is used to copy resources between the client
    // process and the GPU process.
    transfer_buffer_.reset(new gpu::TransferBuffer(gles2_helper_.get()));

    // The GLES2Implementation exposes the OpenGLES2 API, as well as the
    // gpu::ContextSupport interface.
    constexpr bool support_client_side_arrays = false;
    gles2_impl_.reset(new gpu::gles2::GLES2Implementation(
        gles2_helper_.get(), share_group, transfer_buffer_.get(),
        attributes_.bind_generates_resource,
        attributes_.lose_context_when_out_of_memory, support_client_side_arrays,
        command_buffer_.get()));
    if (!gles2_impl_->Initialize(memory_limits_.start_transfer_buffer_size,
                                 memory_limits_.min_transfer_buffer_size,
                                 memory_limits_.max_transfer_buffer_size,
                                 memory_limits_.mapped_memory_reclaim_limit)) {
      DLOG(ERROR) << "Failed to initialize GLES2Implementation.";
      return false;
    }

    if (command_buffer_->GetLastState().error != gpu::error::kNoError) {
      DLOG(ERROR) << "Context dead on arrival. Last error: "
                  << command_buffer_->GetLastState().error;
      return false;
    }

    // If any context in the share group has been lost, then abort and don't
    // continue since we need to go back to the caller of the constructor to
    // find the correct share group.
    // This may happen in between the share group being chosen at the
    // constructor, and getting to run this BindToCurrentThread method which
    // can be on some other thread.
    // We intentionally call this *after* creating the command buffer via the
    // GpuChannelHost. Once that has happened, the service knows we are in the
    // share group and if a shared context is lost, our context will be informed
    // also, and the lost context callback will occur for the owner of the
    // context provider. If we check sooner, the shared context may be lost in
    // between these two states and our context here would be left in an orphan
    // share group.
    if (share_group && share_group->IsLost())
      return false;

    // Only now, with everything initialized, do we become visible to other
    // providers querying this share group.
    shared_providers_->list.push_back(this);

    cache_controller_.reset(
        new cc::ContextCacheController(gles2_impl_.get(), task_runner));
  }
  // Success: cancel the pending bind_failed_ write and mark ourselves bound.
  set_bind_failed.Reset();
  bind_succeeded_ = true;

  gles2_impl_->SetLostContextCallback(
      base::Bind(&ContextProviderCommandBuffer::OnLostContext,
                 // |this| owns the GLES2Implementation which holds the
                 // callback.
                 base::Unretained(this)));

  if (base::CommandLine::ForCurrentProcess()->HasSwitch(
          switches::kEnableGpuClientTracing)) {
    // This wraps the real GLES2Implementation and we should always use this
    // instead when it's present.
    trace_impl_.reset(
        new gpu::gles2::GLES2TraceImplementation(gles2_impl_.get()));
  }

  // Do this last once the context is set up.
  std::string type_name =
      command_buffer_metrics::ContextTypeToString(context_type_);
  std::string unique_context_name =
      base::StringPrintf("%s-%p", type_name.c_str(), gles2_impl_.get());
  ContextGL()->TraceBeginCHROMIUM("gpu_toplevel", unique_context_name.c_str());
  // If support_locking_ is true, the context may be used from multiple
  // threads, and any async callstacks will need to hold the same lock, so
  // give it to the command buffer and cache controller.
  // We don't hold a lock here since there's no need, so set the lock very last
  // to prevent asserts that we're not holding it.
  if (support_locking_) {
    command_buffer_->SetLock(&context_lock_);
    cache_controller_->SetLock(&context_lock_);
  }
  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      this, "ContextProviderCommandBuffer", std::move(task_runner));
  return true;
}
| 358 | |
// Allows the context to be re-bound/used on a different thread than the one
// it is currently associated with.
void ContextProviderCommandBuffer::DetachFromThread() {
  context_thread_checker_.DetachFromThread();
}
| 362 | |
| 363 gpu::gles2::GLES2Interface* ContextProviderCommandBuffer::ContextGL() { | |
| 364 DCHECK(bind_succeeded_); | |
| 365 DCHECK(context_thread_checker_.CalledOnValidThread()); | |
| 366 | |
| 367 if (trace_impl_) | |
| 368 return trace_impl_.get(); | |
| 369 return gles2_impl_.get(); | |
| 370 } | |
| 371 | |
// Exposes the gpu::ContextSupport interface implemented by the underlying
// GLES2Implementation (bypassing any tracing wrapper).
gpu::ContextSupport* ContextProviderCommandBuffer::ContextSupport() {
  return gles2_impl_.get();
}
| 375 | |
// Lazily creates, caches, and returns a Skia GrContext layered on top of
// ContextGL(). The cache controller is wired up before the lost-context
// check so it always knows about the GrContext we hand out.
class GrContext* ContextProviderCommandBuffer::GrContext() {
  DCHECK(bind_succeeded_);
  DCHECK(context_thread_checker_.CalledOnValidThread());

  // Return the cached instance if we already built one.
  if (gr_context_)
    return gr_context_->get();

  gr_context_.reset(new skia_bindings::GrContextForGLES2Interface(ContextGL()));
  cache_controller_->SetGrContext(gr_context_->get());

  // If GlContext is already lost, also abandon the new GrContext.
  if (gr_context_->get() &&
      ContextGL()->GetGraphicsResetStatusKHR() != GL_NO_ERROR)
    gr_context_->get()->abandonContext();

  return gr_context_->get();
}
| 393 | |
// Returns the cache controller created during BindToCurrentThread(); null
// before a successful bind.
cc::ContextCacheController* ContextProviderCommandBuffer::CacheController() {
  DCHECK(context_thread_checker_.CalledOnValidThread());
  return cache_controller_.get();
}
| 398 | |
| 399 void ContextProviderCommandBuffer::InvalidateGrContext(uint32_t state) { | |
| 400 if (gr_context_) { | |
| 401 DCHECK(bind_succeeded_); | |
| 402 DCHECK(context_thread_checker_.CalledOnValidThread()); | |
| 403 gr_context_->ResetContext(state); | |
| 404 } | |
| 405 } | |
| 406 | |
// Overrides the task runner used for context work. Must be called before
// BindToCurrentThread(), which otherwise falls back to the binding thread's
// task runner.
void ContextProviderCommandBuffer::SetDefaultTaskRunner(
    scoped_refptr<base::SingleThreadTaskRunner> default_task_runner) {
  DCHECK(!bind_succeeded_);
  default_task_runner_ = std::move(default_task_runner);
}
| 412 | |
// Returns the lock guarding multi-threaded use of the context. Only valid
// when the provider was constructed with support_locking = true.
base::Lock* ContextProviderCommandBuffer::GetLock() {
  DCHECK(support_locking_);
  return &context_lock_;
}
| 417 | |
// Returns the capabilities negotiated for this context.
gpu::Capabilities ContextProviderCommandBuffer::ContextCapabilities() {
  DCHECK(bind_succeeded_);
  DCHECK(context_thread_checker_.CalledOnValidThread());
  // Skips past the trace_impl_ as it doesn't have capabilities.
  return gles2_impl_->capabilities();
}
| 424 | |
// Invoked by the GLES2Implementation when the context is lost. Notifies the
// owner and the GrContext wrapper, then records the loss reason for UMA.
void ContextProviderCommandBuffer::OnLostContext() {
  DCHECK(context_thread_checker_.CalledOnValidThread());

  // Let the owner of the provider react first.
  if (!lost_context_callback_.is_null())
    lost_context_callback_.Run();
  if (gr_context_)
    gr_context_->OnLostContext();

  // Record why the context was lost.
  gpu::CommandBuffer::State state = GetCommandBufferProxy()->GetLastState();
  command_buffer_metrics::UmaRecordContextLost(context_type_, state.error,
                                               state.context_lost_reason);
}
| 437 | |
// Registers the owner's lost-context callback. The DCHECK enforces that a
// non-null callback is only installed once (it may be cleared by passing a
// null callback).
void ContextProviderCommandBuffer::SetLostContextCallback(
    const LostContextCallback& lost_context_callback) {
  DCHECK(context_thread_checker_.CalledOnValidThread());
  DCHECK(lost_context_callback_.is_null() ||
         lost_context_callback.is_null());
  lost_context_callback_ = lost_context_callback;
}
| 445 | |
// base::trace_event::MemoryDumpProvider implementation. May be invoked on a
// dump thread that is not the context thread, hence the locking and thread
// checker manipulation below.
bool ContextProviderCommandBuffer::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  DCHECK(bind_succeeded_);

  // Serialize against other threads using the context, when locking is on.
  base::Optional<base::AutoLock> hold;
  if (support_locking_)
    hold.emplace(context_lock_);

  gles2_impl_->OnMemoryDump(args, pmd);
  gles2_helper_->OnMemoryDump(args, pmd);

  if (gr_context_) {
    // NOTE(review): the thread checker is detached both before and after
    // touching the GrContext — presumably so thread DCHECKs inside the calls
    // below bind to this (dump) thread and are then released again for the
    // real context thread. Confirm the second detach is intentional and not
    // a typo.
    context_thread_checker_.DetachFromThread();
    SkiaGpuTraceMemoryDump trace_memory_dump(
        pmd, gles2_impl_->ShareGroupTracingGUID());
    gr_context_->get()->dumpMemoryStatistics(&trace_memory_dump);
    context_thread_checker_.DetachFromThread();
  }
  // Always report success to the dump manager.
  return true;
}
| 467 | |
| 468 } // namespace content | |
| OLD | NEW |