Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(137)

Side by Side Diff: content/common/gpu/client/context_provider_command_buffer.cc

Issue 2408513002: Move memory observer off OutputSurface/CompositorFrameSink (Closed)
Patch Set: Created 4 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/common/gpu/client/context_provider_command_buffer.h" 5 #include "content/common/gpu/client/context_provider_command_buffer.h"
6 6
7 #include <stddef.h> 7 #include <stddef.h>
8 8
9 #include <memory> 9 #include <memory>
10 #include <set> 10 #include <set>
11 #include <utility> 11 #include <utility>
12 #include <vector> 12 #include <vector>
13 13
14 #include "base/callback_helpers.h" 14 #include "base/callback_helpers.h"
15 #include "base/command_line.h" 15 #include "base/command_line.h"
16 #include "base/optional.h"
16 #include "base/strings/stringprintf.h" 17 #include "base/strings/stringprintf.h"
17 #include "base/threading/thread_task_runner_handle.h" 18 #include "base/threading/thread_task_runner_handle.h"
19 #include "base/trace_event/memory_dump_manager.h"
18 #include "cc/output/context_cache_controller.h" 20 #include "cc/output/context_cache_controller.h"
19 #include "cc/output/managed_memory_policy.h" 21 #include "cc/output/managed_memory_policy.h"
20 #include "content/common/gpu/client/command_buffer_metrics.h" 22 #include "content/common/gpu/client/command_buffer_metrics.h"
21 #include "gpu/command_buffer/client/gles2_cmd_helper.h" 23 #include "gpu/command_buffer/client/gles2_cmd_helper.h"
22 #include "gpu/command_buffer/client/gles2_implementation.h" 24 #include "gpu/command_buffer/client/gles2_implementation.h"
23 #include "gpu/command_buffer/client/gles2_trace_implementation.h" 25 #include "gpu/command_buffer/client/gles2_trace_implementation.h"
24 #include "gpu/command_buffer/client/gpu_switches.h" 26 #include "gpu/command_buffer/client/gpu_switches.h"
25 #include "gpu/command_buffer/client/transfer_buffer.h" 27 #include "gpu/command_buffer/client/transfer_buffer.h"
26 #include "gpu/command_buffer/common/constants.h" 28 #include "gpu/command_buffer/common/constants.h"
27 #include "gpu/ipc/client/command_buffer_proxy_impl.h" 29 #include "gpu/ipc/client/command_buffer_proxy_impl.h"
28 #include "gpu/ipc/client/gpu_channel_host.h" 30 #include "gpu/ipc/client/gpu_channel_host.h"
29 #include "gpu/skia_bindings/grcontext_for_gles2_interface.h" 31 #include "gpu/skia_bindings/grcontext_for_gles2_interface.h"
32 #include "third_party/skia/include/core/SkTraceMemoryDump.h"
30 #include "third_party/skia/include/gpu/GrContext.h" 33 #include "third_party/skia/include/gpu/GrContext.h"
34 #include "ui/gl/trace_util.h"
35
36 class SkDiscardableMemory;
31 37
32 namespace { 38 namespace {
33 39
// Like base::AutoReset, except the write happens when the object is
// destroyed rather than when it is constructed. Calling Reset() cancels
// the pending write.
class AutoSet {
 public:
  AutoSet(bool* target, bool value_on_destroy)
      : target_(target), value_on_destroy_(value_on_destroy) {}
  ~AutoSet() {
    if (target_)
      *target_ = value_on_destroy_;
  }
  // Cancels the pending write to |target_| at destruction time.
  void Reset() { target_ = nullptr; }

 private:
  bool* target_;
  const bool value_on_destroy_;
};
50 } 56
57 // Derives from SkTraceMemoryDump and implements graphics specific memory
58 // backing functionality.
59 class SkiaGpuTraceMemoryDump : public SkTraceMemoryDump {
60 public:
61 // This should never outlive the provided ProcessMemoryDump, as it should
62 // always be scoped to a single OnMemoryDump funciton call.
63 explicit SkiaGpuTraceMemoryDump(base::trace_event::ProcessMemoryDump* pmd,
ericrk 2016/10/07 23:50:14 nit: pre-existing, but can you drop the "explicit"
64 uint64_t share_group_tracing_guid)
65 : pmd_(pmd), share_group_tracing_guid_(share_group_tracing_guid) {}
66
67 // Overridden from SkTraceMemoryDump:
68 void dumpNumericValue(const char* dump_name,
69 const char* value_name,
70 const char* units,
71 uint64_t value) override {
72 auto* dump = GetOrCreateAllocatorDump(dump_name);
73 dump->AddScalar(value_name, units, value);
74 }
75
76 void setMemoryBacking(const char* dump_name,
77 const char* backing_type,
78 const char* backing_object_id) override {
79 const uint64_t tracing_process_id =
80 base::trace_event::MemoryDumpManager::GetInstance()
81 ->GetTracingProcessId();
82
83 // For uniformity, skia provides this value as a string. Convert back to a
84 // uint32_t.
85 uint32_t gl_id =
86 std::strtoul(backing_object_id, nullptr /* str_end */, 10 /* base */);
87
88 // Constants used by SkiaGpuTraceMemoryDump to identify different memory
89 // types.
90 const char* kGLTextureBackingType = "gl_texture";
91 const char* kGLBufferBackingType = "gl_buffer";
92 const char* kGLRenderbufferBackingType = "gl_renderbuffer";
93
94 // Populated in if statements below.
95 base::trace_event::MemoryAllocatorDumpGuid guid;
96
97 if (strcmp(backing_type, kGLTextureBackingType) == 0) {
98 guid = gl::GetGLTextureClientGUIDForTracing(share_group_tracing_guid_,
99 gl_id);
100 } else if (strcmp(backing_type, kGLBufferBackingType) == 0) {
101 guid = gl::GetGLBufferGUIDForTracing(tracing_process_id, gl_id);
102 } else if (strcmp(backing_type, kGLRenderbufferBackingType) == 0) {
103 guid = gl::GetGLRenderbufferGUIDForTracing(tracing_process_id, gl_id);
104 }
105
106 if (!guid.empty()) {
107 pmd_->CreateSharedGlobalAllocatorDump(guid);
108
109 auto* dump = GetOrCreateAllocatorDump(dump_name);
110
111 const int kImportance = 2;
112 pmd_->AddOwnershipEdge(dump->guid(), guid, kImportance);
113 }
114 }
115
116 void setDiscardableMemoryBacking(
117 const char* dump_name,
118 const SkDiscardableMemory& discardable_memory_object) override {
119 // We don't use this class for dumping discardable memory.
120 NOTREACHED();
121 }
122
123 LevelOfDetail getRequestedDetails() const override {
124 // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
125 // (crbug.com/499731).
126 return kObjectsBreakdowns_LevelOfDetail;
127 }
128
129 private:
130 // Helper to create allocator dumps.
131 base::trace_event::MemoryAllocatorDump* GetOrCreateAllocatorDump(
132 const char* dump_name) {
133 auto* dump = pmd_->GetAllocatorDump(dump_name);
134 if (!dump)
135 dump = pmd_->CreateAllocatorDump(dump_name);
136 return dump;
137 }
138
139 base::trace_event::ProcessMemoryDump* pmd_;
140 uint64_t share_group_tracing_guid_;
141
142 DISALLOW_COPY_AND_ASSIGN(SkiaGpuTraceMemoryDump);
143 };
144
145 } // namespace
51 146
52 namespace content { 147 namespace content {
53 148
// Out-of-line defaulted special members for the ref-counted share-group list.
ContextProviderCommandBuffer::SharedProviders::SharedProviders() = default;
ContextProviderCommandBuffer::SharedProviders::~SharedProviders() = default;
56 151
57 ContextProviderCommandBuffer::ContextProviderCommandBuffer( 152 ContextProviderCommandBuffer::ContextProviderCommandBuffer(
58 scoped_refptr<gpu::GpuChannelHost> channel, 153 scoped_refptr<gpu::GpuChannelHost> channel,
59 int32_t stream_id, 154 int32_t stream_id,
60 gpu::GpuStreamPriority stream_priority, 155 gpu::GpuStreamPriority stream_priority,
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
95 if (it != shared_providers_->list.end()) 190 if (it != shared_providers_->list.end())
96 shared_providers_->list.erase(it); 191 shared_providers_->list.erase(it);
97 } 192 }
98 193
99 if (bind_succeeded_) { 194 if (bind_succeeded_) {
100 // Clear the lock to avoid DCHECKs that the lock is being held during 195 // Clear the lock to avoid DCHECKs that the lock is being held during
101 // shutdown. 196 // shutdown.
102 command_buffer_->SetLock(nullptr); 197 command_buffer_->SetLock(nullptr);
103 // Disconnect lost callbacks during destruction. 198 // Disconnect lost callbacks during destruction.
104 gles2_impl_->SetLostContextCallback(base::Closure()); 199 gles2_impl_->SetLostContextCallback(base::Closure());
200 // Unregister memory dump provider.
201 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
202 this);
105 } 203 }
106 } 204 }
107 205
// Returns the client-side proxy to the command buffer in the GPU process.
// |command_buffer_| is created in BindToCurrentThread(), so this may be null
// before a successful bind.
gpu::CommandBufferProxyImpl*
ContextProviderCommandBuffer::GetCommandBufferProxy() {
  return command_buffer_.get();
}
112 210
113 uint32_t ContextProviderCommandBuffer::GetCopyTextureInternalFormat() { 211 uint32_t ContextProviderCommandBuffer::GetCopyTextureInternalFormat() {
114 if (attributes_.alpha_size > 0) 212 if (attributes_.alpha_size > 0)
115 return GL_RGBA; 213 return GL_RGBA;
116 DCHECK_NE(attributes_.red_size, 0); 214 DCHECK_NE(attributes_.red_size, 0);
117 DCHECK_NE(attributes_.green_size, 0); 215 DCHECK_NE(attributes_.green_size, 0);
118 DCHECK_NE(attributes_.blue_size, 0); 216 DCHECK_NE(attributes_.blue_size, 0);
119 return GL_RGB; 217 return GL_RGB;
120 } 218 }
121 219
122 bool ContextProviderCommandBuffer::BindToCurrentThread() { 220 bool ContextProviderCommandBuffer::BindToCurrentThread() {
123 // This is called on the thread the context will be used. 221 // This is called on the thread the context will be used.
124 DCHECK(context_thread_checker_.CalledOnValidThread()); 222 DCHECK(context_thread_checker_.CalledOnValidThread());
125 223
126 if (bind_failed_) 224 if (bind_failed_)
127 return false; 225 return false;
128 if (bind_succeeded_) 226 if (bind_succeeded_)
129 return true; 227 return true;
130 228
131 // Early outs should report failure. 229 // Early outs should report failure.
132 AutoSet set_bind_failed(&bind_failed_, true); 230 AutoSet set_bind_failed(&bind_failed_, true);
133 231
232 scoped_refptr<base::SingleThreadTaskRunner> task_runner =
233 default_task_runner_;
234 if (!task_runner)
235 task_runner = base::ThreadTaskRunnerHandle::Get();
236
134 // It's possible to be running BindToCurrentThread on two contexts 237 // It's possible to be running BindToCurrentThread on two contexts
135 // on different threads at the same time, but which will be in the same share 238 // on different threads at the same time, but which will be in the same share
136 // group. To ensure they end up in the same group, hold the lock on the 239 // group. To ensure they end up in the same group, hold the lock on the
137 // shared_providers_ (which they will share) after querying the group, until 240 // shared_providers_ (which they will share) after querying the group, until
138 // this context has been added to the list. 241 // this context has been added to the list.
139 { 242 {
140 ContextProviderCommandBuffer* shared_context_provider = nullptr; 243 ContextProviderCommandBuffer* shared_context_provider = nullptr;
141 gpu::CommandBufferProxyImpl* shared_command_buffer = nullptr; 244 gpu::CommandBufferProxyImpl* shared_command_buffer = nullptr;
142 scoped_refptr<gpu::gles2::ShareGroup> share_group; 245 scoped_refptr<gpu::gles2::ShareGroup> share_group;
143 246
144 base::AutoLock hold(shared_providers_->lock); 247 base::AutoLock hold(shared_providers_->lock);
145 248
146 if (!shared_providers_->list.empty()) { 249 if (!shared_providers_->list.empty()) {
147 shared_context_provider = shared_providers_->list.front(); 250 shared_context_provider = shared_providers_->list.front();
148 shared_command_buffer = shared_context_provider->command_buffer_.get(); 251 shared_command_buffer = shared_context_provider->command_buffer_.get();
149 share_group = shared_context_provider->gles2_impl_->share_group(); 252 share_group = shared_context_provider->gles2_impl_->share_group();
150 DCHECK_EQ(!!shared_command_buffer, !!share_group); 253 DCHECK_EQ(!!shared_command_buffer, !!share_group);
151 } 254 }
152 255
153 // This command buffer is a client-side proxy to the command buffer in the 256 // This command buffer is a client-side proxy to the command buffer in the
154 // GPU process. 257 // GPU process.
155 scoped_refptr<base::SingleThreadTaskRunner> task_runner =
156 default_task_runner_;
157 if (!task_runner)
158 task_runner = base::ThreadTaskRunnerHandle::Get();
159 command_buffer_ = gpu::CommandBufferProxyImpl::Create( 258 command_buffer_ = gpu::CommandBufferProxyImpl::Create(
160 std::move(channel_), surface_handle_, shared_command_buffer, stream_id_, 259 std::move(channel_), surface_handle_, shared_command_buffer, stream_id_,
161 stream_priority_, attributes_, active_url_, task_runner); 260 stream_priority_, attributes_, active_url_, task_runner);
162 if (!command_buffer_) { 261 if (!command_buffer_) {
163 DLOG(ERROR) << "GpuChannelHost failed to create command buffer."; 262 DLOG(ERROR) << "GpuChannelHost failed to create command buffer.";
164 command_buffer_metrics::UmaRecordContextInitFailed(context_type_); 263 command_buffer_metrics::UmaRecordContextInitFailed(context_type_);
165 return false; 264 return false;
166 } 265 }
167 266
168 // The GLES2 helper writes the command buffer protocol. 267 // The GLES2 helper writes the command buffer protocol.
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
210 // share group and if a shared context is lost, our context will be informed 309 // share group and if a shared context is lost, our context will be informed
211 // also, and the lost context callback will occur for the owner of the 310 // also, and the lost context callback will occur for the owner of the
212 // context provider. If we check sooner, the shared context may be lost in 311 // context provider. If we check sooner, the shared context may be lost in
213 // between these two states and our context here would be left in an orphan 312 // between these two states and our context here would be left in an orphan
214 // share group. 313 // share group.
215 if (share_group && share_group->IsLost()) 314 if (share_group && share_group->IsLost())
216 return false; 315 return false;
217 316
218 shared_providers_->list.push_back(this); 317 shared_providers_->list.push_back(this);
219 318
220 cache_controller_.reset(new cc::ContextCacheController( 319 cache_controller_.reset(
221 gles2_impl_.get(), std::move(task_runner))); 320 new cc::ContextCacheController(gles2_impl_.get(), task_runner));
222 } 321 }
223 set_bind_failed.Reset(); 322 set_bind_failed.Reset();
224 bind_succeeded_ = true; 323 bind_succeeded_ = true;
225 324
226 gles2_impl_->SetLostContextCallback( 325 gles2_impl_->SetLostContextCallback(
227 base::Bind(&ContextProviderCommandBuffer::OnLostContext, 326 base::Bind(&ContextProviderCommandBuffer::OnLostContext,
228 // |this| owns the GLES2Implementation which holds the 327 // |this| owns the GLES2Implementation which holds the
229 // callback. 328 // callback.
230 base::Unretained(this))); 329 base::Unretained(this)));
231 330
(...skipping 13 matching lines...) Expand all
245 ContextGL()->TraceBeginCHROMIUM("gpu_toplevel", unique_context_name.c_str()); 344 ContextGL()->TraceBeginCHROMIUM("gpu_toplevel", unique_context_name.c_str());
246 // If support_locking_ is true, the context may be used from multiple 345 // If support_locking_ is true, the context may be used from multiple
247 // threads, and any async callstacks will need to hold the same lock, so 346 // threads, and any async callstacks will need to hold the same lock, so
248 // give it to the command buffer and cache controller. 347 // give it to the command buffer and cache controller.
249 // We don't hold a lock here since there's no need, so set the lock very last 348 // We don't hold a lock here since there's no need, so set the lock very last
250 // to prevent asserts that we're not holding it. 349 // to prevent asserts that we're not holding it.
251 if (support_locking_) { 350 if (support_locking_) {
252 command_buffer_->SetLock(&context_lock_); 351 command_buffer_->SetLock(&context_lock_);
253 cache_controller_->SetLock(&context_lock_); 352 cache_controller_->SetLock(&context_lock_);
254 } 353 }
354 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
355 this, "ContextProviderCommandBuffer", std::move(task_runner));
255 return true; 356 return true;
256 } 357 }
257 358
// Resets the thread checker so the next thread to call into this provider
// becomes the new bound thread.
void ContextProviderCommandBuffer::DetachFromThread() {
  context_thread_checker_.DetachFromThread();
}
261 362
262 gpu::gles2::GLES2Interface* ContextProviderCommandBuffer::ContextGL() { 363 gpu::gles2::GLES2Interface* ContextProviderCommandBuffer::ContextGL() {
263 DCHECK(bind_succeeded_); 364 DCHECK(bind_succeeded_);
264 DCHECK(context_thread_checker_.CalledOnValidThread()); 365 DCHECK(context_thread_checker_.CalledOnValidThread());
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
335 } 436 }
336 437
// Installs the callback run when the context is lost. Must be called on the
// bound thread.
void ContextProviderCommandBuffer::SetLostContextCallback(
    const LostContextCallback& lost_context_callback) {
  DCHECK(context_thread_checker_.CalledOnValidThread());
  // Either set a callback where none exists, or clear the existing one —
  // never silently replace a non-null callback with another.
  DCHECK(lost_context_callback_.is_null() ||
         lost_context_callback.is_null());
  lost_context_callback_ = lost_context_callback;
}
344 445
446 bool ContextProviderCommandBuffer::OnMemoryDump(
447 const base::trace_event::MemoryDumpArgs& args,
448 base::trace_event::ProcessMemoryDump* pmd) {
449 if (!bind_succeeded_)
ericrk 2016/10/07 23:50:14 per our discussion, this isn't needed (you only re
danakj 2016/10/08 00:03:03 Done.
450 return false;
451 if (!gr_context_)
452 return false;
453
454 base::Optional<base::AutoLock> hold;
ericrk 2016/10/07 23:50:14 you may want to detach from thread here and at the
danakj 2016/10/08 00:03:03 Done.
455 if (support_locking_)
456 hold.emplace(context_lock_);
457
458 SkiaGpuTraceMemoryDump trace_memory_dump(
459 pmd, gles2_impl_->ShareGroupTracingGUID());
460 gr_context_->get()->dumpMemoryStatistics(&trace_memory_dump);
461 return true;
462 }
463
345 } // namespace content 464 } // namespace content
OLDNEW
« cc/output/output_surface.cc ('K') | « content/common/gpu/client/context_provider_command_buffer.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698