Chromium Code Reviews

Side by Side Diff: components/view_manager/surfaces/command_buffer_local.cc

Issue 1245683004: Mandoline: Merge Surfaces and Views apps (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Remove context_provider.mojom (created 5 years, 4 months ago)
1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "components/view_manager/surfaces/command_buffer_local.h"
6
7 #include "base/bind.h"
8 #include "gpu/command_buffer/service/context_group.h"
9 #include "gpu/command_buffer/service/gpu_scheduler.h"
10 #include "gpu/command_buffer/service/image_manager.h"
11 #include "gpu/command_buffer/service/memory_tracking.h"
12 #include "gpu/command_buffer/service/shader_translator_cache.h"
13 #include "gpu/command_buffer/service/valuebuffer_manager.h"
14 #include "ui/gfx/vsync_provider.h"
15 #include "ui/gl/gl_bindings.h"
16 #include "ui/gl/gl_context.h"
17 #include "ui/gl/gl_image_ref_counted_memory.h"
18 #include "ui/gl/gl_surface.h"
19
20 namespace {
21
22 size_t NumberOfPlanesForGpuMemoryBufferFormat(
23 gfx::GpuMemoryBuffer::Format format) {
24 switch (format) {
25 case gfx::GpuMemoryBuffer::ATC:
26 case gfx::GpuMemoryBuffer::ATCIA:
27 case gfx::GpuMemoryBuffer::DXT1:
28 case gfx::GpuMemoryBuffer::DXT5:
29 case gfx::GpuMemoryBuffer::ETC1:
30 case gfx::GpuMemoryBuffer::R_8:
31 case gfx::GpuMemoryBuffer::RGBA_4444:
32 case gfx::GpuMemoryBuffer::RGBA_8888:
33 case gfx::GpuMemoryBuffer::RGBX_8888:
34 case gfx::GpuMemoryBuffer::BGRA_8888:
35 return 1;
36 case gfx::GpuMemoryBuffer::YUV_420:
37 return 3;
38 }
39 NOTREACHED();
40 return 0;
41 }
42
43 size_t SubsamplingFactor(gfx::GpuMemoryBuffer::Format format, int plane) {
44 switch (format) {
45 case gfx::GpuMemoryBuffer::ATC:
46 case gfx::GpuMemoryBuffer::ATCIA:
47 case gfx::GpuMemoryBuffer::DXT1:
48 case gfx::GpuMemoryBuffer::DXT5:
49 case gfx::GpuMemoryBuffer::ETC1:
50 case gfx::GpuMemoryBuffer::R_8:
51 case gfx::GpuMemoryBuffer::RGBA_4444:
52 case gfx::GpuMemoryBuffer::RGBA_8888:
53 case gfx::GpuMemoryBuffer::RGBX_8888:
54 case gfx::GpuMemoryBuffer::BGRA_8888:
55 return 1;
56 case gfx::GpuMemoryBuffer::YUV_420: {
57 static size_t factor[] = {1, 2, 2};
58 DCHECK_LT(static_cast<size_t>(plane), arraysize(factor));
59 return factor[plane];
60 }
61 }
62 NOTREACHED();
63 return 0;
64 }
65
66 size_t StrideInBytes(size_t width,
67 gfx::GpuMemoryBuffer::Format format,
68 int plane) {
69 switch (format) {
70 case gfx::GpuMemoryBuffer::ATCIA:
71 case gfx::GpuMemoryBuffer::DXT5:
72 DCHECK_EQ(plane, 0);
73 return width;
74 case gfx::GpuMemoryBuffer::ATC:
75 case gfx::GpuMemoryBuffer::DXT1:
76 case gfx::GpuMemoryBuffer::ETC1:
77 DCHECK_EQ(plane, 0);
78 DCHECK_EQ(width % 2, 0U);
79 return width / 2;
80 case gfx::GpuMemoryBuffer::R_8:
81 return (width + 3) & ~0x3;
82 case gfx::GpuMemoryBuffer::RGBA_4444:
83 DCHECK_EQ(plane, 0);
84 return width * 2;
85 case gfx::GpuMemoryBuffer::RGBA_8888:
86 case gfx::GpuMemoryBuffer::BGRA_8888:
87 DCHECK_EQ(plane, 0);
88 return width * 4;
89 case gfx::GpuMemoryBuffer::RGBX_8888:
90 NOTREACHED();
91 return 0;
92 case gfx::GpuMemoryBuffer::YUV_420:
93 return width / SubsamplingFactor(format, plane);
94 }
95
96 NOTREACHED();
97 return 0;
98 }
99
100 class MemoryTrackerStub : public gpu::gles2::MemoryTracker {
101 public:
102 MemoryTrackerStub() {}
103
104 void TrackMemoryAllocatedChange(
105 size_t old_size,
106 size_t new_size,
107 gpu::gles2::MemoryTracker::Pool pool) override {}
108
109 bool EnsureGPUMemoryAvailable(size_t size_needed) override { return true; };
rjkroege 2015/08/06 00:16:21 The bug that I agreed to look into where running f
Fady Samuel 2015/08/06 16:48:23 That's probably the issue. CommandBufferDriver has
rjkroege 2015/08/06 22:09:02 yes.
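[Editor's note: as an illustration of the memory-tracking question raised in the thread above, a tracker that actually accounts for allocation changes could be built on the same gpu::gles2::MemoryTracker interface that MemoryTrackerStub implements. This is purely a hypothetical sketch, not part of this patch; the class name and the budget constant are invented.]

class CountingMemoryTracker : public gpu::gles2::MemoryTracker {
 public:
  CountingMemoryTracker() : size_(0) {}

  void TrackMemoryAllocatedChange(
      size_t old_size,
      size_t new_size,
      gpu::gles2::MemoryTracker::Pool pool) override {
    // Keep a running total instead of discarding the numbers.
    size_ += new_size;
    size_ -= old_size;
  }

  bool EnsureGPUMemoryAvailable(size_t size_needed) override {
    // Hypothetical fixed budget; a real tracker would consult the
    // memory manager or platform limits instead.
    const size_t kBudget = 256 * 1024 * 1024;
    return size_ + size_needed <= kBudget;
  }

 private:
  ~CountingMemoryTracker() override {}

  size_t size_;

  DISALLOW_COPY_AND_ASSIGN(CountingMemoryTracker);
};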
110
111 private:
112 ~MemoryTrackerStub() override {}
113
114 DISALLOW_COPY_AND_ASSIGN(MemoryTrackerStub);
115 };
116
117 size_t BufferSizeInBytes(const gfx::Size& size,
118 gfx::GpuMemoryBuffer::Format format) {
119 size_t size_in_bytes = 0;
120 size_t num_planes = NumberOfPlanesForGpuMemoryBufferFormat(format);
121 for (size_t i = 0; i < num_planes; ++i) {
122 size_in_bytes += StrideInBytes(size.width(), format, i) *
123 (size.height() / SubsamplingFactor(format, i));
124 }
125 return size_in_bytes;
126 }
127
128 class GpuMemoryBufferImpl : public gfx::GpuMemoryBuffer {
129 public:
130 GpuMemoryBufferImpl(base::RefCountedBytes* bytes,
131 const gfx::Size& size,
132 gfx::GpuMemoryBuffer::Format format)
133 : bytes_(bytes), size_(size), format_(format), mapped_(false) {}
134
135 static GpuMemoryBufferImpl* FromClientBuffer(ClientBuffer buffer) {
136 return reinterpret_cast<GpuMemoryBufferImpl*>(buffer);
137 }
138
139 // Overridden from gfx::GpuMemoryBuffer:
140 bool Map(void** data) override {
141 size_t offset = 0;
142 size_t num_planes = NumberOfPlanesForGpuMemoryBufferFormat(format_);
143 for (size_t i = 0; i < num_planes; ++i) {
144 data[i] = reinterpret_cast<uint8*>(&bytes_->data().front()) + offset;
145 offset += StrideInBytes(size_.width(), format_, i) *
146 (size_.height() / SubsamplingFactor(format_, i));
147 }
148 mapped_ = true;
149 return true;
150 }
151 void Unmap() override { mapped_ = false; }
152 bool IsMapped() const override { return mapped_; }
153 Format GetFormat() const override { return format_; }
154 gfx::GpuMemoryBufferId GetId() const override {
155 return 0;
156 }
157 void GetStride(int* stride) const override {
158 size_t num_planes = NumberOfPlanesForGpuMemoryBufferFormat(format_);
159 for (size_t i = 0; i < num_planes; ++i)
160 stride[i] = StrideInBytes(size_.width(), format_, i);
161 }
162 gfx::GpuMemoryBufferHandle GetHandle() const override {
163 NOTREACHED();
164 return gfx::GpuMemoryBufferHandle();
165 }
166 ClientBuffer AsClientBuffer() override {
167 return reinterpret_cast<ClientBuffer>(this);
168 }
169
170 base::RefCountedBytes* bytes() { return bytes_.get(); }
171
172 private:
173 scoped_refptr<base::RefCountedBytes> bytes_;
174 const gfx::Size size_;
175 gfx::GpuMemoryBuffer::Format format_;
176 bool mapped_;
177 };
178
179 } // anonymous namespace
180
181 namespace surfaces {
182
183 CommandBufferLocal::CommandBufferLocal(
184 Client* client,
185 gfx::AcceleratedWidget widget,
186 const scoped_refptr<gles2::GpuState>& gpu_state)
187 : widget_(widget),
188 gpu_state_(gpu_state),
189 client_(client),
190 weak_factory_(this) {
191 }
192
193 CommandBufferLocal::~CommandBufferLocal() {
194 command_buffer_.reset();
195 if (decoder_.get()) {
196 bool have_context = decoder_->GetGLContext()->MakeCurrent(surface_.get());
197 decoder_->Destroy(have_context);
198 decoder_.reset();
199 }
200 }
201
202 // static
203 scoped_ptr<gfx::GpuMemoryBuffer> CommandBufferLocal::CreateGpuMemoryBuffer(
204 const gfx::Size& size,
205 gfx::GpuMemoryBuffer::Format format) {
206 std::vector<unsigned char> data(BufferSizeInBytes(size, format), 0);
207 scoped_refptr<base::RefCountedBytes> bytes(new base::RefCountedBytes(data));
208 return make_scoped_ptr<gfx::GpuMemoryBuffer>(
209 new GpuMemoryBufferImpl(bytes.get(), size, format));
210 }
211
212 bool CommandBufferLocal::Initialize() {
213 if (widget_ == gfx::kNullAcceleratedWidget)
214 surface_ = gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size(1, 1));
215 else {
216 surface_ = gfx::GLSurface::CreateViewGLSurface(widget_);
217 if (auto vsync_provider = surface_->GetVSyncProvider()) {
218 vsync_provider->GetVSyncParameters(
219 base::Bind(&CommandBufferLocal::OnUpdateVSyncParameters,
220 weak_factory_.GetWeakPtr()));
221 }
222 }
223
224 if (!surface_.get())
225 return false;
226
227 // TODO(piman): virtual contexts, gpu preference.
228 context_ = gfx::GLContext::CreateGLContext(
229 gpu_state_->share_group(), surface_.get(), gfx::PreferIntegratedGpu);
230 if (!context_.get())
231 return false;
232
233 if (!context_->MakeCurrent(surface_.get()))
234 return false;
235
236 // TODO(piman): ShaderTranslatorCache is currently per-ContextGroup but
237 // only needs to be per-thread.
238 bool bind_generates_resource = false;
239 scoped_refptr<gpu::gles2::ContextGroup> context_group =
240 new gpu::gles2::ContextGroup(
241 gpu_state_->mailbox_manager(), new MemoryTrackerStub,
242 new gpu::gles2::ShaderTranslatorCache, nullptr, nullptr, nullptr,
243 bind_generates_resource);
244
245 command_buffer_.reset(
246 new gpu::CommandBufferService(context_group->transfer_buffer_manager()));
247 bool result = command_buffer_->Initialize();
248 DCHECK(result);
249
250 decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group.get()));
251 scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(), decoder_.get(),
252 decoder_.get()));
253 decoder_->set_engine(scheduler_.get());
254 decoder_->SetResizeCallback(
255 base::Bind(&CommandBufferLocal::OnResize, base::Unretained(this)));
256 decoder_->SetWaitSyncPointCallback(base::Bind(
257 &CommandBufferLocal::OnWaitSyncPoint, base::Unretained(this)));
258
259 gpu::gles2::DisallowedFeatures disallowed_features;
260
261 // TODO(piman): attributes.
262 std::vector<int32> attrib_vector;
263 if (!decoder_->Initialize(surface_, context_, false /* offscreen */,
264 gfx::Size(1, 1), disallowed_features,
265 attrib_vector))
266 return false;
267
268 command_buffer_->SetPutOffsetChangeCallback(base::Bind(
269 &CommandBufferLocal::PumpCommands, base::Unretained(this)));
270 command_buffer_->SetGetBufferChangeCallback(base::Bind(
271 &gpu::GpuScheduler::SetGetBuffer, base::Unretained(scheduler_.get())));
272 command_buffer_->SetParseErrorCallback(
273 base::Bind(&CommandBufferLocal::OnParseError, base::Unretained(this)));
274 return true;
275 }
276
277 /******************************************************************************/
278 // gpu::GpuControl:
279 /******************************************************************************/
280
281 gpu::Capabilities CommandBufferLocal::GetCapabilities() {
282 return decoder_->GetCapabilities();
283 }
284
285 int32_t CommandBufferLocal::CreateImage(ClientBuffer buffer,
286 size_t width,
287 size_t height,
288 unsigned internalformat) {
289 GpuMemoryBufferImpl* gpu_memory_buffer =
290 GpuMemoryBufferImpl::FromClientBuffer(buffer);
291
292 scoped_refptr<gfx::GLImageRefCountedMemory> image(
293 new gfx::GLImageRefCountedMemory(gfx::Size(width, height),
294 internalformat));
295 if (!image->Initialize(gpu_memory_buffer->bytes(),
296 gpu_memory_buffer->GetFormat())) {
297 return -1;
298 }
299
300 static int32 next_id = 1;
301 int32 new_id = next_id++;
302
303 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
304 DCHECK(image_manager);
305 image_manager->AddImage(image.get(), new_id);
306 return new_id;
307 }
308
309 void CommandBufferLocal::DestroyImage(int32 id) {
310 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
311 DCHECK(image_manager);
312 image_manager->RemoveImage(id);
313 }
314
315 int32_t CommandBufferLocal::CreateGpuMemoryBufferImage(
316 size_t width,
317 size_t height,
318 unsigned internalformat,
319 unsigned usage) {
320 DCHECK_EQ(usage, static_cast<unsigned>(GL_MAP_CHROMIUM));
321 scoped_ptr<gfx::GpuMemoryBuffer> buffer = CreateGpuMemoryBuffer(
322 gfx::Size(width, height), gfx::GpuMemoryBuffer::RGBA_8888);
323 return CreateImage(buffer->AsClientBuffer(), width, height, internalformat);
324 }
325
326 uint32_t CommandBufferLocal::InsertSyncPoint() {
327 return 0;
328 }
329
330 uint32_t CommandBufferLocal::InsertFutureSyncPoint() {
331 NOTIMPLEMENTED();
332 return 0;
333 }
334
335 void CommandBufferLocal::RetireSyncPoint(uint32_t sync_point) {
336 NOTIMPLEMENTED();
337 }
338
339 void CommandBufferLocal::SignalSyncPoint(uint32_t sync_point,
340 const base::Closure& callback) {
341 }
342
343 void CommandBufferLocal::SignalQuery(uint32_t query,
344 const base::Closure& callback) {
345 // TODO(piman)
346 NOTIMPLEMENTED();
347 }
348
349 void CommandBufferLocal::SetSurfaceVisible(bool visible) {
350 // TODO(piman)
351 NOTIMPLEMENTED();
352 }
353
354 uint32_t CommandBufferLocal::CreateStreamTexture(uint32_t texture_id) {
355 // TODO(piman)
356 NOTIMPLEMENTED();
357 return 0;
358 }
359
360 void CommandBufferLocal::SetLock(base::Lock* lock) {
361 NOTIMPLEMENTED();
362 }
363
364 bool CommandBufferLocal::IsGpuChannelLost() {
365 // This is only possible for out-of-process command buffers.
366 return false;
367 }
368
369 void CommandBufferLocal::PumpCommands() {
370 if (!decoder_->MakeCurrent()) {
371 command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
372 command_buffer_->SetParseError(::gpu::error::kLostContext);
373 return;
374 }
375 scheduler_->PutChanged();
376 }
377
378 void CommandBufferLocal::OnResize(gfx::Size size, float scale_factor) {
379 surface_->Resize(size);
380 }
381
382 void CommandBufferLocal::OnUpdateVSyncParameters(
383 const base::TimeTicks timebase,
384 const base::TimeDelta interval) {
385 if (client_)
386 client_->UpdateVSyncParameters(timebase.ToInternalValue(),
387 interval.ToInternalValue());
388
389 }
390
391 bool CommandBufferLocal::OnWaitSyncPoint(uint32_t sync_point) {
392 if (!sync_point)
393 return true;
394 if (gpu_state_->sync_point_manager()->IsSyncPointRetired(sync_point))
395 return true;
396 scheduler_->SetScheduled(false);
397 gpu_state_->sync_point_manager()->AddSyncPointCallback(
398 sync_point, base::Bind(&CommandBufferLocal::OnSyncPointRetired,
399 weak_factory_.GetWeakPtr()));
400 return scheduler_->IsScheduled();
401 }
402
403 void CommandBufferLocal::OnParseError() {
404 gpu::CommandBuffer::State state = command_buffer_->GetLastState();
405 OnContextLost(state.context_lost_reason);
406 }
407
408 void CommandBufferLocal::OnContextLost(uint32_t reason) {
409 if (client_)
410 client_->DidLoseContext();
411 }
412
413 void CommandBufferLocal::OnSyncPointRetired() {
414 scheduler_->SetScheduled(true);
415 }
416
417 } // namespace surfaces
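[Editor's note: for reference, the plane arithmetic in the helpers above works out as follows for a hypothetical 640x480 YUV_420 buffer. The Y plane uses a stride of 640/1 = 640 bytes over 480/1 = 480 rows (307200 bytes), and each of the U and V planes uses a stride of 640/2 = 320 bytes over 480/2 = 240 rows (76800 bytes each), so BufferSizeInBytes() returns 307200 + 2 * 76800 = 460800 bytes.]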