Index: components/view_manager/surfaces/command_buffer_local.cc
diff --git a/components/view_manager/surfaces/command_buffer_local.cc b/components/view_manager/surfaces/command_buffer_local.cc
new file mode 100644
index 0000000000000000000000000000000000000000..cc2a0cac7e8251c65dd714920c0de425dd1e7e9f
--- /dev/null
+++ b/components/view_manager/surfaces/command_buffer_local.cc
@@ -0,0 +1,439 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/view_manager/surfaces/command_buffer_local.h"
+
+#include "base/bind.h"
+#include "gpu/command_buffer/service/context_group.h"
+#include "gpu/command_buffer/service/gpu_scheduler.h"
+#include "gpu/command_buffer/service/image_manager.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/shader_translator_cache.h"
+#include "gpu/command_buffer/service/valuebuffer_manager.h"
+#include "ui/gfx/vsync_provider.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_image_ref_counted_memory.h"
+#include "ui/gl/gl_surface.h"
+
+namespace {
+
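+// Returns the number of planes used by the given buffer format.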
+size_t NumberOfPlanesForGpuMemoryBufferFormat(
+    gfx::BufferFormat format) {
+  switch (format) {
+    case gfx::BufferFormat::ATC:
+    case gfx::BufferFormat::ATCIA:
+    case gfx::BufferFormat::DXT1:
+    case gfx::BufferFormat::DXT5:
+    case gfx::BufferFormat::ETC1:
+    case gfx::BufferFormat::R_8:
+    case gfx::BufferFormat::RGBA_4444:
+    case gfx::BufferFormat::RGBA_8888:
+    case gfx::BufferFormat::RGBX_8888:
+    case gfx::BufferFormat::BGRA_8888:
+      return 1;
+    case gfx::BufferFormat::YUV_420:
+      return 3;
+  }
+  NOTREACHED();
+  return 0;
+}
+
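+// Returns the subsampling factor of the given zero-based plane of |format|,
+// e.g. 2 for the chroma planes of YUV_420.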
+size_t SubsamplingFactor(gfx::BufferFormat format, int plane) {
+  switch (format) {
+    case gfx::BufferFormat::ATC:
+    case gfx::BufferFormat::ATCIA:
+    case gfx::BufferFormat::DXT1:
+    case gfx::BufferFormat::DXT5:
+    case gfx::BufferFormat::ETC1:
+    case gfx::BufferFormat::R_8:
+    case gfx::BufferFormat::RGBA_4444:
+    case gfx::BufferFormat::RGBA_8888:
+    case gfx::BufferFormat::RGBX_8888:
+    case gfx::BufferFormat::BGRA_8888:
+      return 1;
+    case gfx::BufferFormat::YUV_420: {
+      static size_t factor[] = {1, 2, 2};
+      DCHECK_LT(static_cast<size_t>(plane), arraysize(factor));
+      return factor[plane];
+    }
+  }
+  NOTREACHED();
+  return 0;
+}
+
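+// Returns the stride in bytes of one row of the given plane for a buffer
+// |width| pixels wide.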
+size_t StrideInBytes(size_t width, gfx::BufferFormat format, int plane) {
+  switch (format) {
+    case gfx::BufferFormat::ATCIA:
+    case gfx::BufferFormat::DXT5:
+      DCHECK_EQ(plane, 0);
+      return width;
+    case gfx::BufferFormat::ATC:
+    case gfx::BufferFormat::DXT1:
+    case gfx::BufferFormat::ETC1:
+      DCHECK_EQ(plane, 0);
+      DCHECK_EQ(width % 2, 0U);
+      return width / 2;
+    case gfx::BufferFormat::R_8:
+      return (width + 3) & ~0x3;
+    case gfx::BufferFormat::RGBA_4444:
+      DCHECK_EQ(plane, 0);
+      return width * 2;
+    case gfx::BufferFormat::RGBA_8888:
+    case gfx::BufferFormat::BGRA_8888:
+      DCHECK_EQ(plane, 0);
+      return width * 4;
+    case gfx::BufferFormat::RGBX_8888:
+      NOTREACHED();
+      return 0;
+    case gfx::BufferFormat::YUV_420:
+      return width / SubsamplingFactor(format, plane);
+  }
+
+  NOTREACHED();
+  return 0;
+}
+
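+// A no-op gpu::gles2::MemoryTracker; the in-process command buffer does not
+// track or limit GPU memory usage.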
+class MemoryTrackerStub : public gpu::gles2::MemoryTracker {
+ public:
+  MemoryTrackerStub() {}
+
+  void TrackMemoryAllocatedChange(
+      size_t old_size,
+      size_t new_size,
+      gpu::gles2::MemoryTracker::Pool pool) override {}
+
+  bool EnsureGPUMemoryAvailable(size_t size_needed) override { return true; }
+  uint64_t ClientTracingId() const override { return 0; }
+  int ClientId() const override { return 0; }
+
+ private:
+  ~MemoryTrackerStub() override {}
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryTrackerStub);
+};
+
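+// Returns the total allocation size of a buffer, summed over all planes.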
+size_t BufferSizeInBytes(const gfx::Size& size,
+                         gfx::BufferFormat format) {
+  size_t size_in_bytes = 0;
+  size_t num_planes = NumberOfPlanesForGpuMemoryBufferFormat(format);
+  for (size_t i = 0; i < num_planes; ++i) {
+    size_in_bytes += StrideInBytes(size.width(), format, i) *
+                     (size.height() / SubsamplingFactor(format, i));
+  }
+  return size_in_bytes;
+}
+
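+// A gfx::GpuMemoryBuffer backed by ref-counted system memory rather than a
+// platform handle, so GetHandle() is never expected to be called.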
+class GpuMemoryBufferImpl : public gfx::GpuMemoryBuffer {
+ public:
+  GpuMemoryBufferImpl(base::RefCountedBytes* bytes,
+                      const gfx::Size& size,
+                      gfx::BufferFormat format)
+      : bytes_(bytes), size_(size), format_(format), mapped_(false) {}
+
+  static GpuMemoryBufferImpl* FromClientBuffer(ClientBuffer buffer) {
+    return reinterpret_cast<GpuMemoryBufferImpl*>(buffer);
+  }
+
+  // Overridden from gfx::GpuMemoryBuffer:
+  bool Map(void** data) override {
+    size_t offset = 0;
+    size_t num_planes = NumberOfPlanesForGpuMemoryBufferFormat(format_);
+    for (size_t i = 0; i < num_planes; ++i) {
+      data[i] = reinterpret_cast<uint8*>(&bytes_->data().front()) + offset;
+      offset += StrideInBytes(size_.width(), format_, i) *
+                (size_.height() / SubsamplingFactor(format_, i));
+    }
+    mapped_ = true;
+    return true;
+  }
+  void Unmap() override { mapped_ = false; }
+  bool IsMapped() const override { return mapped_; }
+  gfx::BufferFormat GetFormat() const override { return format_; }
+  gfx::GpuMemoryBufferId GetId() const override {
+    return 0;
+  }
+  void GetStride(int* stride) const override {
+    size_t num_planes = NumberOfPlanesForGpuMemoryBufferFormat(format_);
+    for (size_t i = 0; i < num_planes; ++i)
+      stride[i] = StrideInBytes(size_.width(), format_, i);
+  }
+  gfx::GpuMemoryBufferHandle GetHandle() const override {
+    NOTREACHED();
+    return gfx::GpuMemoryBufferHandle();
+  }
+  ClientBuffer AsClientBuffer() override {
+    return reinterpret_cast<ClientBuffer>(this);
+  }
+
+  base::RefCountedBytes* bytes() { return bytes_.get(); }
+
+ private:
+  scoped_refptr<base::RefCountedBytes> bytes_;
+  const gfx::Size size_;
+  gfx::BufferFormat format_;
+  bool mapped_;
+};
+
+}  // anonymous namespace
+
+namespace surfaces {
+
+CommandBufferLocal::CommandBufferLocal(
+    Client* client,
+    gfx::AcceleratedWidget widget,
+    const scoped_refptr<gles2::GpuState>& gpu_state)
+    : widget_(widget),
+      gpu_state_(gpu_state),
+      client_(client),
+      weak_factory_(this) {
+}
+
+CommandBufferLocal::~CommandBufferLocal() {
+  command_buffer_.reset();
+  if (decoder_.get()) {
+    bool have_context = decoder_->GetGLContext()->MakeCurrent(surface_.get());
+    decoder_->Destroy(have_context);
+    decoder_.reset();
+  }
+}
+
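+// Allocates a zero-initialized, system-memory-backed buffer large enough to
+// hold every plane of a |size| buffer in |format|.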
+// static
+scoped_ptr<gfx::GpuMemoryBuffer> CommandBufferLocal::CreateGpuMemoryBuffer(
+    const gfx::Size& size,
+    gfx::BufferFormat format) {
+  std::vector<unsigned char> data(BufferSizeInBytes(size, format), 0);
+  scoped_refptr<base::RefCountedBytes> bytes(new base::RefCountedBytes(data));
+  return make_scoped_ptr<gfx::GpuMemoryBuffer>(
+      new GpuMemoryBufferImpl(bytes.get(), size, format));
+}
+
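+// Creates the GL surface (offscreen if |widget_| is null) and context, then
+// wires up the in-process command buffer: a CommandBufferService feeding a
+// GLES2Decoder driven by a GpuScheduler.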
+bool CommandBufferLocal::Initialize() {
+  if (widget_ == gfx::kNullAcceleratedWidget) {
+    surface_ = gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size(1, 1));
+  } else {
+    surface_ = gfx::GLSurface::CreateViewGLSurface(widget_);
+    if (auto vsync_provider = surface_->GetVSyncProvider()) {
+      vsync_provider->GetVSyncParameters(
+          base::Bind(&CommandBufferLocal::OnUpdateVSyncParameters,
+                     weak_factory_.GetWeakPtr()));
+    }
+  }
+
+  if (!surface_.get())
+    return false;
+
+  // TODO(piman): virtual contexts, gpu preference.
+  context_ = gfx::GLContext::CreateGLContext(
+      gpu_state_->share_group(), surface_.get(), gfx::PreferIntegratedGpu);
+  if (!context_.get())
+    return false;
+
+  if (!context_->MakeCurrent(surface_.get()))
+    return false;
+
+  // TODO(piman): ShaderTranslatorCache is currently per-ContextGroup but
+  // only needs to be per-thread.
+  bool bind_generates_resource = false;
+  scoped_refptr<gpu::gles2::ContextGroup> context_group =
+      new gpu::gles2::ContextGroup(
+          gpu_state_->mailbox_manager(), new MemoryTrackerStub,
+          new gpu::gles2::ShaderTranslatorCache, nullptr, nullptr, nullptr,
+          bind_generates_resource);
+
+  command_buffer_.reset(
+      new gpu::CommandBufferService(context_group->transfer_buffer_manager()));
+  bool result = command_buffer_->Initialize();
+  DCHECK(result);
+
+  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group.get()));
+  scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(), decoder_.get(),
+                                         decoder_.get()));
+  decoder_->set_engine(scheduler_.get());
+  decoder_->SetResizeCallback(
+      base::Bind(&CommandBufferLocal::OnResize, base::Unretained(this)));
+  decoder_->SetWaitSyncPointCallback(base::Bind(
+      &CommandBufferLocal::OnWaitSyncPoint, base::Unretained(this)));
+
+  gpu::gles2::DisallowedFeatures disallowed_features;
+
+  // TODO(piman): attributes.
+  std::vector<int32> attrib_vector;
+  if (!decoder_->Initialize(surface_, context_, false /* offscreen */,
+                            gfx::Size(1, 1), disallowed_features,
+                            attrib_vector))
+    return false;
+
+  command_buffer_->SetPutOffsetChangeCallback(base::Bind(
+      &CommandBufferLocal::PumpCommands, base::Unretained(this)));
+  command_buffer_->SetGetBufferChangeCallback(base::Bind(
+      &gpu::GpuScheduler::SetGetBuffer, base::Unretained(scheduler_.get())));
+  command_buffer_->SetParseErrorCallback(
+      base::Bind(&CommandBufferLocal::OnParseError, base::Unretained(this)));
+  return true;
+}
+
+/******************************************************************************/
+// gpu::GpuControl:
+/******************************************************************************/
+
+gpu::Capabilities CommandBufferLocal::GetCapabilities() {
+  return decoder_->GetCapabilities();
+}
+
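+// Wraps the client's system-memory buffer in a GLImageRefCountedMemory and
+// registers it with the decoder's ImageManager under a newly allocated id.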
+int32_t CommandBufferLocal::CreateImage(ClientBuffer buffer,
+                                        size_t width,
+                                        size_t height,
+                                        unsigned internalformat) {
+  GpuMemoryBufferImpl* gpu_memory_buffer =
+      GpuMemoryBufferImpl::FromClientBuffer(buffer);
+
+  scoped_refptr<gfx::GLImageRefCountedMemory> image(
+      new gfx::GLImageRefCountedMemory(gfx::Size(width, height),
+                                       internalformat));
+  if (!image->Initialize(gpu_memory_buffer->bytes(),
+                         gpu_memory_buffer->GetFormat())) {
+    return -1;
+  }
+
+  static int32 next_id = 1;
+  int32 new_id = next_id++;
+
+  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
+  DCHECK(image_manager);
+  image_manager->AddImage(image.get(), new_id);
+  return new_id;
+}
+
+void CommandBufferLocal::DestroyImage(int32 id) {
+  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
+  DCHECK(image_manager);
+  image_manager->RemoveImage(id);
+}
+
+int32_t CommandBufferLocal::CreateGpuMemoryBufferImage(
+    size_t width,
+    size_t height,
+    unsigned internalformat,
+    unsigned usage) {
+  DCHECK_EQ(usage, static_cast<unsigned>(GL_MAP_CHROMIUM));
+  scoped_ptr<gfx::GpuMemoryBuffer> buffer = CreateGpuMemoryBuffer(
+      gfx::Size(width, height), gfx::BufferFormat::RGBA_8888);
+  return CreateImage(buffer->AsClientBuffer(), width, height, internalformat);
+}
+
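+// The in-process command buffer hands out 0 as a sync point, which
+// OnWaitSyncPoint() treats as already retired.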
+uint32_t CommandBufferLocal::InsertSyncPoint() {
+  return 0;
+}
+
+uint32_t CommandBufferLocal::InsertFutureSyncPoint() {
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+void CommandBufferLocal::RetireSyncPoint(uint32_t sync_point) {
+  NOTIMPLEMENTED();
+}
+
+void CommandBufferLocal::SignalSyncPoint(uint32_t sync_point,
+                                         const base::Closure& callback) {
+}
+
+void CommandBufferLocal::SignalQuery(uint32_t query,
+                                     const base::Closure& callback) {
+  // TODO(piman)
+  NOTIMPLEMENTED();
+}
+
+void CommandBufferLocal::SetSurfaceVisible(bool visible) {
+  // TODO(piman)
+  NOTIMPLEMENTED();
+}
+
+uint32_t CommandBufferLocal::CreateStreamTexture(uint32_t texture_id) {
+  // TODO(piman)
+  NOTIMPLEMENTED();
+  return 0;
+}
+
+void CommandBufferLocal::SetLock(base::Lock* lock) {
+  NOTIMPLEMENTED();
+}
+
+bool CommandBufferLocal::IsGpuChannelLost() {
+  // This is only possible for out-of-process command buffers.
+  return false;
+}
+
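+// Executes newly submitted commands; if the context cannot be made current,
+// the command buffer is marked as lost.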
+void CommandBufferLocal::PumpCommands() {
+  if (!decoder_->MakeCurrent()) {
+    command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
+    command_buffer_->SetParseError(::gpu::error::kLostContext);
+    return;
+  }
+  scheduler_->PutChanged();
+}
+
+void CommandBufferLocal::OnResize(gfx::Size size, float scale_factor) {
+  surface_->Resize(size);
+}
+
+void CommandBufferLocal::OnUpdateVSyncParameters(
+    const base::TimeTicks timebase,
+    const base::TimeDelta interval) {
+  if (client_)
+    client_->UpdateVSyncParameters(timebase.ToInternalValue(),
+                                   interval.ToInternalValue());
+}
+
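+// Returns true if |sync_point| has already been retired; otherwise the
+// scheduler is descheduled until OnSyncPointRetired() reschedules it.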
+bool CommandBufferLocal::OnWaitSyncPoint(uint32_t sync_point) {
+  if (!sync_point)
+    return true;
+  if (gpu_state_->sync_point_manager()->IsSyncPointRetired(sync_point))
+    return true;
+  scheduler_->SetScheduled(false);
+  gpu_state_->sync_point_manager()->AddSyncPointCallback(
+      sync_point, base::Bind(&CommandBufferLocal::OnSyncPointRetired,
+                             weak_factory_.GetWeakPtr()));
+  return scheduler_->IsScheduled();
+}
+
+void CommandBufferLocal::OnParseError() {
+  gpu::CommandBuffer::State state = command_buffer_->GetLastState();
+  OnContextLost(state.context_lost_reason);
+}
+
+void CommandBufferLocal::OnContextLost(uint32_t reason) {
+  if (client_)
+    client_->DidLoseContext();
+}
+
+void CommandBufferLocal::OnSyncPointRetired() {
+  scheduler_->SetScheduled(true);
+}
+
+}  // namespace surfaces