Index: content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_buffer.cc |
diff --git a/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_buffer.cc b/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_buffer.cc |
index 8b3e34264bda827f42fce6375571f017ef75228e..eddff17a288a4234f0d7ba3dce9c7eee39a5392e 100644 |
--- a/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_buffer.cc |
+++ b/content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_buffer.cc |
@@ -4,6 +4,11 @@ |
#include "content/common/gpu/client/gpu_memory_buffer_impl_ozone_native_buffer.h" |
+#include <fcntl.h> |
+#include <sys/mman.h> |
+#include <xf86drm.h> |
+ |
+#include "base/trace_event/trace_event.h" |
#include "ui/ozone/public/surface_factory_ozone.h" |
namespace content { |
@@ -13,10 +18,34 @@ GpuMemoryBufferImplOzoneNativeBuffer::GpuMemoryBufferImplOzoneNativeBuffer( |
const gfx::Size& size, |
Format format, |
const DestructionCallback& callback) |
- : GpuMemoryBufferImpl(id, size, format, callback) { |
+ : GpuMemoryBufferImpl(id, size, format, callback), |
+ vgem_bo_handle_(0), |
+ stride_(0), |
+ mmap_ptr_(nullptr) { |
+ RowSizeInBytes(size_.width(), format_, 0, &stride_); |
} |
GpuMemoryBufferImplOzoneNativeBuffer::~GpuMemoryBufferImplOzoneNativeBuffer() { |
+  if (vgem_bo_handle_) { |
+    // TODO(dshwang): Both approaches cause a CRASH. How to release the handle? |
dshwang
2015/05/14 12:25:47
zachr, could you advise how to release the vgem bo handle?
 |
+    if (false) { |
+      struct drm_mode_destroy_dumb destroy; |
+      memset(&destroy, 0, sizeof(destroy)); |
+      destroy.handle = vgem_bo_handle_; |
+      int ret = drmIoctl(vgem_fd_.get(), DRM_IOCTL_MODE_DESTROY_DUMB, &destroy); |
+      // drmIoctl() returns 0 on success, so log only a non-zero result. |
+      if (ret) |
+        LOG(ERROR) << "fail to free a vgem buffer. error:" << ret; |
+    } |
+    if (false) { |
+      struct drm_gem_close close; |
+      memset(&close, 0, sizeof(close)); |
+      close.handle = vgem_bo_handle_; |
+      int ret = drmIoctl(vgem_fd_.get(), DRM_IOCTL_GEM_CLOSE, &close); |
+      // drmIoctl() returns 0 on success, so log only a non-zero result. |
+      if (ret) |
+        LOG(ERROR) << "fail to free a vgem buffer. error:" << ret; |
+    } |
+    vgem_bo_handle_ = 0; |
+  } |
} |
// static |
@@ -26,26 +55,93 @@ GpuMemoryBufferImplOzoneNativeBuffer::CreateFromHandle( |
const gfx::Size& size, |
Format format, |
const DestructionCallback& callback) { |
- return make_scoped_ptr<GpuMemoryBufferImpl>( |
- new GpuMemoryBufferImplOzoneNativeBuffer( |
+ scoped_ptr<GpuMemoryBufferImplOzoneNativeBuffer> buffer = |
+ make_scoped_ptr(new GpuMemoryBufferImplOzoneNativeBuffer( |
handle.id, size, format, callback)); |
+ |
+ if (!buffer->Initialize(handle)) |
+ return nullptr; |
+ return buffer.Pass(); |
+} |
+ |
+bool GpuMemoryBufferImplOzoneNativeBuffer::Initialize( |
+    const gfx::GpuMemoryBufferHandle& handle) { |
+  // If |fd| is -1, it's a SCANOUT buffer; it carries no vgem device fd. |
+  if (handle.device_handle.fd == -1) { |
+    // Nothing else to initialize for a SCANOUT buffer. |
+    return true; |
+  } |
+ |
+  DCHECK(handle.device_handle.auto_close); |
+  DCHECK_GE(handle.device_handle.fd, 0); |
+  vgem_fd_.reset(handle.device_handle.fd); |
+ |
+  DCHECK(handle.handle.auto_close); |
+  dma_buf_.reset(handle.handle.fd); |
+ |
+  int ret = |
+      drmPrimeFDToHandle(vgem_fd_.get(), dma_buf_.get(), &vgem_bo_handle_); |
+  if (ret) { |
+    LOG(ERROR) << "drmPrimeFDToHandle failed. error:" << ret; |
+    return false; |
+  } |
+  return true; |
} |
bool GpuMemoryBufferImplOzoneNativeBuffer::Map(void** data) { |
-  NOTREACHED(); |
-  return false; |
+  TRACE_EVENT0("gpu", "GpuMemoryBufferImplOzoneNativeBuffer::Map"); |
+  DCHECK(!mapped_); |
+  DCHECK(!mmap_ptr_); |
+  if (!vgem_bo_handle_) { |
+    LOG(ERROR) << "Map is called for SCANOUT buffer."; |
+    return false; |
+  } |
+ |
+  struct drm_mode_map_dumb mmap_arg; |
+  memset(&mmap_arg, 0, sizeof(mmap_arg)); |
+  mmap_arg.handle = vgem_bo_handle_; |
+ |
+  int ret = drmIoctl(vgem_fd_.get(), DRM_IOCTL_MODE_MAP_DUMB, &mmap_arg); |
+  if (ret) { |
+    LOG(ERROR) << "fail to map a vgem buffer. error:" << ret; |
+    return false; |
+  } |
+  DCHECK(mmap_arg.offset); |
+ |
+  size_t size = stride_ * size_.height(); |
+  mmap_ptr_ = mmap(nullptr, size, (PROT_READ | PROT_WRITE), MAP_SHARED, |
+                   vgem_fd_.get(), mmap_arg.offset); |
+  // Fail gracefully in release builds instead of handing out MAP_FAILED. |
+  if (mmap_ptr_ == MAP_FAILED) { |
+    LOG(ERROR) << "fail to mmap a vgem buffer."; |
+    mmap_ptr_ = nullptr; |
+    return false; |
+  } |
+  mapped_ = true; |
+  *data = mmap_ptr_; |
+  return true; |
} |
void GpuMemoryBufferImplOzoneNativeBuffer::Unmap() { |
- NOTREACHED(); |
+ TRACE_EVENT0("gpu", "GpuMemoryBufferImplOzoneNativeBuffer::Unmap"); |
+ DCHECK(mapped_); |
+ DCHECK(mmap_ptr_); |
+ if (!vgem_bo_handle_) { |
+ LOG(ERROR) << "Unmap is called for SCANOUT buffer."; |
+ return; |
+ } |
+ |
+ size_t size = stride_ * size_.height(); |
+ int ret = munmap(mmap_ptr_, size); |
+ DCHECK(!ret); |
+ mmap_ptr_ = nullptr; |
+ mapped_ = false; |
} |
void GpuMemoryBufferImplOzoneNativeBuffer::GetStride(int* stride) const { |
- NOTREACHED(); |
+ *stride = stride_; |
} |
gfx::GpuMemoryBufferHandle GpuMemoryBufferImplOzoneNativeBuffer::GetHandle() |
const { |
+  // |handle.handle| and |handle.device_handle| don't need to be set because |
+  // the gpu process can look up the right pixmap by |id| alone. |
+  // See ui::GpuMemoryBufferFactoryOzoneNativeBuffer. |
gfx::GpuMemoryBufferHandle handle; |
handle.type = gfx::OZONE_NATIVE_BUFFER; |
handle.id = id_; |