Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(402)

Unified Diff: media/capture/video/android/video_capture_device_tango_android.cc

Issue 2983473002: Android Tango depth camera capture support.
Patch Set: rename tango_api files to tango_client_api_glue Created 3 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: media/capture/video/android/video_capture_device_tango_android.cc
diff --git a/media/capture/video/android/video_capture_device_tango_android.cc b/media/capture/video/android/video_capture_device_tango_android.cc
new file mode 100644
index 0000000000000000000000000000000000000000..07089d0c8a2ea1112fe6e6ffca5eb6773d39c149
--- /dev/null
+++ b/media/capture/video/android/video_capture_device_tango_android.cc
@@ -0,0 +1,365 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "media/capture/video/android/video_capture_device_tango_android.h"
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <cmath>
+#include <utility>
+
+#include "base/android/jni_android.h"
+#include "base/android/jni_array.h"
+#include "base/android/jni_string.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "jni/VideoCaptureTango_jni.h"
+
+using base::android::AttachCurrentThread;
+using Intrinsics = media::TangoClientApiGlue::TangoCameraIntrinsics;
+using Tango = media::TangoClientApiGlue;
+
+namespace media {
+
+namespace {
+
+// Fixed size of the produced depth frames. 214x120 keeps the aspect ratio of
+// the 1920x1080 color camera frame: since the point cloud lives in color
+// camera 3D space, projecting it onto a down-scaled color frame of this size
+// yields a continuous depth frame synchronized with the color camera, with a
+// trivial color->depth pixel mapping (color coordinates divided by the
+// scale). Depth cameras such as the 224x172 sensor on the Lenovo Phab2 Pro
+// produce enough points to fill this resolution.
+const gfx::Size DepthSize(214, 120);
+
+// C-style trampoline for the Tango point cloud callback; |context| is the
+// VideoCaptureDeviceTangoAndroid instance registered in Connect().
+void OnPointCloudCallback(void* context,
+                          const Tango::TangoPointCloud* point_cloud) {
+  VideoCaptureDeviceTangoAndroid* device =
+      static_cast<VideoCaptureDeviceTangoAndroid*>(context);
+  device->OnPointCloudAvailable(point_cloud->num_points,
+                                reinterpret_cast<float*>(point_cloud->points),
+                                point_cloud->timestamp);
+}
+
+}  // namespace
+
+VideoCaptureDeviceTangoAndroid::VideoCaptureDeviceTangoAndroid(
+    const VideoCaptureDeviceDescriptor& device_descriptor)
+    : VideoCaptureDeviceAndroid(device_descriptor), weak_ptr_factory_(this) {
+  // Create the Java-side capture object, handing it a pointer back to this
+  // instance so JNI callbacks can be routed here.
+  JNIEnv* env = AttachCurrentThread();
+  const intptr_t native_device = reinterpret_cast<intptr_t>(this);
+  j_capture_.Reset(Java_VideoCaptureTango_create(env, native_device));
+}
+
+// Ensures capture is stopped and the client is released before the device
+// object goes away.
+VideoCaptureDeviceTangoAndroid::~VideoCaptureDeviceTangoAndroid() {
+  StopAndDeAllocate();
+}
+
+// static
+// Appends a "Tango depth" descriptor to |device_descriptors| when running on
+// a known Tango-capable device; no-op otherwise.
+void VideoCaptureDeviceTangoAndroid::EnumerateDevices(
+    VideoCaptureDeviceDescriptors* device_descriptors) {
+  // Tango devices have additional depth, color and fisheye cameras. Those
+  // devices are identified by model.
+  static const char* tango_models[] = {"Project Tango Tablet Development Kit",
+                                       "Lenovo PB2-690M", "ASUS_A002",
+                                       "ASUS_A002A"};
+  const std::string model = base::SysInfo::HardwareModelName();
+  const char** result =
+      std::find(tango_models, tango_models + arraysize(tango_models), model);
+  if (result == tango_models + arraysize(tango_models))
+    return;
+  // The device id is the current descriptor count. size() is a size_t, so
+  // use the matching "%zu" conversion; "%d" with a size_t argument is
+  // undefined behavior on LP64 targets.
+  device_descriptors->emplace_back(
+      "Tango depth", base::StringPrintf("%zu", device_descriptors->size()),
+      VideoCaptureApi::ANDROID_TANGO);
+}
+
+// static
+// Reports the single capture format the Tango depth device supports:
+// 214x120 Y16 at 5 frames per second.
+void VideoCaptureDeviceTangoAndroid::EnumerateDeviceCapabilities(
+    const VideoCaptureDeviceDescriptor& descriptor,
+    VideoCaptureFormats* supported_formats) {
+  DCHECK(descriptor.capture_api == VideoCaptureApi::ANDROID_TANGO);
+  const VideoCaptureFormat depth_format(DepthSize, 5,
+                                        VideoPixelFormat::PIXEL_FORMAT_Y16);
+  supported_formats->push_back(depth_format);
+}
+
+// static
+// Registers the VideoCaptureTango JNI natives; must succeed before any
+// Java_VideoCaptureTango_* call is made.
+bool VideoCaptureDeviceTangoAndroid::RegisterVideoCaptureDevice(JNIEnv* env) {
+  return RegisterNativesImpl(env);
+}
+
+// Takes ownership of |client|, fixes the capture format and starts the
+// Java-side capture, moving the state machine kIdle -> kConfigured.
+void VideoCaptureDeviceTangoAndroid::AllocateAndStart(
+    const VideoCaptureParams& params,
+    std::unique_ptr<Client> client) {
+  {
+    base::AutoLock lock(lock_);
+    // Only start from the idle state; otherwise the request (and |client|)
+    // is dropped.
+    if (state_ != kIdle)
+      return;
+    client_ = std::move(client);
+    got_first_frame_ = false;
+  }
+
+  JNIEnv* env = AttachCurrentThread();
+
+  // The depth device produces a single fixed format regardless of |params|:
+  // 214x120 Y16 at 5 fps.
+  capture_format_.frame_size = DepthSize;
+  capture_format_.frame_rate = 5;
+  capture_format_.pixel_format = VideoPixelFormat::PIXEL_FORMAT_Y16;
+
+  if (capture_format_.frame_rate > 0) {
+    // Ceiling division so frames are never delivered faster than the nominal
+    // frame rate.
+    frame_interval_ = base::TimeDelta::FromMicroseconds(
+        (base::Time::kMicrosecondsPerSecond + capture_format_.frame_rate - 1) /
+        capture_format_.frame_rate);
+  }
+
+  if (!Java_VideoCaptureTango_startCapture(env, j_capture_)) {
+    SetErrorState(FROM_HERE, "failed to start capture");
+    return;
+  }
+
+  {
+    base::AutoLock lock(lock_);
+    state_ = kConfigured;
+  }
+}
+
+// Stops the Java-side capture and returns the state machine to kIdle,
+// releasing the client. No-op unless capture is configured or in error.
+void VideoCaptureDeviceTangoAndroid::StopAndDeAllocate() {
+  {
+    base::AutoLock lock(lock_);
+    if (state_ != kConfigured && state_ != kError)
+      return;
+  }
+
+  // The JNI call is made without holding |lock_|.
+  Java_VideoCaptureTango_stopCapture(AttachCurrentThread(), j_capture_);
+
+  {
+    base::AutoLock lock(lock_);
+    state_ = kIdle;
+    client_.reset();
+  }
+}
+
+// Projects the 3D point cloud (in color camera space) to a 2D 16-bit depth
+// frame of |capture_format_.frame_size| and sends it to the client. |src|
+// holds |num_points| points of 4 floats each (x, y, z, confidence);
+// |timestamp| is the point cloud timestamp in seconds.
+// NOTE(review): assumes |intrinsics_| was populated by Connect() before the
+// first point cloud arrives - confirm against the Tango callback ordering.
+void VideoCaptureDeviceTangoAndroid::OnPointCloudAvailable(uint32_t num_points,
+                                                           float* const src,
+                                                           double timestamp) {
+  if (!IsClientConfiguredForIncomingData())
+    return;
+  const base::TimeTicks current_time = base::TimeTicks::Now();
+  ProcessFirstFrameAvailable(current_time);
+
+  CHECK(src);
+  const int width = capture_format_.frame_size.width();
+  const int height = capture_format_.frame_size.height();
+
+  // Two bytes per pixel (PIXEL_FORMAT_Y16).
+  const int buffer_length = width * height * 2;
+  // Use the array form of unique_ptr: the scalar form would invoke delete
+  // instead of delete[] on the new[]-allocated buffer, which is undefined
+  // behavior.
+  std::unique_ptr<uint8_t[]> buffer(new uint8_t[buffer_length]);
+
+  uint16_t* out = reinterpret_cast<uint16_t*>(buffer.get());
+  memset(out, 0, buffer_length);
+
+  const float fx = intrinsics_->fx;
+  const float fy = intrinsics_->fy;
+  // Add 0.5 to cx and cy to avoid the need for round() in operations below.
+  float cx = intrinsics_->cx + 0.5;
+  float cy = intrinsics_->cy + 0.5;
+
+  // Column/row of the last processed point; INT_MIN means "no previous
+  // point", so the packed fast path below cannot trigger accidentally.
+  int current_x = INT_MIN;
+  int current_y = INT_MIN;
+
+  // Optimization: process 4 points at once as they are likely adjacent.
+  // TODO(aleksandar.stojiljkovic): Try optimizing using NEON.
+  // https://crbug.com/674440
+  float* const src_end = src + num_points / 4 * 16;
+
+  for (const float* item = src; item < src_end; item += 16) {
+// Shorthands for the x/y/z components of the 4 points in this group (the
+// 4th float of each point, the confidence, is unused). #undef'd after the
+// loop.
+#define x0 item[0]
+#define y0 item[1]
+#define z0 item[2]
+#define x1 item[4]
+#define y1 item[5]
+#define z1 item[6]
+#define x2 item[8]
+#define y2 item[9]
+#define z2 item[10]
+#define x3 item[12]
+#define y3 item[13]
+#define z3 item[14]
+
+    // Pinhole projection of the last point in the group: row = fy * y/z + cy.
+    const int row3 = static_cast<int>(fy * y3 / z3 + cy);
+
+    if (row3 < 0 || row3 >= height) {
+      // heuristics: due to radial distortion, early leave as the rest of data
+      // is outside color image. Constant 5 is large enough to compensate the
+      // radial distortion.
+      if (row3 >= height + 5)
+        break;
+      continue;
+    }
+
+    const int column3 = static_cast<int>(fx * x3 / z3 + cx);
+
+    // We know that the cameras are configured to support range < 7 meters but
+    // allow values up to 16m here. Multiplying depth by 4096 gives values with
+    // good exposure in 16-bit unsigned range.
+    uint16_t depth[] = {
+        static_cast<uint16_t>(z0 * 4096), static_cast<uint16_t>(z1 * 4096),
+        static_cast<uint16_t>(z2 * 4096), static_cast<uint16_t>(z3 * 4096)};
+    DCHECK(z0 * 4096 <= 65535 && z1 * 4096 <= 65535 && z2 * 4096 <= 65535 &&
+           z3 * 4096 <= 65535);
+
+    unsigned out_offset3 = row3 * width + column3;
+    if (column3 == current_x + 4 && row3 == current_y) {
+      // All values are packed. We only need to copy depth while there is no
+      // need to calculate x and y.
+      if (column3 - 3 < width)
+        out[out_offset3 - 3] = depth[0];
+      if (column3 - 2 < width)
+        out[out_offset3 - 2] = depth[1];
+      if (column3 - 1 < width)
+        out[out_offset3 - 1] = depth[2];
+      if (column3 < width)
+        out[out_offset3] = depth[3];
+      current_x += 4;
+      continue;
+    }
+
+    // Not packed relative to the previous group: project points 1 (and later
+    // 0 and 2) individually.
+    int row1 = static_cast<int>(fy * y1 / z1 + cy);
+    int column1 = static_cast<int>(fx * x1 / z1 + cx);
+
+    unsigned out_offset1 = row1 * width + column1;
+    if (column1 == current_x + 2 && row1 == current_y && column1 < width) {
+      if (row1 >= 0 && row1 < height) {
+        out[out_offset1 - 1] = depth[0];
+        out[out_offset1] = depth[1];
+      }
+    } else if (column1 == current_x + 3 && row1 == current_y &&
+               column1 < width) {
+      // This compensates for the radial distortion side effects (vertical lines
+      // with missing values) near the vertical edges.
+      if (row1 >= 0 && row1 < height) {
+        out[out_offset1 - 2] = depth[0];
+        out[out_offset1 - 1] = depth[0];
+        out[out_offset1] = depth[1];
+      }
+    } else {
+      const int row0 = static_cast<int>(fy * y0 / z0 + cy);
+      const int column0 = static_cast<int>(fx * x0 / z0 + cx);
+      if (row0 >= 0 && row0 < height && column0 >= 0 && column0 < width)
+        out[row0 * width + column0] = depth[0];
+      if (row1 >= 0 && row1 < height && column1 >= 0 && column1 < width)
+        out[out_offset1] = depth[1];
+    }
+
+    if (column3 == column1 + 2 && row3 == row1) {
+      if (column3 - 1 >= 0 && column3 - 1 < width)
+        out[out_offset3 - 1] = depth[2];
+      if (column3 >= 0 && column3 < width)
+        out[out_offset3] = depth[3];
+    } else if (column3 == column1 + 3 && row3 == row1) {
+      // This compensates for the radial distortion side effects (vertical lines
+      // with missing values) near the vertical edges.
+      if (column3 - 2 >= 0 && column3 - 2 < width)
+        out[out_offset3 - 2] = depth[2];
+      if (column3 - 1 >= 0 && column3 - 1 < width)
+        out[out_offset3 - 1] = depth[2];
+      if (column3 >= 0 && column3 < width)
+        out[out_offset3] = depth[3];
+    } else {
+      const int row2 = static_cast<int>(fy * y2 / z2 + cy);
+      const int column2 = static_cast<int>(fx * x2 / z2 + cx);
+      if (row2 >= 0 && row2 < height && column2 >= 0 && column2 < width)
+        out[row2 * width + column2] = depth[2];
+      if (row3 >= 0 && column3 >= 0 && column3 < width)
+        out[out_offset3] = depth[3];
+    }
+    current_x = (column3 >= 0) ? column3 : INT_MIN;
+    current_y = row3;
+  }
+// Keep the point-component shorthands local to the loop above.
+#undef x0
+#undef y0
+#undef z0
+#undef x1
+#undef y1
+#undef z1
+#undef x2
+#undef y2
+#undef z2
+#undef x3
+#undef y3
+#undef z3
+
+  if (AdvanceToNextFrameTime(current_time)) {
+    const base::TimeDelta capture_time =
+        base::TimeDelta::FromSecondsD(timestamp);
+    SendIncomingDataToClient(buffer.get(), buffer_length, 0, current_time,
+                             capture_time);
+  }
+}
+
+// Called from Java once the Android Tango service is bound; hands the binder
+// to the Tango client library and then configures the connection.
+void VideoCaptureDeviceTangoAndroid::OnTangoServiceConnected(JNIEnv* env,
+                                                             jobject obj,
+                                                             jobject binder) {
+  const Tango::TangoErrorType status =
+      Tango::TangoService_setBinder(env, binder);
+  if (status != Tango::TANGO_SUCCESS) {
+    VLOG(2) << "TangoService_setBinder error.";
+    return;
+  }
+  Connect("");
+}
+
+// Configures and connects to the Tango service with depth enabled, registers
+// the point cloud callback and caches the color camera intrinsics (scaled to
+// the depth frame size) used by OnPointCloudAvailable(). Notifies the client
+// via OnStarted() on success; on any failure it logs and returns.
+// NOTE(review): |uuid| is currently unused. Also, |config| obtained from
+// TangoService_getConfig does not appear to be released on the early-return
+// paths - confirm whether the glue exposes TangoConfig_free.
+void VideoCaptureDeviceTangoAndroid::Connect(const std::string& uuid) {
+  Tango::TangoConfig config =
+      Tango::TangoService_getConfig(Tango::TANGO_CONFIG_DEFAULT);
+  if (config == nullptr) {
+    VLOG(2) << "TangoService_getConfig error.";
+    return;
+  }
+
+  // Depth capture is opt-in; without this the service delivers no clouds.
+  Tango::TangoErrorType result =
+      Tango::TangoConfig_setBool(config, "config_enable_depth", true);
+  if (result != Tango::TANGO_SUCCESS) {
+    VLOG(2) << "Tango: config_enable_depth activation error:" << result;
+    return;
+  }
+
+  // Make sure TANGO_POINTCLOUD_XYZC (the default one) is set.
+  if (Tango::TangoConfig_setInt32(config, "config_depth_mode", 0) !=
+      Tango::TANGO_SUCCESS) {
+    VLOG(2) << "TangoConfig_setInt32 config_depth_mode error.";
+    return;
+  }
+
+  // |this| is passed as the callback context and recovered in
+  // OnPointCloudCallback().
+  if ((result = Tango::TangoService_connectOnPointCloudAvailable(
+           OnPointCloudCallback, this)) != Tango::TANGO_SUCCESS) {
+    VLOG(2) << "Tango: failed to connect to point cloud callback, error code:"
+            << result;
+    return;
+  }
+
+  if ((result = Tango::TangoService_connect(this, config)) !=
+      Tango::TANGO_SUCCESS) {
+    VLOG(2) << "TangoService_connect error, code:" << result;
+    return;
+  }
+
+  Intrinsics color_camera_intrinsics;
+  if ((result = Tango::TangoService_getCameraIntrinsics(
+           Tango::TANGO_CAMERA_COLOR, &color_camera_intrinsics)) !=
+      Tango::TANGO_SUCCESS) {
+    VLOG(2) << "Tango: failed to get the intrinsics for the color camera, "
+               "error code:"
+            << result;
+    return;
+  }
+
+  // Point cloud is in color camera space. We are scaling required intrinsics
+  // to depth camera resolution (214x120) for projecting 3D point cloud to 2D
+  // depth frame.
+  const float scale = ceil(static_cast<float>(color_camera_intrinsics.width) /
+                           DepthSize.width());
+  intrinsics_ = std::unique_ptr<Intrinsics>(new Intrinsics);
+  intrinsics_->cx = color_camera_intrinsics.cx / scale;
+  intrinsics_->cy = color_camera_intrinsics.cy / scale;
+  intrinsics_->fx = color_camera_intrinsics.fx / scale;
+  intrinsics_->fy = color_camera_intrinsics.fy / scale;
+
+  {
+    base::AutoLock lock(lock_);
+    if (client_)
+      client_->OnStarted();
+  }
+}
+
+// Called from Java when the Tango service is going away; tears down the
+// native-side Tango connection.
+void VideoCaptureDeviceTangoAndroid::TangoDisconnect(JNIEnv* env, jobject obj) {
+  Tango::TangoService_disconnect();
+}
+
+} // namespace media

Powered by Google App Engine
This is Rietveld 408576698