Index: device/vr/android/gvr/gvr_delegate.cc
diff --git a/device/vr/android/gvr/gvr_delegate.cc b/device/vr/android/gvr/gvr_delegate.cc
index 5358f73b54874ee47a12642d93956b5fd4e4ee92..4666317087ea8a1f2f33eb66d80fd4b98e6e32df 100644
--- a/device/vr/android/gvr/gvr_delegate.cc
+++ b/device/vr/android/gvr/gvr_delegate.cc
@@ -5,7 +5,9 @@
 #include "device/vr/android/gvr/gvr_delegate.h"
 #include "base/trace_event/trace_event.h"
+#include "device/vr/vr_math.h"
 #include "third_party/gvr-android-sdk/src/libraries/headers/vr/gvr/capi/include/gvr.h"
+#include "third_party/gvr-android-sdk/src/libraries/headers/vr/gvr/capi/include/gvr_types.h"
 #include "ui/gfx/transform.h"
 #include "ui/gfx/transform_util.h"
@@ -28,35 +30,17 @@ static constexpr int64_t kPredictionTimeWithoutVsyncNanos = 50000000;
 // less than a frame.
 static constexpr int64_t kAngularVelocityEpsilonNanos = 1000000;
-// Matrix math copied from vr_shell's vr_math.cc, can't use that here
-// due to dependency ordering. TODO(mthiesse): move the vr_math code
-// to this directory so that both locations can use it.
-
-// Rotation only, ignore translation components.
-gvr::Vec3f MatrixVectorRotate(const gvr::Mat4f& m, const gvr::Vec3f& v) {
-  gvr::Vec3f res;
-  res.x = m.m[0][0] * v.x + m.m[0][1] * v.y + m.m[0][2] * v.z;
-  res.y = m.m[1][0] * v.x + m.m[1][1] * v.y + m.m[1][2] * v.z;
-  res.z = m.m[2][0] * v.x + m.m[2][1] * v.y + m.m[2][2] * v.z;
-  return res;
+void GvrMatToMatf(const gvr::Mat4f& in, vr::Matf* out) {
+  // If our std::array implementation doesn't have any non-data members, we can
+  // just cast the gvr matrix to an std::array.
+  static_assert(sizeof(in) == sizeof(*out),
+                "Cannot reinterpret gvr::Mat4f as vr::Matf");
+  *out = *reinterpret_cast<vr::Matf*>(const_cast<gvr::Mat4f*>(&in));
 }
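
The reinterpret_cast in GvrMatToMatf above assumes vr::Matf (from the new device/vr/vr_math.h dependency) is layout-compatible with gvr::Mat4f's float m[4][4]; the static_assert guards only the sizes, not the layout. A minimal cast-free sketch, assuming vr::Matf supports the [row][col] indexing used elsewhere in this patch (GvrMatToMatfByCopy is a hypothetical name, not part of the change):

    // Element-wise copy: avoids the aliasing assumption; with fixed bounds
    // the compiler can typically reduce this to a plain memcpy.
    void GvrMatToMatfByCopy(const gvr::Mat4f& in, vr::Matf* out) {
      for (int row = 0; row < 4; ++row) {
        for (int col = 0; col < 4; ++col)
          (*out)[row][col] = in.m[row][col];
      }
    }
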
-gvr::Mat4f MatrixMul(const gvr::Mat4f& matrix1, const gvr::Mat4f& matrix2) {
-  gvr::Mat4f result;
-  for (int i = 0; i < 4; ++i) {
-    for (int j = 0; j < 4; ++j) {
-      result.m[i][j] = 0.0f;
-      for (int k = 0; k < 4; ++k) {
-        result.m[i][j] += matrix1.m[i][k] * matrix2.m[k][j];
-      }
-    }
-  }
-  return result;
-}
-
-gvr::Vec3f GetAngularVelocityFromPoses(gvr::Mat4f head_mat,
-                                       gvr::Mat4f head_mat_2,
-                                       double epsilon_seconds) {
+gfx::Vector3dF GetAngularVelocityFromPoses(vr::Matf head_mat,
+                                           vr::Matf head_mat_2,
+                                           double epsilon_seconds) {
   // The angular velocity is a 3-element vector pointing along the rotation
   // axis with magnitude equal to rotation speed in radians/second, expressed
   // in the seated frame of reference.
@@ -74,8 +58,8 @@ gvr::Vec3f GetAngularVelocityFromPoses(gvr::Mat4f head_mat,
   // See:
   // https://en.wikipedia.org/wiki/Angular_velocity#Calculation_from_the_orientation_matrix
-  gvr::Mat4f delta_mat;
-  gvr::Mat4f inverse_head_mat;
+  vr::Matf delta_mat;
+  vr::Matf inverse_head_mat;
   // Calculate difference matrix, and inverse head matrix rotation.
   // For the inverse rotation, just transpose the 3x3 subsection.
   //
@@ -83,41 +67,35 @@ gvr::Vec3f GetAngularVelocityFromPoses(gvr::Mat4f head_mat,
   // provided by the caller.
   for (int j = 0; j < 3; ++j) {
     for (int i = 0; i < 3; ++i) {
-      delta_mat.m[j][i] =
-          (head_mat_2.m[j][i] - head_mat.m[j][i]) / epsilon_seconds;
-      inverse_head_mat.m[j][i] = head_mat.m[i][j];
+      delta_mat[j][i] = (head_mat_2[j][i] - head_mat[j][i]) / epsilon_seconds;
+      inverse_head_mat[j][i] = head_mat[i][j];
     }
-    delta_mat.m[j][3] = delta_mat.m[3][j] = 0.0;
-    inverse_head_mat.m[j][3] = inverse_head_mat.m[3][j] = 0.0;
+    delta_mat[j][3] = delta_mat[3][j] = 0.0;
+    inverse_head_mat[j][3] = inverse_head_mat[3][j] = 0.0;
   }
-  delta_mat.m[3][3] = 1.0;
-  inverse_head_mat.m[3][3] = 1.0;
-  gvr::Mat4f omega_mat = device::MatrixMul(delta_mat, inverse_head_mat);
-  gvr::Vec3f omega_vec;
-  omega_vec.x = -omega_mat.m[2][1];
-  omega_vec.y = omega_mat.m[2][0];
-  omega_vec.z = -omega_mat.m[1][0];
+  delta_mat[3][3] = 1.0;
+  inverse_head_mat[3][3] = 1.0;
+  vr::Matf omega_mat;
+  vr::MatrixMul(delta_mat, inverse_head_mat, &omega_mat);
+  gfx::Vector3dF omega_vec(-omega_mat[2][1], omega_mat[2][0], -omega_mat[1][0]);
   // Rotate by inverse head matrix to bring into seated space.
-  gvr::Vec3f angular_velocity =
-      device::MatrixVectorRotate(inverse_head_mat, omega_vec);
-
-  return angular_velocity;
+  return vr::MatrixVectorRotate(inverse_head_mat, omega_vec);
 }
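
For reference, the identity GetAngularVelocityFromPoses implements (see the Wikipedia link in the comment): if R(t) is the rotation part of the head matrix, the matrix

    \Omega \;=\; \dot{R}\,R^{\mathsf{T}} \;\approx\; \frac{R(t+\epsilon) - R(t)}{\epsilon}\,R(t)^{\mathsf{T}}

is skew-symmetric, and the angular velocity can be read off its off-diagonal entries:

    \Omega =
    \begin{pmatrix}
      0 & -\omega_z & \omega_y \\
      \omega_z & 0 & -\omega_x \\
      -\omega_y & \omega_x & 0
    \end{pmatrix}

Here delta_mat is the finite-difference factor, inverse_head_mat is R^T, and omega_mat is \Omega; with kAngularVelocityEpsilonNanos = 1000000, epsilon_seconds works out to 1e-3 s. The sign pattern used to build omega_vec differs from the textbook layout above, presumably because GVR's head matrix is the head-from-start (inverse) transform; the code's convention is taken as given here.
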
 }  // namespace
 /* static */
-mojom::VRPosePtr GvrDelegate::VRPosePtrFromGvrPose(gvr::Mat4f head_mat) {
+mojom::VRPosePtr GvrDelegate::VRPosePtrFromGvrPose(const vr::Matf& head_mat) {
   mojom::VRPosePtr pose = mojom::VRPose::New();
   pose->orientation.emplace(4);
   gfx::Transform inv_transform(
-      head_mat.m[0][0], head_mat.m[0][1], head_mat.m[0][2], head_mat.m[0][3],
-      head_mat.m[1][0], head_mat.m[1][1], head_mat.m[1][2], head_mat.m[1][3],
-      head_mat.m[2][0], head_mat.m[2][1], head_mat.m[2][2], head_mat.m[2][3],
-      head_mat.m[3][0], head_mat.m[3][1], head_mat.m[3][2], head_mat.m[3][3]);
+      head_mat[0][0], head_mat[0][1], head_mat[0][2], head_mat[0][3],
+      head_mat[1][0], head_mat[1][1], head_mat[1][2], head_mat[1][3],
+      head_mat[2][0], head_mat[2][1], head_mat[2][2], head_mat[2][3],
+      head_mat[3][0], head_mat[3][1], head_mat[3][2], head_mat[3][3]);
   gfx::Transform transform;
   if (inv_transform.GetInverse(&transform)) {
@@ -139,50 +117,55 @@ mojom::VRPosePtr GvrDelegate::VRPosePtrFromGvrPose(gvr::Mat4f head_mat) {
 }
 /* static */
-gvr::Mat4f GvrDelegate::GetGvrPoseWithNeckModel(gvr::GvrApi* gvr_api) {
+void GvrDelegate::GetGvrPoseWithNeckModel(gvr::GvrApi* gvr_api, vr::Matf* out) {
   gvr::ClockTimePoint target_time = gvr::GvrApi::GetTimePointNow();
   target_time.monotonic_system_time_nanos += kPredictionTimeWithoutVsyncNanos;
   gvr::Mat4f head_mat = gvr_api->ApplyNeckModel(
       gvr_api->GetHeadSpaceFromStartSpaceRotation(target_time), 1.0f);
-  return head_mat;
+  GvrMatToMatf(head_mat, out);
 }
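
With the signature change from returning gvr::Mat4f to filling a vr::Matf out-parameter, a caller sketch (types and signatures as in this patch; gvr_api assumed to be a valid gvr::GvrApi*):

    vr::Matf head_mat;
    GvrDelegate::GetGvrPoseWithNeckModel(gvr_api, &head_mat);
    mojom::VRPosePtr pose = GvrDelegate::VRPosePtrFromGvrPose(head_mat);
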
 /* static */
 mojom::VRPosePtr GvrDelegate::GetVRPosePtrWithNeckModel(
     gvr::GvrApi* gvr_api,
-    gvr::Mat4f* head_mat_out) {
+    vr::Matf* head_mat_out) {
   gvr::ClockTimePoint target_time = gvr::GvrApi::GetTimePointNow();
   target_time.monotonic_system_time_nanos += kPredictionTimeWithoutVsyncNanos;
-  gvr::Mat4f head_mat = gvr_api->ApplyNeckModel(
+  gvr::Mat4f gvr_head_mat = gvr_api->ApplyNeckModel(
       gvr_api->GetHeadSpaceFromStartSpaceRotation(target_time), 1.0f);
-  if (head_mat_out)
-    *head_mat_out = head_mat;
+  vr::Matf* head_mat_ptr = head_mat_out;
+  vr::Matf head_mat;
+  if (!head_mat_ptr)
+    head_mat_ptr = &head_mat;
+  GvrMatToMatf(gvr_head_mat, head_mat_ptr);
-  mojom::VRPosePtr pose = GvrDelegate::VRPosePtrFromGvrPose(head_mat);
+  mojom::VRPosePtr pose = GvrDelegate::VRPosePtrFromGvrPose(*head_mat_ptr);
   // Get a second pose a bit later to calculate angular velocity.
   target_time.monotonic_system_time_nanos += kAngularVelocityEpsilonNanos;
-  gvr::Mat4f head_mat_2 =
+  gvr::Mat4f gvr_head_mat_2 =
       gvr_api->GetHeadSpaceFromStartSpaceRotation(target_time);
+  vr::Matf head_mat_2;
+  GvrMatToMatf(gvr_head_mat_2, &head_mat_2);
   // Add headset angular velocity to the pose.
   pose->angularVelocity.emplace(3);
   double epsilon_seconds = kAngularVelocityEpsilonNanos * 1e-9;
-  gvr::Vec3f angular_velocity =
-      GetAngularVelocityFromPoses(head_mat, head_mat_2, epsilon_seconds);
-  pose->angularVelocity.value()[0] = angular_velocity.x;
-  pose->angularVelocity.value()[1] = angular_velocity.y;
-  pose->angularVelocity.value()[2] = angular_velocity.z;
+  gfx::Vector3dF angular_velocity =
+      GetAngularVelocityFromPoses(*head_mat_ptr, head_mat_2, epsilon_seconds);
+  pose->angularVelocity.value()[0] = angular_velocity.x();
+  pose->angularVelocity.value()[1] = angular_velocity.y();
+  pose->angularVelocity.value()[2] = angular_velocity.z();
   return pose;
 }
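
A note on the head_mat_ptr indirection above: when head_mat_out is null the function still needs a matrix for the pose math, so it redirects the pointer at a stack local. An equivalent formulation that copies instead of redirecting (a sketch for comparison, not part of the patch):

    vr::Matf head_mat;
    GvrMatToMatf(gvr_head_mat, &head_mat);
    if (head_mat_out)
      *head_mat_out = head_mat;  // One extra 64-byte copy when requested.
    // ...then use head_mat directly where the patch uses *head_mat_ptr.
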
 /* static */
-gvr::Sizei GvrDelegate::GetRecommendedWebVrSize(gvr::GvrApi* gvr_api) {
+gfx::Size GvrDelegate::GetRecommendedWebVrSize(gvr::GvrApi* gvr_api) {
   // Pick a reasonable default size for the WebVR transfer surface
   // based on a downscaled 1:1 render resolution. This size will also
   // be reported to the client via CreateVRDisplayInfo as the
@@ -192,10 +175,11 @@ gvr::Sizei GvrDelegate::GetRecommendedWebVrSize(gvr::GvrApi* gvr_api) {
   // framebuffer to match.
   gvr::Sizei render_target_size =
       gvr_api->GetMaximumEffectiveRenderTargetSize();
-  gvr::Sizei webvr_size = {static_cast<int>(render_target_size.width *
-                                            kWebVrRecommendedResolutionScale),
-                           static_cast<int>(render_target_size.height *
-                                            kWebVrRecommendedResolutionScale)};
+
+  gfx::Size webvr_size(
+      render_target_size.width * kWebVrRecommendedResolutionScale,
+      render_target_size.height * kWebVrRecommendedResolutionScale);
+
   // Ensure that the width is an even number so that the eyes each
   // get the same size, the recommended renderWidth is per eye
   // and the client will use the sum of the left and right width.
@@ -203,15 +187,14 @@ gvr::Sizei GvrDelegate::GetRecommendedWebVrSize(gvr::GvrApi* gvr_api) {
   // TODO(klausw,crbug.com/699350): should we round the recommended
   // size to a multiple of 2^N pixels to be friendlier to the GPU? The
   // exact size doesn't matter, and it might be more efficient.
-  webvr_size.width &= ~1;
-
+  webvr_size.set_width(webvr_size.width() & ~1);
   return webvr_size;
 }
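
webvr_size.width() & ~1 clears the low bit, rounding the width down to the nearest even number so that the per-eye renderWidth in CreateVRDisplayInfo divides evenly; a hypothetical scaled width of 1793, for example, becomes 1792, i.e. 896 per eye. Note also that gfx::Size's constructor takes ints, so the scaled float products are implicitly truncated, matching the static_cast<int> behavior of the old aggregate initializer.
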
 /* static */
 mojom::VRDisplayInfoPtr GvrDelegate::CreateVRDisplayInfo(
     gvr::GvrApi* gvr_api,
-    gvr::Sizei recommended_size,
+    gfx::Size recommended_size,
     uint32_t device_id) {
   TRACE_EVENT0("input", "GvrDelegate::CreateVRDisplayInfo");
@@ -239,8 +222,8 @@ mojom::VRDisplayInfoPtr GvrDelegate::CreateVRDisplayInfo(
         (eye == GVR_LEFT_EYE) ? device->leftEye : device->rightEye;
     eye_params->fieldOfView = mojom::VRFieldOfView::New();
     eye_params->offset.resize(3);
-    eye_params->renderWidth = recommended_size.width / 2;
-    eye_params->renderHeight = recommended_size.height;
+    eye_params->renderWidth = recommended_size.width() / 2;
+    eye_params->renderHeight = recommended_size.height();
     gvr::BufferViewport eye_viewport = gvr_api->CreateBufferViewport();
     gvr_buffer_viewports.GetBufferViewport(eye, &eye_viewport);