Index: chrome/browser/android/vr_shell/vr_shell.cc |
diff --git a/chrome/browser/android/vr_shell/vr_shell.cc b/chrome/browser/android/vr_shell/vr_shell.cc |
index e8a1b4af58e67ab63bbd0b48fff5c94ea2574dd1..0904a38ada482b9a70d32579753a1fe05b132c59 100644 |
--- a/chrome/browser/android/vr_shell/vr_shell.cc |
+++ b/chrome/browser/android/vr_shell/vr_shell.cc |
@@ -132,7 +132,7 @@ void VrShell::InitializeGl(JNIEnv* env, |
std::vector<gvr::BufferSpec> specs; |
specs.push_back(gvr_api_->CreateBufferSpec()); |
render_size_ = specs[0].GetSize(); |
- swap_chain_.reset(new gvr::SwapChain(gvr_api_->CreateSwapchain(specs))); |
+ swap_chain_.reset(new gvr::SwapChain(gvr_api_->CreateSwapChain(specs))); |
vr_shell_renderer_.reset(new VrShellRenderer()); |
buffer_viewport_list_.reset( |
@@ -183,38 +183,14 @@ void VrShell::UpdateController() { |
look_at_vector_ = GetRayPoint(translation, forward, distance_to_plane); |
} |
-void ApplyNeckModel(gvr::Mat4f& mat_forward) { |
- // This assumes that the input matrix is a pure rotation matrix. The |
- // input object_from_reference matrix has the inverse rotation of |
- // the head rotation. Invert it (this is just a transpose). |
- gvr::Mat4f mat = MatrixTranspose(mat_forward); |
- |
- // Position of the point between the eyes, relative to the neck pivot: |
- const float kNeckHorizontalOffset = -0.080f; // meters in Z |
- const float kNeckVerticalOffset = 0.075f; // meters in Y |
- |
- std::array<float, 4> neckOffset = { |
- {0.0f, kNeckVerticalOffset, kNeckHorizontalOffset, 1.0f}}; |
- |
- // Rotate eyes around neck pivot point. |
- auto offset = MatrixVectorMul(mat, neckOffset); |
- |
- // Measure new position relative to original center of head, because |
- // applying a neck model should not elevate the camera. |
- offset[1] -= kNeckVerticalOffset; |
- |
- // Right-multiply the inverse translation onto the |
- // object_from_reference_matrix. |
- TranslateMRight(mat_forward, mat_forward, -offset[0], -offset[1], -offset[2]); |
-} |
- |
void VrShell::DrawFrame(JNIEnv* env, const JavaParamRef<jobject>& obj) { |
buffer_viewport_list_->SetToRecommendedBufferViewports(); |
gvr::Frame frame = swap_chain_->AcquireFrame(); |
gvr::ClockTimePoint target_time = gvr::GvrApi::GetTimePointNow(); |
target_time.monotonic_system_time_nanos += kPredictionTimeWithoutVsyncNanos; |
- head_pose_ = gvr_api_->GetHeadPoseInStartSpace(target_time); |
+ head_pose_ = gvr_api_->GetHeadSpaceFromStartSpaceRotation(target_time); |
+ head_pose_ = gvr_api_->ApplyNeckModel(head_pose_, 1.0f); |
// Bind back to the default framebuffer. |
frame.BindBuffer(0); |
@@ -234,16 +210,6 @@ void VrShell::DrawVrShell(int64_t time) { |
forward_vector_ = getForwardVector(head_pose_); |
- gvr::Vec3f headPos = getTranslation(head_pose_); |
- if (headPos.x == 0.0f && headPos.y == 0.0f && headPos.z == 0.0f) { |
- // This appears to be a 3DOF pose without a neck model. Add one. |
- // The head pose has redundant data. Assume we're only using the |
- // object_from_reference_matrix, we're not updating position_external. |
- // TODO: Not sure what object_from_reference_matrix is. The new api removed |
- // it. For now, removing it seems working fine. |
- ApplyNeckModel(head_pose_); |
- } |
- |
desktop_plane_->translation.x = desktop_position_.x; |
desktop_plane_->translation.y = desktop_position_.y; |
desktop_plane_->translation.z = desktop_position_.z; |
@@ -380,6 +346,7 @@ void VrShell::DrawWebVr() { |
glDisable(GL_POLYGON_OFFSET_FILL); |
// Don't need to clear, since we're drawing over the entire render target. |
[Inline review comment — billorr, 2016/09/23 17:26:24: the preceding |
"Don't need to clear" comment is now out of date, since a glClear call |
is being added immediately below.] |
+ glClear(GL_COLOR_BUFFER_BIT); |
glViewport(0, 0, render_size_.width, render_size_.height); |
vr_shell_renderer_->GetWebVrRenderer()->Draw(content_texture_id_); |