| Index: chrome/browser/android/vr_shell/vr_shell_gl.cc
|
| diff --git a/chrome/browser/android/vr_shell/vr_shell_gl.cc b/chrome/browser/android/vr_shell/vr_shell_gl.cc
|
| index cccea831701a7b5b749bf2356f28275e3c39432a..d466f323bdb61695a228d72393064490fa1f140c 100644
|
| --- a/chrome/browser/android/vr_shell/vr_shell_gl.cc
|
| +++ b/chrome/browser/android/vr_shell/vr_shell_gl.cc
|
| @@ -12,7 +12,6 @@
|
| #include "base/memory/ptr_util.h"
|
| #include "base/metrics/histogram_macros.h"
|
| #include "base/threading/thread_task_runner_handle.h"
|
| -#include "chrome/browser/android/vr_shell/mailbox_to_surface_bridge.h"
|
| #include "chrome/browser/android/vr_shell/ui_elements.h"
|
| #include "chrome/browser/android/vr_shell/ui_scene.h"
|
| #include "chrome/browser/android/vr_shell/vr_controller.h"
|
| @@ -80,6 +79,10 @@
|
| // Buffer size large enough to handle the current backlog of poses which is
|
| // 2-3 frames.
|
| static constexpr unsigned kPoseRingBufferSize = 8;
|
| +
|
| +// Magic numbers used to mark valid pose index values encoded in frame
|
| +// data. Must match the magic numbers used in blink's VRDisplay.cpp.
|
| +static constexpr std::array<uint8_t, 2> kWebVrPosePixelMagicNumbers{{42, 142}};
|
|
|
| float Distance(const gvr::Vec3f& vec1, const gvr::Vec3f& vec2) {
|
| float xdiff = (vec1.x - vec2.x);
|
| @@ -169,12 +172,6 @@
|
|
|
| VrShellGl::~VrShellGl() {
|
| vsync_task_.Cancel();
|
| - // TODO(mthiesse): Can we omit the Close() here? Concern is that if
|
| - // both ends of the connection ever live in the same process for
|
| - // some reason, we could receive another VSync request in response
|
| - // to the closing message in the destructor but fail to respond to
|
| - // the callback.
|
| - binding_.Close();
|
| if (!callback_.is_null()) {
|
| // When this VSync provider is going away we have to respond to pending
|
| // callbacks, so instead of providing a VSync, tell the requester to try
|
| @@ -182,7 +179,13 @@
|
| // to this message will go through some other VSyncProvider.
|
| base::ResetAndReturn(&callback_)
|
| .Run(nullptr, base::TimeDelta(), -1,
|
| - device::mojom::VRVSyncProvider::Status::CLOSING);
|
| + device::mojom::VRVSyncProvider::Status::RETRY);
|
| + }
|
| + if (binding_.is_bound()) {
|
| + main_thread_task_runner_->PostTask(
|
| + FROM_HERE,
|
| + base::Bind(&VrShellDelegate::OnVRVsyncProviderRequest,
|
| + delegate_provider_, base::Passed(binding_.Unbind())));
|
| }
|
| }
|
|
|
| @@ -229,33 +232,23 @@
|
| return;
|
| }
|
|
|
| - unsigned int textures[3];
|
| - glGenTextures(3, textures);
|
| + unsigned int textures[2];
|
| + glGenTextures(2, textures);
|
| ui_texture_id_ = textures[0];
|
| content_texture_id_ = textures[1];
|
| - webvr_texture_id_ = textures[2];
|
| ui_surface_texture_ = gl::SurfaceTexture::Create(ui_texture_id_);
|
| content_surface_texture_ = gl::SurfaceTexture::Create(content_texture_id_);
|
| - webvr_surface_texture_ = gl::SurfaceTexture::Create(webvr_texture_id_);
|
| CreateUiSurface();
|
| CreateContentSurface();
|
| ui_surface_texture_->SetFrameAvailableCallback(base::Bind(
|
| &VrShellGl::OnUIFrameAvailable, weak_ptr_factory_.GetWeakPtr()));
|
| content_surface_texture_->SetFrameAvailableCallback(base::Bind(
|
| &VrShellGl::OnContentFrameAvailable, weak_ptr_factory_.GetWeakPtr()));
|
| - webvr_surface_texture_->SetFrameAvailableCallback(base::Bind(
|
| - &VrShellGl::OnWebVRFrameAvailable, weak_ptr_factory_.GetWeakPtr()));
|
| + content_surface_texture_->SetDefaultBufferSize(
|
| + content_tex_physical_size_.width, content_tex_physical_size_.height);
|
| ui_surface_texture_->SetDefaultBufferSize(ui_tex_physical_size_.width,
|
| ui_tex_physical_size_.height);
|
| - content_surface_texture_->SetDefaultBufferSize(
|
| - content_tex_physical_size_.width, content_tex_physical_size_.height);
|
| InitializeRenderer();
|
| -
|
| - gvr::Sizei webvr_size = VrShell::GetRecommendedWebVrSize(gvr_api_.get());
|
| - DVLOG(1) << __FUNCTION__ << ": resize initial to " << webvr_size.width << "x"
|
| - << webvr_size.height;
|
| -
|
| - CreateOrResizeWebVRSurface(webvr_size);
|
|
|
| vsync_task_.Reset(base::Bind(&VrShellGl::OnVSync, base::Unretained(this)));
|
| OnVSync();
|
| @@ -279,73 +272,6 @@
|
| ui_surface_->j_surface().obj()));
|
| }
|
|
|
| -void VrShellGl::CreateOrResizeWebVRSurface(const gvr::Sizei& size) {
|
| - if (!webvr_surface_texture_) {
|
| - DLOG(ERROR) << "No WebVR surface texture available";
|
| - return;
|
| - }
|
| -
|
| - // ContentPhysicalBoundsChanged is getting called twice with
|
| - // identical sizes? Avoid thrashing the existing context.
|
| - if (size == webvr_surface_size_) {
|
| - return;
|
| - }
|
| -
|
| - if (!size.width || !size.height) {
|
| - // Invalid size, defer until a new size arrives on a future bounds update.
|
| - return;
|
| - }
|
| -
|
| - webvr_surface_texture_->SetDefaultBufferSize(size.width, size.height);
|
| - webvr_surface_size_ = size;
|
| -
|
| - if (mailbox_bridge_) {
|
| - mailbox_bridge_->ResizeSurface(size.width, size.height);
|
| - } else {
|
| - mailbox_bridge_ = base::MakeUnique<MailboxToSurfaceBridge>();
|
| - mailbox_bridge_->CreateSurface(webvr_surface_texture_.get());
|
| - }
|
| -}
|
| -
|
| -void VrShellGl::SubmitWebVRFrame(int16_t frame_index,
|
| - const gpu::MailboxHolder& mailbox) {
|
| - TRACE_EVENT0("gpu", "VrShellGl::SubmitWebVRFrame");
|
| -
|
| - // Swapping twice on a Surface without calling updateTexImage in
|
| - // between can lose frames, so don't draw+swap if we already have
|
| - // a pending frame we haven't consumed yet.
|
| - bool swapped = false;
|
| - if (pending_frames_.empty()) {
|
| - swapped = mailbox_bridge_->CopyMailboxToSurfaceAndSwap(mailbox);
|
| - if (swapped) {
|
| - // Tell OnWebVRFrameAvailable to expect a new frame to arrive on
|
| - // the SurfaceTexture, and save the associated frame index.
|
| - pending_frames_.emplace(frame_index);
|
| - }
|
| - }
|
| - // Always notify the client that we're done with the mailbox even
|
| - // if we haven't drawn it, so that it's eligible for destruction.
|
| - submit_client_->OnSubmitFrameTransferred();
|
| - if (!swapped) {
|
| - // We dropped without drawing, report this as completed rendering
|
| - // now to unblock the client. We're not going to receive it in
|
| - // OnWebVRFrameAvailable where we'd normally report that.
|
| - submit_client_->OnSubmitFrameRendered();
|
| - }
|
| -
|
| - TRACE_EVENT0("gpu", "VrShellGl::glFinish");
|
| - // This is a load-bearing glFinish, please don't remove it without
|
| - // before/after timing comparisons. Goal is to clear the GPU queue
|
| - // of the native GL context to avoid stalls later in GVR frame
|
| - // acquire/submit.
|
| - glFinish();
|
| -}
|
| -
|
| -void VrShellGl::SetSubmitClient(
|
| - device::mojom::VRSubmitFrameClientPtrInfo submit_client_info) {
|
| - submit_client_.Bind(std::move(submit_client_info));
|
| -}
|
| -
|
| void VrShellGl::OnUIFrameAvailable() {
|
| ui_surface_texture_->UpdateTexImage();
|
| }
|
| @@ -355,30 +281,42 @@
|
| received_frame_ = true;
|
| }
|
|
|
| -void VrShellGl::OnWebVRFrameAvailable() {
|
| - // A "while" loop here is a bad idea. It's legal to call
|
| - // UpdateTexImage repeatedly even if no frames are available, but
|
| - // that does *not* wait for a new frame, it just reuses the most
|
| - // recent one. That would mess up the count.
|
| - if (pending_frames_.empty()) {
|
| - // We're expecting a frame, but it's not here yet. Retry in OnVsync.
|
| - ++premature_received_frames_;
|
| - return;
|
| - }
|
| -
|
| - webvr_surface_texture_->UpdateTexImage();
|
| - int frame_index = pending_frames_.front();
|
| - TRACE_EVENT1("gpu", "VrShellGl::OnWebVRFrameAvailable", "frame", frame_index);
|
| - pending_frames_.pop();
|
| -
|
| - // It is legal for the WebVR client to submit a new frame now, since
|
| - // we've consumed the image. TODO(klausw): would timing be better if
|
| - // we move the "rendered" notification after draw, or suppress
|
| - // the next vsync until that's done?
|
| -
|
| - submit_client_->OnSubmitFrameRendered();
|
| -
|
| - DrawFrame(frame_index);
|
| +bool VrShellGl::GetPixelEncodedFrameIndex(uint16_t* frame_index) {
|
| + TRACE_EVENT0("gpu", "VrShellGl::GetPixelEncodedFrameIndex");
|
| + if (!received_frame_) {
|
| + if (last_frame_index_ == (uint16_t)-1)
|
| + return false;
|
| + *frame_index = last_frame_index_;
|
| + return true;
|
| + }
|
| + received_frame_ = false;
|
| +
|
| + // Read the pose index encoded in the bottom-left pixel's color values.
|
| + // See also third_party/WebKit/Source/modules/vr/VRDisplay.cpp which
|
| + // encodes the pose index, and device/vr/android/gvr/gvr_device.cc
|
| + // which tracks poses. Returns true and writes the low byte (0..255) to
|
| + // *frame_index if valid, or false if the magic number check fails.
|
| + uint8_t pixels[4];
|
| + // Assume we're reading from the framebuffer we just wrote to.
|
| + // That's true currently; we may need to use glReadBuffer(GL_BACK)
|
| + // or equivalent if the rendering setup changes in the future.
|
| + glReadPixels(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
|
| +
|
| + // Check for the magic numbers written by VRDisplay.cpp on submit.
|
| + // This helps avoid glitches from garbage data in the render
|
| + // buffer that can appear during initialization or resizing. These
|
| + // often appear as flashes of all-black or all-white pixels.
|
| + if (pixels[1] == kWebVrPosePixelMagicNumbers[0] &&
|
| + pixels[2] == kWebVrPosePixelMagicNumbers[1]) {
|
| + // Pose is good.
|
| + *frame_index = pixels[0];
|
| + last_frame_index_ = pixels[0];
|
| + return true;
|
| + }
|
| + VLOG(1) << "WebVR: reject decoded pose index " << static_cast<int>(pixels[0])
|
| + << ", bad magic number " << static_cast<int>(pixels[1]) << ", "
|
| + << static_cast<int>(pixels[2]);
|
| + return false;
|
| }
|
|
|
| void VrShellGl::GvrInit(gvr_context* gvr_api) {
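
Note: the encoding side referenced above lives in blink's VRDisplay.cpp, which is not part of this diff. Below is a minimal illustrative sketch of what that stamping step could look like, assuming a GL scissor-and-clear approach and a hypothetical helper name; the channel layout (low byte of the pose index in red, magic numbers 42/142 in green and blue) matches what GetPixelEncodedFrameIndex reads back in the hunk above.

  #include <GLES2/gl2.h>
  #include <cstdint>

  // Hypothetical illustration only -- not quoted from VRDisplay.cpp.
  void StampPoseIndexPixel(uint16_t pose_index) {
    // Red channel: low byte of the pose index. Green/blue: magic numbers so
    // the consumer can reject frames containing garbage data.
    glEnable(GL_SCISSOR_TEST);
    glScissor(0, 0, 1, 1);  // bottom-left pixel only
    glClearColor((pose_index & 0xFF) / 255.0f, 42 / 255.0f, 142 / 255.0f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT);
    glDisable(GL_SCISSOR_TEST);
  }
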
|
| @@ -403,6 +341,12 @@
|
| }
|
|
|
| void VrShellGl::InitializeRenderer() {
|
| + // While WebVR is going through the compositor path, it shares the
|
| + // content texture's ID. This will change once WebVR gets its own
|
| + // surface, but store the ID separately now to avoid future confusion.
|
| + // TODO(klausw,crbug.com/655722): remove this.
|
| + webvr_texture_id_ = content_texture_id_;
|
| +
|
| gvr_api_->InitializeGl();
|
| webvr_head_pose_.assign(kPoseRingBufferSize,
|
| gvr_api_->GetHeadSpaceFromStartSpaceRotation(
|
| @@ -412,7 +356,6 @@
|
| // For kFramePrimaryBuffer (primary VrShell and WebVR content)
|
| specs.push_back(gvr_api_->CreateBufferSpec());
|
| render_size_primary_ = specs[kFramePrimaryBuffer].GetSize();
|
| - render_size_vrshell_ = render_size_primary_;
|
|
|
| // For kFrameHeadlockedBuffer (for WebVR insecure content warning).
|
| // Set this up at fixed resolution, the (smaller) FOV gets set below.
|
| @@ -689,19 +632,40 @@
|
| base::Bind(target, weak_vr_shell_, base::Passed(std::move(event))));
|
| }
|
|
|
| -void VrShellGl::DrawFrame(int16_t frame_index) {
|
| - TRACE_EVENT1("gpu", "VrShellGl::DrawFrame", "frame", frame_index);
|
| +void VrShellGl::DrawFrame() {
|
| + TRACE_EVENT0("gpu", "VrShellGl::DrawFrame");
|
|
|
| // Reset the viewport list to just the pair of viewports for the
|
| // primary buffer each frame. Head-locked viewports get added by
|
| // DrawVrShell if needed.
|
| buffer_viewport_list_->SetToRecommendedBufferViewports();
|
|
|
| - // If needed, resize the primary buffer for use with WebVR. Resizing
|
| - // needs to happen before acquiring a frame.
|
| + gvr::Frame frame = swap_chain_->AcquireFrame();
|
| + if (!frame.is_valid()) {
|
| + return;
|
| + }
|
| + frame.BindBuffer(kFramePrimaryBuffer);
|
| if (web_vr_mode_) {
|
| - // Process all pending_bounds_ changes targeted for before this
|
| - // frame, being careful of wrapping frame indices.
|
| + DrawWebVr();
|
| + }
|
| +
|
| + uint16_t frame_index;
|
| + gvr::Mat4f head_pose;
|
| +
|
| + // When using async reprojection, we need to know which pose was used in
|
| + // the WebVR app for drawing this frame. Due to unknown amounts of
|
| + // buffering in the compositor and SurfaceTexture, we read the pose number
|
| + // from a corner pixel. There's no point in doing this for legacy
|
| + // distortion rendering since that doesn't need a pose, and reading back
|
| + // pixels is an expensive operation. TODO(klausw,crbug.com/655722): stop
|
| + // doing this once we have working no-compositor rendering for WebVR.
|
| + if (web_vr_mode_ && gvr_api_->GetAsyncReprojectionEnabled() &&
|
| + GetPixelEncodedFrameIndex(&frame_index)) {
|
| + static_assert(!((kPoseRingBufferSize - 1) & kPoseRingBufferSize),
|
| + "kPoseRingBufferSize must be a power of 2");
|
| + head_pose = webvr_head_pose_[frame_index % kPoseRingBufferSize];
|
| + // Process all pending_bounds_ changes targeted for before this frame, being
|
| + // careful of wrapping frame indices.
|
| static constexpr unsigned max =
|
| std::numeric_limits<decltype(frame_index_)>::max();
|
| static_assert(max > kPoseRingBufferSize * 2,
|
| @@ -709,73 +673,26 @@
|
| "than half of frame_index_ range.");
|
| while (!pending_bounds_.empty()) {
|
| uint16_t index = pending_bounds_.front().first;
|
| - // If index is less than the frame_index it's possible we've
|
| - // wrapped, so we extend the range and 'un-wrap' to account
|
| - // for this.
|
| + // If index is less than the frame_index it's possible we've wrapped, so
|
| + // we extend the range and 'un-wrap' to account for this.
|
| if (index < frame_index)
|
| index += max;
|
| - // If the pending bounds change is for an upcoming frame
|
| - // within our buffer size, wait to apply it. Otherwise, apply
|
| - // it immediately. This guarantees that even if we miss many
|
| - // frames, the queue can't fill up with stale bounds.
|
| + // If the pending bounds change is for an upcoming frame within our buffer
|
| + // size, wait to apply it. Otherwise, apply it immediately. This
|
| + // guarantees that even if we miss many frames, the queue can't fill up
|
| + // with stale bounds.
|
| if (index > frame_index && index <= frame_index + kPoseRingBufferSize)
|
| break;
|
|
|
| - const WebVrBounds& bounds = pending_bounds_.front().second;
|
| - webvr_left_viewport_->SetSourceUv(bounds.left_bounds);
|
| - webvr_right_viewport_->SetSourceUv(bounds.right_bounds);
|
| - DVLOG(1) << __FUNCTION__ << ": resize from pending_bounds to "
|
| - << bounds.source_size.width << "x" << bounds.source_size.height;
|
| - CreateOrResizeWebVRSurface(bounds.source_size);
|
| + const BoundsPair& bounds = pending_bounds_.front().second;
|
| + webvr_left_viewport_->SetSourceUv(bounds.first);
|
| + webvr_right_viewport_->SetSourceUv(bounds.second);
|
| pending_bounds_.pop();
|
| }
|
| buffer_viewport_list_->SetBufferViewport(GVR_LEFT_EYE,
|
| *webvr_left_viewport_);
|
| buffer_viewport_list_->SetBufferViewport(GVR_RIGHT_EYE,
|
| *webvr_right_viewport_);
|
| - if (render_size_primary_ != webvr_surface_size_) {
|
| - if (!webvr_surface_size_.width) {
|
| - // Don't try to resize to 0x0 pixels, drop frames until we get a
|
| - // valid size.
|
| - return;
|
| - }
|
| -
|
| - render_size_primary_ = webvr_surface_size_;
|
| - DVLOG(1) << __FUNCTION__ << ": resize GVR to "
|
| - << render_size_primary_.width << "x"
|
| - << render_size_primary_.height;
|
| - swap_chain_->ResizeBuffer(kFramePrimaryBuffer, render_size_primary_);
|
| - }
|
| - } else {
|
| - if (render_size_primary_ != render_size_vrshell_) {
|
| - render_size_primary_ = render_size_vrshell_;
|
| - swap_chain_->ResizeBuffer(kFramePrimaryBuffer, render_size_primary_);
|
| - }
|
| - }
|
| -
|
| - TRACE_EVENT_BEGIN0("gpu", "VrShellGl::AcquireFrame");
|
| - gvr::Frame frame = swap_chain_->AcquireFrame();
|
| - TRACE_EVENT_END0("gpu", "VrShellGl::AcquireFrame");
|
| - if (!frame.is_valid()) {
|
| - return;
|
| - }
|
| - frame.BindBuffer(kFramePrimaryBuffer);
|
| -
|
| - if (web_vr_mode_) {
|
| - DrawWebVr();
|
| - }
|
| -
|
| - gvr::Mat4f head_pose;
|
| -
|
| - // When using async reprojection, we need to know which pose was
|
| - // used in the WebVR app for drawing this frame and supply it when
|
| - // submitting. Technically we don't need a pose if not reprojecting,
|
| - // but keeping it uninitialized seems likely to cause problems down
|
| - // the road. Copying it is cheaper than fetching a new one.
|
| - if (web_vr_mode_) {
|
| - static_assert(!((kPoseRingBufferSize - 1) & kPoseRingBufferSize),
|
| - "kPoseRingBufferSize must be a power of 2");
|
| - head_pose = webvr_head_pose_[frame_index % kPoseRingBufferSize];
|
| } else {
|
| gvr::ClockTimePoint target_time = gvr::GvrApi::GetTimePointNow();
|
| target_time.monotonic_system_time_nanos += kPredictionTimeWithoutVsyncNanos;
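
Note: the static_assert above requires kPoseRingBufferSize to be a power of two because frame_index is a uint16_t that wraps at 65536; the slot frame_index % kPoseRingBufferSize only advances consecutively across that wrap when the ring size divides 65536. The pending_bounds_ loop uses the same idea in reverse: a queued index that is numerically smaller than the current frame_index gets max added back on, so a bounds update queued for frame 1 is still treated as "upcoming" when the current frame is 65533. A small self-contained check of the arithmetic (illustrative only, not part of the patch):

  #include <cassert>
  #include <cstdint>

  int main() {
    constexpr unsigned kSize = 8;   // kPoseRingBufferSize in vr_shell_gl.cc
    uint16_t before = 65535;        // last frame index before the wrap
    uint16_t after = before + 1;    // wraps to 0
    // Slots stay consecutive across the wrap: 65535 % 8 == 7, 0 % 8 == 0.
    assert((before % kSize + 1) % kSize == after % kSize);
    // With a non-power-of-two size such as 10, the wrap would skip slots:
    // 65535 % 10 == 5, but the next frame lands in slot 0, not slot 6.
    return 0;
  }
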
|
| @@ -795,31 +712,21 @@
|
| // Update the render position of all UI elements (including desktop).
|
| scene_->UpdateTransforms(TimeInMicroseconds());
|
|
|
| - {
|
| - TRACE_EVENT0("gpu", "VrShellGl::UpdateController");
|
| - UpdateController(GetForwardVector(head_pose));
|
| - }
|
| -
|
| - // Finish drawing in the primary buffer, and draw the headlocked buffer
|
| - // if needed. This must be the last drawing call, this method will
|
| - // return with no frame being bound.
|
| - DrawVrShellAndUnbind(head_pose, frame);
|
| -
|
| - {
|
| - TRACE_EVENT0("gpu", "VrShellGl::Submit");
|
| - frame.Submit(*buffer_viewport_list_, head_pose);
|
| - }
|
| + UpdateController(GetForwardVector(head_pose));
|
| +
|
| + DrawVrShell(head_pose, frame);
|
| +
|
| + frame.Unbind();
|
| + frame.Submit(*buffer_viewport_list_, head_pose);
|
|
|
| // No need to swap buffers for surfaceless rendering.
|
| if (!surfaceless_rendering_) {
|
| // TODO(mthiesse): Support asynchronous SwapBuffers.
|
| - TRACE_EVENT0("gpu", "VrShellGl::SwapBuffers");
|
| surface_->SwapBuffers();
|
| }
|
| }
|
|
|
| -void VrShellGl::DrawVrShellAndUnbind(const gvr::Mat4f& head_pose,
|
| - gvr::Frame& frame) {
|
| +void VrShellGl::DrawVrShell(const gvr::Mat4f& head_pose, gvr::Frame& frame) {
|
| TRACE_EVENT0("gpu", "VrShellGl::DrawVrShell");
|
| std::vector<const ContentRectangle*> head_locked_elements;
|
| std::vector<const ContentRectangle*> world_elements;
|
| @@ -856,7 +763,6 @@
|
| DrawUiView(&head_pose, world_elements, render_size_primary_,
|
| kViewportListPrimaryOffset);
|
| }
|
| - frame.Unbind(); // Done with the primary buffer.
|
|
|
| if (!head_locked_elements.empty()) {
|
| // Add head-locked viewports. The list gets reset to just
|
| @@ -869,13 +775,21 @@
|
| *headlocked_right_viewport_);
|
|
|
| // Bind the headlocked framebuffer.
|
| + // TODO(mthiesse): We don't unbind this? Maybe some cleanup is in order
|
| + // here.
|
| frame.BindBuffer(kFrameHeadlockedBuffer);
|
| glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
|
| glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
|
| DrawUiView(nullptr, head_locked_elements, render_size_headlocked_,
|
| kViewportListHeadlockedOffset);
|
| - frame.Unbind(); // Done with the headlocked buffer.
|
| - }
|
| + }
|
| +}
|
| +
|
| +gvr::Sizei VrShellGl::GetWebVRCompositorSurfaceSize() {
|
| + // This is a stopgap while we're using the WebVR compositor rendering path.
|
| + // TODO(klausw,crbug.com/655722): Remove this method and member once we're
|
| + // using a separate WebVR render surface.
|
| + return content_tex_physical_size_;
|
| }
|
|
|
| void VrShellGl::DrawUiView(const gvr::Mat4f* head_pose,
|
| @@ -1084,15 +998,7 @@
|
| glDisable(GL_BLEND);
|
| glDisable(GL_POLYGON_OFFSET_FILL);
|
|
|
| - // We're redrawing over the entire viewport, but it's generally more
|
| - // efficient on mobile tiling GPUs to clear anyway as a hint that
|
| - // we're done with the old content. TODO(klausw,crbug.com/700389):
|
| - // investigate using glDiscardFramebufferEXT here since that's more
|
| - // efficient on desktop, but it would need a capability check since
|
| - // it's not supported on older devices such as Nexus 5X.
|
| - glClear(GL_COLOR_BUFFER_BIT);
|
| -
|
| - glViewport(0, 0, webvr_surface_size_.width, webvr_surface_size_.height);
|
| + glViewport(0, 0, render_size_primary_.width, render_size_primary_.height);
|
| vr_shell_renderer_->GetWebVrRenderer()->Draw(webvr_texture_id_);
|
| }
|
|
|
| @@ -1123,15 +1029,13 @@
|
|
|
| void VrShellGl::UpdateWebVRTextureBounds(int16_t frame_index,
|
| const gvr::Rectf& left_bounds,
|
| - const gvr::Rectf& right_bounds,
|
| - const gvr::Sizei& source_size) {
|
| + const gvr::Rectf& right_bounds) {
|
| if (frame_index < 0) {
|
| webvr_left_viewport_->SetSourceUv(left_bounds);
|
| webvr_right_viewport_->SetSourceUv(right_bounds);
|
| - CreateOrResizeWebVRSurface(source_size);
|
| } else {
|
| pending_bounds_.emplace(
|
| - frame_index, WebVrBounds(left_bounds, right_bounds, source_size));
|
| + std::make_pair(frame_index, std::make_pair(left_bounds, right_bounds)));
|
| }
|
| }
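
Note: this hunk switches pending_bounds_ back to plain std::pair entries. The matching declarations live in vr_shell_gl.h, which is not shown in this diff; based on the .first/.second usage in DrawFrame above, they presumably look roughly like the sketch below (an assumption, not quoted from the header; gvr::Rectf comes from the GVR SDK headers that file already pulls in).

  #include <cstdint>
  #include <queue>
  #include <utility>

  // Presumed member declarations in vr_shell_gl.h (illustrative only):
  using BoundsPair = std::pair<gvr::Rectf, gvr::Rectf>;
  std::queue<std::pair<uint16_t, BoundsPair>> pending_bounds_;
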
|
|
|
| @@ -1165,12 +1069,6 @@
|
| }
|
|
|
| void VrShellGl::OnVSync() {
|
| - while (premature_received_frames_ > 0) {
|
| - TRACE_EVENT0("gpu", "VrShellGl::OnWebVRFrameAvailableRetry");
|
| - --premature_received_frames_;
|
| - OnWebVRFrameAvailable();
|
| - }
|
| -
|
| base::TimeTicks now = base::TimeTicks::Now();
|
| base::TimeTicks target;
|
|
|
| @@ -1190,9 +1088,7 @@
|
| pending_vsync_ = true;
|
| pending_time_ = time;
|
| }
|
| - if (!web_vr_mode_) {
|
| - DrawFrame(-1);
|
| - }
|
| + DrawFrame();
|
| }
|
|
|
| void VrShellGl::OnRequest(device::mojom::VRVSyncProviderRequest request) {
|
| @@ -1262,11 +1158,8 @@
|
| void VrShellGl::CreateVRDisplayInfo(
|
| const base::Callback<void(device::mojom::VRDisplayInfoPtr)>& callback,
|
| uint32_t device_id) {
|
| - // This assumes that the initial webvr_surface_size_ was set to the
|
| - // appropriate recommended render resolution as the default size during
|
| - // InitializeGl. Revisit if the initialization order changes.
|
| device::mojom::VRDisplayInfoPtr info = VrShell::CreateVRDisplayInfo(
|
| - gvr_api_.get(), webvr_surface_size_, device_id);
|
| + gvr_api_.get(), content_tex_physical_size_, device_id);
|
| main_thread_task_runner_->PostTask(
|
| FROM_HERE,
|
| base::Bind(&RunVRDisplayInfoCallback, callback, base::Passed(&info)));
|
|
|