// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/media/android_deferred_rendering_backing_strategy.h"

#include <EGL/egl.h>
#include <EGL/eglext.h>

#include <algorithm>

#include "base/android/build_info.h"
#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "content/common/gpu/media/avda_codec_image.h"
#include "content/common/gpu/media/avda_return_on_failure.h"
#include "content/common/gpu/media/avda_shared_state.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/gl_stream_texture_image.h"
#include "gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/ipc/common/gpu_surface_lookup.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "ui/gl/android/surface_texture.h"
#include "ui/gl/egl_util.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_surface_egl.h"
#include "ui/gl/scoped_binders.h"
#include "ui/gl/scoped_make_current.h"

namespace content {

AndroidDeferredRenderingBackingStrategy::
    AndroidDeferredRenderingBackingStrategy(AVDAStateProvider* state_provider)
    : state_provider_(state_provider), media_codec_(nullptr) {}

AndroidDeferredRenderingBackingStrategy::
    ~AndroidDeferredRenderingBackingStrategy() {}

gfx::ScopedJavaSurface AndroidDeferredRenderingBackingStrategy::Initialize(
    int surface_view_id) {
  shared_state_ = new AVDASharedState();

  // Create a texture for the SurfaceTexture to use. We don't attach it here
  // so that it gets attached in the compositor gl context in the common case.
  GLuint service_id = 0;
  glGenTextures(1, &service_id);
  DCHECK(service_id);
  shared_state_->set_surface_texture_service_id(service_id);

  gfx::ScopedJavaSurface surface;
  if (surface_view_id != media::VideoDecodeAccelerator::Config::kNoSurfaceID) {
    surface = gpu::GpuSurfaceLookup::GetInstance()->AcquireJavaSurface(
        surface_view_id);
  } else {
    if (DoesSurfaceTextureDetachWork()) {
      // Create the SurfaceTexture attached to texture 0, then detach it.
      // Detaching silently fails to delete texture 0, which is harmless.
      surface_texture_ = gfx::SurfaceTexture::Create(0);
      surface_texture_->DetachFromGLContext();
    } else {
      // Detach doesn't work reliably on all platforms, so just attach the
      // SurfaceTexture here and accept a likely context switch later.
      surface_texture_ = gfx::SurfaceTexture::Create(service_id);
      shared_state_->DidAttachSurfaceTexture();
    }
    surface = gfx::ScopedJavaSurface(surface_texture_.get());
  }

  return surface;
}

void AndroidDeferredRenderingBackingStrategy::Cleanup(
    bool have_context,
    const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) {
  // If we failed before Initialize, then do nothing.
  if (!shared_state_)
    return;

  // Make sure that no PictureBuffer textures refer to the SurfaceTexture or to
  // the service_id that we created for it.
  for (const std::pair<int, media::PictureBuffer>& entry : buffers) {
    ReleaseCodecBufferForPicture(entry.second);
    SetImageForPicture(entry.second, nullptr);
  }

  // If we're rendering to a SurfaceTexture we can make a copy of the current
  // front buffer so that the PictureBuffer textures are still valid.
  if (surface_texture_ && have_context && ShouldCopyPictures())
    CopySurfaceTextureToPictures(buffers);

  // Now that no AVDACodecImages refer to the SurfaceTexture's texture, delete
  // the texture name.
  GLuint service_id = shared_state_->surface_texture_service_id();
  if (service_id > 0 && have_context)
    glDeleteTextures(1, &service_id);
}

scoped_refptr<gfx::SurfaceTexture>
AndroidDeferredRenderingBackingStrategy::GetSurfaceTexture() const {
  return surface_texture_;
}

uint32_t AndroidDeferredRenderingBackingStrategy::GetTextureTarget() const {
  // If we're using a surface texture, then we need an external texture target
  // to sample from it. If not, then we'll use transparent 2D textures to draw
  // a hole through which to see the SurfaceView. This is normally needed only
  // for the devtools inspector, since the overlay mechanism handles it
  // otherwise.
  return surface_texture_ ? GL_TEXTURE_EXTERNAL_OES : GL_TEXTURE_2D;
}

gfx::Size AndroidDeferredRenderingBackingStrategy::GetPictureBufferSize()
    const {
  // For SurfaceView, request a 1x1 2D texture to reduce memory during
  // initialization. For SurfaceTexture, allocate a picture buffer that is the
  // actual frame size. Note that it will be an external texture anyway, so it
  // doesn't allocate an image of that size. However, it's still important to
  // get the coded size right, so that VideoLayerImpl doesn't try to scale the
  // texture when building the quad for it.
  return surface_texture_ ? state_provider_->GetSize() : gfx::Size(1, 1);
}

void AndroidDeferredRenderingBackingStrategy::SetImageForPicture(
    const media::PictureBuffer& picture_buffer,
    const scoped_refptr<gpu::gles2::GLStreamTextureImage>& image) {
  gpu::gles2::TextureRef* texture_ref =
      state_provider_->GetTextureForPicture(picture_buffer);
  RETURN_IF_NULL(texture_ref);

  gpu::gles2::TextureManager* texture_manager =
      state_provider_->GetGlDecoder()->GetContextGroup()->texture_manager();
  RETURN_IF_NULL(texture_manager);

  if (image) {
    // Also set the parameters for the level if we're not clearing
    // the image.
    const gfx::Size size = state_provider_->GetSize();
    texture_manager->SetLevelInfo(texture_ref, GetTextureTarget(), 0, GL_RGBA,
                                  size.width(), size.height(), 1, 0, GL_RGBA,
                                  GL_UNSIGNED_BYTE, gfx::Rect());

    // Override the texture's service_id, so that it will use the one that
    // will be / is attached to the SurfaceTexture.
    DCHECK(shared_state_->surface_texture_service_id());
    texture_ref->texture()->SetUnownedServiceId(
        shared_state_->surface_texture_service_id());

    static_cast<AVDACodecImage*>(image.get())
        ->SetTexture(texture_ref->texture());
  } else {
    // Clear the unowned service_id, so that this texture is no longer going
    // to depend on the surface texture at all.
    texture_ref->texture()->SetUnownedServiceId(0);
  }

  // For SurfaceTexture we set the image to UNBOUND so that the implementation
  // will call CopyTexImage, which is where AVDACodecImage updates the
  // SurfaceTexture to the right frame.
  // For SurfaceView we set the image to be BOUND because ScheduleOverlayPlane
  // expects it. If something tries to sample from this texture it won't work,
  // but there's no way to sample from a SurfaceView anyway, so it doesn't
  // matter. The only way to use this texture is to schedule it as an overlay.
  const gpu::gles2::Texture::ImageState image_state =
      surface_texture_ ? gpu::gles2::Texture::UNBOUND
                       : gpu::gles2::Texture::BOUND;
  texture_manager->SetLevelStreamTextureImage(texture_ref, GetTextureTarget(),
                                              0, image.get(), image_state);
}

void AndroidDeferredRenderingBackingStrategy::UseCodecBufferForPictureBuffer(
    int32_t codec_buf_index,
    const media::PictureBuffer& picture_buffer) {
  // Make sure that the decoder is available.
  RETURN_IF_NULL(state_provider_->GetGlDecoder());

  // Notify the AVDACodecImage for picture_buffer that it should use the
  // decoded buffer codec_buf_index to render this frame.
  AVDACodecImage* avda_image =
      shared_state_->GetImageForPicture(picture_buffer.id());
  RETURN_IF_NULL(avda_image);

  // Note that this is not a race, since we do not re-use a PictureBuffer
  // until after the CC is done drawing it.
  pictures_out_for_display_.push_back(picture_buffer.id());
  avda_image->SetMediaCodecBufferIndex(codec_buf_index);
  avda_image->SetSize(state_provider_->GetSize());

  MaybeRenderEarly();
}

void AndroidDeferredRenderingBackingStrategy::AssignOnePictureBuffer(
    const media::PictureBuffer& picture_buffer,
    bool have_context) {
  // Attach a GLImage to each texture that will use the surface texture.
  // We use a refptr here in case SetImageForPicture fails.
  scoped_refptr<gpu::gles2::GLStreamTextureImage> gl_image =
      new AVDACodecImage(picture_buffer.id(), shared_state_, media_codec_,
                         state_provider_->GetGlDecoder(), surface_texture_);
  SetImageForPicture(picture_buffer, gl_image);

  if (!surface_texture_ && have_context) {
    // To make devtools work, we're using a 2D texture. Make it transparent,
    // so that it draws a hole for the SurfaceView to show through. This is
    // only because devtools draws and reads back, which skips overlay
    // processing. It's unclear why devtools renders twice -- once normally,
    // and once including a readback layer. The result is that the device
    // screen flashes as we alternately draw the overlay hole and this
    // texture, unless we make the texture transparent.
    static const uint8_t rgba[] = {0, 0, 0, 0};
    const gfx::Size size(1, 1);
    DCHECK_LE(1u, picture_buffer.texture_ids().size());
    glBindTexture(GL_TEXTURE_2D, picture_buffer.texture_ids()[0]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, size.width(), size.height(), 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, rgba);
  }
}

void AndroidDeferredRenderingBackingStrategy::ReleaseCodecBufferForPicture(
    const media::PictureBuffer& picture_buffer) {
  AVDACodecImage* avda_image =
      shared_state_->GetImageForPicture(picture_buffer.id());
  RETURN_IF_NULL(avda_image);
  avda_image->UpdateSurface(AVDACodecImage::UpdateMode::DISCARD_CODEC_BUFFER);
}

void AndroidDeferredRenderingBackingStrategy::ReuseOnePictureBuffer(
    const media::PictureBuffer& picture_buffer) {
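  // The client has returned this picture, so it is no longer out for display;
  // drop it from the list via the erase-remove idiom.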
  pictures_out_for_display_.erase(
      std::remove(pictures_out_for_display_.begin(),
                  pictures_out_for_display_.end(), picture_buffer.id()),
      pictures_out_for_display_.end());

  // At this point, the CC must be done with the picture. We can't really
  // check for that here directly; it's guaranteed in gpu_video_decoder.cc,
  // when it waits on the sync point before releasing the mailbox. That sync
  // point is inserted by destroying the resource in VideoLayerImpl::DidDraw.
  ReleaseCodecBufferForPicture(picture_buffer);
  MaybeRenderEarly();
}

void AndroidDeferredRenderingBackingStrategy::ReleaseCodecBuffers(
    const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) {
  for (const std::pair<int, media::PictureBuffer>& entry : buffers)
    ReleaseCodecBufferForPicture(entry.second);
}

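// Render a pending frame eagerly when possible: if exactly one picture is out
// for display, render it to the front buffer right away; otherwise, for
// SurfaceTexture-backed output, render the earliest unrendered frame to the
// back buffer if the back buffer is free.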
void AndroidDeferredRenderingBackingStrategy::MaybeRenderEarly() {
  // See if we can consume the front buffer / render to the SurfaceView.
  if (pictures_out_for_display_.size() == 1u) {
    AVDACodecImage* avda_image =
        shared_state_->GetImageForPicture(*pictures_out_for_display_.begin());
    RETURN_IF_NULL(avda_image);
    avda_image->UpdateSurface(
        AVDACodecImage::UpdateMode::RENDER_TO_FRONT_BUFFER);
    return;
  }

  // Back buffer rendering is only available for surface textures.
  if (!surface_texture_)
    return;

  // See if the back buffer is free. If so, then render the earliest frame. The
  // list is in render order, so we can just use the first unrendered frame if
  // there is back buffer space.
  AVDACodecImage* first_renderable_image = nullptr;
  for (int id : pictures_out_for_display_) {
    AVDACodecImage* avda_image = shared_state_->GetImageForPicture(id);
    if (!avda_image)
      continue;

    // If the back buffer is unavailable, there's nothing left to do.
    if (avda_image->is_rendered_to_back_buffer())
      return;

    // If the image is rendered to the front buffer or has been dropped, it is
    // not valid for rendering.
    if (avda_image->is_rendered())
      continue;

    if (!first_renderable_image)
      first_renderable_image = avda_image;
  }

  if (first_renderable_image) {
    first_renderable_image->UpdateSurface(
        AVDACodecImage::UpdateMode::RENDER_TO_BACK_BUFFER);
  }
}

void AndroidDeferredRenderingBackingStrategy::CodecChanged(
    media::VideoCodecBridge* codec) {
  media_codec_ = codec;
  shared_state_->CodecChanged(codec);
}

void AndroidDeferredRenderingBackingStrategy::OnFrameAvailable() {
  shared_state_->SignalFrameAvailable();
}

bool AndroidDeferredRenderingBackingStrategy::ArePicturesOverlayable() {
  // SurfaceView frames are always overlayable because that's the only way to
  // display them.
  return !surface_texture_;
}

void AndroidDeferredRenderingBackingStrategy::UpdatePictureBufferSize(
    media::PictureBuffer* picture_buffer,
    const gfx::Size& new_size) {
  // This strategy uses EGL images which manage the texture size for us. We
  // simply update the PictureBuffer meta-data and leave the texture as-is.
  picture_buffer->set_size(new_size);
}

void AndroidDeferredRenderingBackingStrategy::CopySurfaceTextureToPictures(
    const AndroidVideoDecodeAccelerator::OutputBufferMap& buffers) {
  DVLOG(3) << __FUNCTION__;

  // Don't try to copy if the SurfaceTexture was never attached because that
  // means it was never updated.
  if (!shared_state_->surface_texture_is_attached())
    return;

  gpu::gles2::GLES2Decoder* gl_decoder = state_provider_->GetGlDecoder().get();
  if (!gl_decoder)
    return;

  const gfx::Size size = state_provider_->GetSize();

  // Create a 2D texture to hold a copy of the SurfaceTexture's front buffer.
  GLuint tmp_texture_id;
  glGenTextures(1, &tmp_texture_id);
  {
    gfx::ScopedTextureBinder texture_binder(GL_TEXTURE_2D, tmp_texture_id);
    // The target texture's size will exactly match the source.
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, size.width(), size.height(), 0,
                 GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
  }

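  // Copy the SurfaceTexture's current frame into the temporary 2D texture,
  // applying the SurfaceTexture's transform matrix during the copy.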
  float transform_matrix[16];
  surface_texture_->GetTransformMatrix(transform_matrix);

  gpu::CopyTextureCHROMIUMResourceManager copier;
  copier.Initialize(
      gl_decoder,
      gl_decoder->GetContextGroup()->feature_info()->feature_flags());
  copier.DoCopyTextureWithTransform(gl_decoder, GL_TEXTURE_EXTERNAL_OES,
                                    shared_state_->surface_texture_service_id(),
                                    GL_TEXTURE_2D, tmp_texture_id, size.width(),
                                    size.height(), true, false, false,
                                    transform_matrix);

  // Create an EGLImage from the 2D texture we just copied into. By associating
  // the EGLImage with the PictureBuffer textures they will remain valid even
  // after we delete the 2D texture and EGLImage.
  const EGLImageKHR egl_image = eglCreateImageKHR(
      gfx::GLSurfaceEGL::GetHardwareDisplay(), eglGetCurrentContext(),
      EGL_GL_TEXTURE_2D_KHR, reinterpret_cast<EGLClientBuffer>(tmp_texture_id),
      nullptr /* attrs */);

  glDeleteTextures(1, &tmp_texture_id);
  DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());

  if (egl_image == EGL_NO_IMAGE_KHR) {
    DLOG(ERROR) << "Failed creating EGLImage: " << ui::GetLastEGLErrorString();
    return;
  }

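  // Bind the EGLImage to each PictureBuffer texture so they all keep a
  // reference to the copied frame even after the EGLImage is destroyed.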
  for (const std::pair<int, media::PictureBuffer>& entry : buffers) {
    gpu::gles2::TextureRef* texture_ref =
        state_provider_->GetTextureForPicture(entry.second);
    if (!texture_ref)
      continue;
    gfx::ScopedTextureBinder texture_binder(
        GL_TEXTURE_EXTERNAL_OES, texture_ref->texture()->service_id());
    glEGLImageTargetTexture2DOES(GL_TEXTURE_EXTERNAL_OES, egl_image);
    DCHECK_EQ(static_cast<GLenum>(GL_NO_ERROR), glGetError());
  }

  EGLBoolean result =
      eglDestroyImageKHR(gfx::GLSurfaceEGL::GetHardwareDisplay(), egl_image);
  if (result == EGL_FALSE) {
    DLOG(ERROR) << "Error destroying EGLImage: "
                << ui::GetLastEGLErrorString();
  }
}

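// Returns false when the surface_texture_cant_detach GPU driver workaround is
// set, with an extra hardcoded device exception below; otherwise returns true.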
bool AndroidDeferredRenderingBackingStrategy::DoesSurfaceTextureDetachWork()
    const {
  bool surface_texture_detach_works = true;
  if (gpu::gles2::GLES2Decoder* gl_decoder =
          state_provider_->GetGlDecoder().get()) {
    if (gpu::gles2::ContextGroup* group = gl_decoder->GetContextGroup()) {
      if (gpu::gles2::FeatureInfo* feature_info = group->feature_info()) {
        surface_texture_detach_works =
            !feature_info->workarounds().surface_texture_cant_detach;
      }
    }
  }

  // As a special case, the MicroMax A114 doesn't get the workaround, even
  // though it should. Hardcode it here until we get a device and figure out
  // why. crbug.com/591600
  if (base::android::BuildInfo::GetInstance()->sdk_int() <= 18) {  // JB
    const std::string brand(
        base::ToLowerASCII(base::android::BuildInfo::GetInstance()->brand()));
    if (brand == "micromax") {
      const std::string model(
          base::ToLowerASCII(base::android::BuildInfo::GetInstance()->model()));
      if (model.find("a114") != std::string::npos)
        surface_texture_detach_works = false;
    }
  }

  return surface_texture_detach_works;
}

bool AndroidDeferredRenderingBackingStrategy::ShouldCopyPictures() const {
  // Mali devices running KitKat or earlier crash when we try to do this. We
  // don't know if it's due to detaching a surface texture, but it's the same
  // set of devices.
  if (!DoesSurfaceTextureDetachWork())
    return false;

  // Other devices are unreliable for other reasons (e.g., EGLImage).
  if (gpu::gles2::GLES2Decoder* gl_decoder =
          state_provider_->GetGlDecoder().get()) {
    if (gpu::gles2::ContextGroup* group = gl_decoder->GetContextGroup()) {
      if (gpu::gles2::FeatureInfo* feature_info = group->feature_info()) {
        return !feature_info->workarounds().avda_dont_copy_pictures;
      }
    }
  }

  // Assume that it's safe to copy.
  return true;
}

}  // namespace content