OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/pepper/video_decoder_shim.h" | 5 #include "content/renderer/pepper/video_decoder_shim.h" |
6 | 6 |
7 #include <GLES2/gl2.h> | 7 #include <GLES2/gl2.h> |
8 #include <GLES2/gl2ext.h> | 8 #include <GLES2/gl2ext.h> |
9 #include <GLES2/gl2extchromium.h> | 9 #include <GLES2/gl2extchromium.h> |
10 | 10 |
11 #include "base/bind.h" | 11 #include "base/bind.h" |
| 12 #ifndef NDEBUG |
| 13 #include "base/logging.h" |
| 14 #endif |
12 #include "base/numerics/safe_conversions.h" | 15 #include "base/numerics/safe_conversions.h" |
13 #include "base/single_thread_task_runner.h" | 16 #include "base/single_thread_task_runner.h" |
14 #include "cc/blink/context_provider_web_context.h" | 17 #include "cc/blink/context_provider_web_context.h" |
15 #include "content/public/renderer/render_thread.h" | 18 #include "content/public/renderer/render_thread.h" |
16 #include "content/renderer/pepper/pepper_video_decoder_host.h" | 19 #include "content/renderer/pepper/pepper_video_decoder_host.h" |
17 #include "content/renderer/render_thread_impl.h" | 20 #include "content/renderer/render_thread_impl.h" |
18 #include "gpu/command_buffer/client/gles2_implementation.h" | 21 #include "gpu/command_buffer/client/gles2_implementation.h" |
19 #include "media/base/decoder_buffer.h" | 22 #include "media/base/decoder_buffer.h" |
20 #include "media/base/limits.h" | 23 #include "media/base/limits.h" |
21 #include "media/base/video_decoder.h" | 24 #include "media/base/video_decoder.h" |
22 #include "media/blink/skcanvas_video_renderer.h" | 25 #include "media/blink/skcanvas_video_renderer.h" |
23 #include "media/filters/ffmpeg_video_decoder.h" | 26 #include "media/filters/ffmpeg_video_decoder.h" |
24 #include "media/filters/vpx_video_decoder.h" | 27 #include "media/filters/vpx_video_decoder.h" |
25 #include "media/video/picture.h" | 28 #include "media/video/picture.h" |
26 #include "media/video/video_decode_accelerator.h" | 29 #include "media/video/video_decode_accelerator.h" |
27 #include "ppapi/c/pp_errors.h" | 30 #include "ppapi/c/pp_errors.h" |
| 31 #include "third_party/skia/include/gpu/GrTypes.h" |
28 | 32 |
29 namespace content { | 33 namespace content { |
30 | 34 |
| 35 static const uint32_t kGrInvalidateState = |
| 36 kRenderTarget_GrGLBackendState | kTextureBinding_GrGLBackendState | |
| 37 kView_GrGLBackendState | kVertex_GrGLBackendState | |
| 38 kProgram_GrGLBackendState; |
| 39 |
| 40 // YUV->RGB converter class using a shader and FBO. |
| 41 class VideoDecoderShim::YUVConverter { |
| 42 public: |
| 43 YUVConverter(const scoped_refptr<cc_blink::ContextProviderWebContext>&); |
| 44 ~YUVConverter(); |
| 45 bool Initialize(); |
| 46 void Convert(const scoped_refptr<media::VideoFrame>& frame, GLuint tex_out); |
| 47 |
| 48 private: |
| 49 GLuint CreateShader(); |
| 50 GLuint CompileShader(const char* name, GLuint type, const char* code); |
| 51 GLuint CreateProgram(const char* name, GLuint vshader, GLuint fshader); |
| 52 GLuint CreateTexture(); |
| 53 void SetTexcoordClamp(uint32_t stride, uint32_t width); |
| 54 |
| 55 scoped_refptr<cc_blink::ContextProviderWebContext> context_provider_; |
| 56 gpu::gles2::GLES2Interface* gl_; |
| 57 GLuint frame_buffer_; |
| 58 GLuint vertex_buffer_; |
| 59 GLuint program_; |
| 60 |
| 61 GLuint y_texture_; |
| 62 GLuint u_texture_; |
| 63 GLuint v_texture_; |
| 64 GLuint a_texture_; |
| 65 |
| 66 GLuint internal_format_; |
| 67 GLuint format_; |
| 68 media::VideoFrame::Format video_format_; |
| 69 |
| 70 GLuint y_width_; |
| 71 GLuint y_height_; |
| 72 |
| 73 GLuint uv_width_; |
| 74 GLuint uv_height_; |
| 75 uint32_t uv_height_divisor_; |
| 76 |
| 77 GLfloat clamp_value_; |
| 78 GLuint clamp_width_; |
| 79 GLint clamp_width_loc_; |
| 80 |
| 81 GLint yuv_matrix_loc_; |
| 82 GLint yuv_adjust_loc_; |
| 83 |
| 84 DISALLOW_COPY_AND_ASSIGN(YUVConverter); |
| 85 }; |
| 86 |
| 87 VideoDecoderShim::YUVConverter::YUVConverter( |
| 88 const scoped_refptr<cc_blink::ContextProviderWebContext>& context_provider) |
| 89 : context_provider_(context_provider), |
| 90 gl_(context_provider_->ContextGL()), |
| 91 frame_buffer_(0), |
| 92 vertex_buffer_(0), |
| 93 program_(0), |
| 94 y_texture_(0), |
| 95 u_texture_(0), |
| 96 v_texture_(0), |
| 97 a_texture_(0), |
| 98 internal_format_(0), |
| 99 format_(0), |
| 100 video_format_(media::VideoFrame::UNKNOWN), |
| 101 y_width_(2), |
| 102 y_height_(2), |
| 103 uv_width_(2), |
| 104 uv_height_(2), |
| 105 uv_height_divisor_(1), |
| 106 clamp_value_(1.f), |
| 107 clamp_width_(0), |
| 108 clamp_width_loc_(0), |
| 109 yuv_matrix_loc_(0), |
| 110 yuv_adjust_loc_(0) { |
| 111 DCHECK(gl_); |
| 112 } |
| 113 |
| 114 VideoDecoderShim::YUVConverter::~YUVConverter() { |
| 115 if (y_texture_) |
| 116 gl_->DeleteTextures(1, &y_texture_); |
| 117 |
| 118 if (u_texture_) |
| 119 gl_->DeleteTextures(1, &u_texture_); |
| 120 |
| 121 if (v_texture_) |
| 122 gl_->DeleteTextures(1, &v_texture_); |
| 123 |
| 124 if (a_texture_) |
| 125 gl_->DeleteTextures(1, &a_texture_); |
| 126 |
| 127 if (frame_buffer_) |
| 128 gl_->DeleteFramebuffers(1, &frame_buffer_); |
| 129 |
| 130 if (vertex_buffer_) |
| 131 gl_->DeleteBuffers(1, &vertex_buffer_); |
| 132 |
| 133 if (program_) |
| 134 gl_->DeleteProgram(program_); |
| 135 } |
| 136 |
| 137 GLuint VideoDecoderShim::YUVConverter::CreateTexture() { |
| 138 GLuint tex = 0; |
| 139 |
| 140 gl_->GenTextures(1, &tex); |
| 141 gl_->BindTexture(GL_TEXTURE_2D, tex); |
| 142 |
| 143 // Create texture with default size - will be resized upon first frame. |
| 144 gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, 2, 2, 0, format_, |
| 145 GL_UNSIGNED_BYTE, NULL); |
| 146 |
| 147 gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); |
| 148 gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); |
| 149 gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); |
| 150 gl_->TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); |
| 151 |
| 152 gl_->BindTexture(GL_TEXTURE_2D, 0); |
| 153 |
| 154 return tex; |
| 155 } |
| 156 |
| 157 GLuint VideoDecoderShim::YUVConverter::CompileShader(const char* name, |
| 158 GLuint type, |
| 159 const char* code) { |
| 160 GLuint shader = gl_->CreateShader(type); |
| 161 |
| 162 gl_->ShaderSource(shader, 1, (const GLchar**)&code, NULL); |
| 163 gl_->CompileShader(shader); |
| 164 |
| 165 #ifndef NDEBUG |
| 166 GLint status = 0; |
| 167 |
| 168 gl_->GetShaderiv(shader, GL_COMPILE_STATUS, &status); |
| 169 if (status != GL_TRUE) { |
| 170 GLint max_length = 0; |
| 171 GLint actual_length = 0; |
| 172 gl_->GetShaderiv(shader, GL_INFO_LOG_LENGTH, &max_length); |
| 173 |
| 174 // The max_length includes the NULL character. |
| 175 std::string error_log(max_length, 0); |
| 176 gl_->GetShaderInfoLog(shader, max_length, &actual_length, &error_log[0]); |
| 177 |
| 178 LOG(ERROR) << name << " shader compilation failed: " << error_log.c_str(); |
| 179 gl_->DeleteShader(shader); |
| 180 return 0; |
| 181 } |
| 182 #endif |
| 183 |
| 184 return shader; |
| 185 } |
| 186 |
| 187 GLuint VideoDecoderShim::YUVConverter::CreateProgram(const char* name, |
| 188 GLuint vshader, |
| 189 GLuint fshader) { |
| 190 GLuint program = gl_->CreateProgram(); |
| 191 gl_->AttachShader(program, vshader); |
| 192 gl_->AttachShader(program, fshader); |
| 193 |
| 194 gl_->BindAttribLocation(program, 0, "position"); |
| 195 |
| 196 gl_->LinkProgram(program); |
| 197 |
| 198 #ifndef NDEBUG |
| 199 GLint status = 0; |
| 200 |
| 201 gl_->GetProgramiv(program, GL_LINK_STATUS, &status); |
| 202 if (status != GL_TRUE) { |
| 203 GLint max_length = 0; |
| 204 GLint actual_length = 0; |
| 205 gl_->GetProgramiv(program, GL_INFO_LOG_LENGTH, &max_length); |
| 206 |
| 207 // The max_length includes the NULL character. |
| 208 std::string error_log(max_length, 0); |
| 209 gl_->GetProgramInfoLog(program, max_length, &actual_length, &error_log[0]); |
| 210 |
| 211 LOG(ERROR) << name << " program linking failed: " << error_log.c_str(); |
| 212 return 0; |
| 213 } |
| 214 #endif |
| 215 |
| 216 return program; |
| 217 } |
| 218 |
| 219 GLuint VideoDecoderShim::YUVConverter::CreateShader() { |
| 220 const char* vert_shader = |
| 221 "precision mediump float;\n" |
| 222 "attribute vec2 position;\n" |
| 223 "varying vec2 texcoord;\n" |
| 224 "uniform float clamp_width;\n" |
| 225 "void main()\n" |
| 226 "{\n" |
| 227 " gl_Position = vec4( position.xy, 0, 1 );\n" |
| 228 " vec2 tmp = position*0.5+0.5;\n" |
| 229 " texcoord = vec2(min(tmp.x, clamp_width), tmp.y);\n" |
| 230 "}"; |
| 231 |
| 232 const char* frag_shader = |
| 233 "precision mediump float;\n" |
| 234 "varying vec2 texcoord;\n" |
| 235 "uniform sampler2D y_sampler;\n" |
| 236 "uniform sampler2D u_sampler;\n" |
| 237 "uniform sampler2D v_sampler;\n" |
| 238 "uniform sampler2D a_sampler;\n" |
| 239 "uniform mat3 yuv_matrix;\n" |
| 240 "uniform vec3 yuv_adjust;\n" |
| 241 "void main()\n" |
| 242 "{\n" |
| 243 " vec3 yuv = vec3(texture2D(y_sampler, texcoord).x,\n" |
| 244 " texture2D(u_sampler, texcoord).x,\n" |
| 245 " texture2D(v_sampler, texcoord).x) +\n" |
| 246 " yuv_adjust;\n" |
| 247 " gl_FragColor = vec4(yuv_matrix * yuv, texture2D(a_sampler, " |
| 248 "texcoord).x);\n" |
| 249 "}"; |
| 250 |
| 251 GLuint vertex_shader = |
| 252 CompileShader("Vertex Shader", GL_VERTEX_SHADER, vert_shader); |
| 253 if (!vertex_shader) { |
| 254 return 0; |
| 255 } |
| 256 |
| 257 GLuint fragment_shader = |
| 258 CompileShader("Fragment Shader", GL_FRAGMENT_SHADER, frag_shader); |
| 259 if (!fragment_shader) { |
| 260 gl_->DeleteShader(vertex_shader); |
| 261 return 0; |
| 262 } |
| 263 |
| 264 GLuint program = |
| 265 CreateProgram("YUVConverter Program", vertex_shader, fragment_shader); |
| 266 |
| 267 gl_->DeleteShader(vertex_shader); |
| 268 gl_->DeleteShader(fragment_shader); |
| 269 |
| 270 if (!program) { |
| 271 return 0; |
| 272 } |
| 273 |
| 274 gl_->UseProgram(program); |
| 275 |
| 276 GLint uniform_location; |
| 277 uniform_location = gl_->GetUniformLocation(program, "y_sampler"); |
| 278 DCHECK(uniform_location != -1); |
| 279 gl_->Uniform1i(uniform_location, 0); |
| 280 |
| 281 uniform_location = gl_->GetUniformLocation(program, "u_sampler"); |
| 282 DCHECK(uniform_location != -1); |
| 283 gl_->Uniform1i(uniform_location, 1); |
| 284 |
| 285 uniform_location = gl_->GetUniformLocation(program, "v_sampler"); |
| 286 DCHECK(uniform_location != -1); |
| 287 gl_->Uniform1i(uniform_location, 2); |
| 288 |
| 289 uniform_location = gl_->GetUniformLocation(program, "a_sampler"); |
| 290 DCHECK(uniform_location != -1); |
| 291 gl_->Uniform1i(uniform_location, 3); |
| 292 |
| 293 clamp_width_loc_ = gl_->GetUniformLocation(program, "clamp_width"); |
| 294 DCHECK(clamp_width_loc_ != -1); |
| 295 gl_->Uniform1f(clamp_width_loc_, clamp_value_); |
| 296 |
| 297 gl_->UseProgram(0); |
| 298 |
| 299 yuv_matrix_loc_ = gl_->GetUniformLocation(program, "yuv_matrix"); |
| 300 DCHECK(yuv_matrix_loc_ != -1); |
| 301 |
| 302 yuv_adjust_loc_ = gl_->GetUniformLocation(program, "yuv_adjust"); |
| 303 DCHECK(yuv_adjust_loc_ != -1); |
| 304 |
| 305 return program; |
| 306 } |
| 307 |
| 308 bool VideoDecoderShim::YUVConverter::Initialize() { |
| 309 // If texture_rg extension is not available, use slower GL_LUMINANCE. |
| 310 if (context_provider_->ContextCapabilities().gpu.texture_rg) { |
| 311 internal_format_ = GL_RED_EXT; |
| 312 format_ = GL_RED_EXT; |
| 313 } else { |
| 314 internal_format_ = GL_LUMINANCE; |
| 315 format_ = GL_LUMINANCE; |
| 316 } |
| 317 |
| 318 if (context_provider_->ContextCapabilities().gpu.max_texture_image_units < |
| 319 4) { |
| 320 // We support YUVA textures and require 4 texture units in the fragment |
| 321 // stage. |
| 322 return false; |
| 323 } |
| 324 |
| 325 gl_->PushGroupMarkerEXT(0, "YUVConverterContext"); |
| 326 |
| 327 gl_->GenFramebuffers(1, &frame_buffer_); |
| 328 |
| 329 y_texture_ = CreateTexture(); |
| 330 u_texture_ = CreateTexture(); |
| 331 v_texture_ = CreateTexture(); |
| 332 a_texture_ = CreateTexture(); |
| 333 |
| 334 // Vertex positions. Also converted to texcoords in vertex shader. |
| 335 GLfloat vertex_positions[] = {-1.f, -1.f, 1.f, -1.f, -1.f, 1.f, 1.f, 1.f}; |
| 336 |
| 337 gl_->GenBuffers(1, &vertex_buffer_); |
| 338 gl_->BindBuffer(GL_ARRAY_BUFFER, vertex_buffer_); |
| 339 gl_->BufferData(GL_ARRAY_BUFFER, 2 * sizeof(GLfloat) * 4, vertex_positions, |
| 340 GL_STATIC_DRAW); |
| 341 gl_->BindBuffer(GL_ARRAY_BUFFER, 0); |
| 342 |
| 343 program_ = CreateShader(); |
| 344 |
| 345 gl_->PopGroupMarkerEXT(); |
| 346 |
| 347 context_provider_->InvalidateGrContext(kGrInvalidateState); |
| 348 |
| 349 return (program_ != 0); |
| 350 } |
| 351 |
| 352 void VideoDecoderShim::YUVConverter::SetTexcoordClamp(uint32_t stride, |
| 353 uint32_t width) { |
| 354 clamp_width_ = width; |
| 355 if (width != stride) { |
| 356 // Clamp texcoord width to avoid sampling padding pixels. |
| 357 clamp_value_ = static_cast<float>(width) / static_cast<float>(stride); |
| 358 // Further clamp to 1/2 pixel inside to avoid bilinear sampling errors. |
| 359 clamp_value_ -= (1.f / (2.f * static_cast<float>(stride))); |
| 360 } else { |
| 361 // No clamping necessary if width and stride are equal. |
| 362 clamp_value_ = 1.f; |
| 363 } |
| 364 } |
| 365 |
| 366 void VideoDecoderShim::YUVConverter::Convert( |
| 367 const scoped_refptr<media::VideoFrame>& frame, |
| 368 GLuint tex_out) { |
| 369 const float* yuv_matrix = 0; |
| 370 const float* yuv_adjust = 0; |
| 371 |
| 372 if (video_format_ != frame->format()) { |
| 373 // The constants below were taken from cc/output/gl_renderer.cc. |
| 374 // These values are magic numbers that are used in the transformation from |
| 375 // YUV to RGB color values. They are taken from the following webpage: |
| 376 // http://www.fourcc.org/fccyvrgb.php |
| 377 const float yuv_to_rgb_rec601[9] = { |
| 378 1.164f, 1.164f, 1.164f, 0.0f, -.391f, 2.018f, 1.596f, -.813f, 0.0f, |
| 379 }; |
| 380 const float yuv_to_rgb_jpeg[9] = { |
| 381 1.f, 1.f, 1.f, 0.0f, -.34414f, 1.772f, 1.402f, -.71414f, 0.0f, |
| 382 }; |
| 383 const float yuv_to_rgb_rec709[9] = { |
| 384 1.164f, 1.164f, 1.164f, 0.0f, -0.213f, 2.112f, 1.793f, -0.533f, 0.0f, |
| 385 }; |
| 386 |
| 387 // These values map to 16, 128, and 128 respectively, and are computed |
| 388 // as a fraction over 256 (e.g. 16 / 256 = 0.0625). |
| 389 // They are used in the YUV to RGBA conversion formula: |
| 390 // Y - 16 : Gives 16 values of head and footroom for overshooting |
| 391 // U - 128 : Turns unsigned U into signed U [-128,127] |
| 392 // V - 128 : Turns unsigned V into signed V [-128,127] |
| 393 const float yuv_adjust_constrained[3] = { |
| 394 -0.0625f, -0.5f, -0.5f, |
| 395 }; |
| 396 // Same as above, but without the head and footroom. |
| 397 const float yuv_adjust_full[3] = { |
| 398 0.0f, -0.5f, -0.5f, |
| 399 }; |
| 400 |
| 401 switch (frame->format()) { |
| 402 case media::VideoFrame::YV12: // 420 |
| 403 case media::VideoFrame::YV12A: |
| 404 case media::VideoFrame::I420: |
| 405 uv_height_divisor_ = 2; |
| 406 yuv_matrix = yuv_to_rgb_rec601; |
| 407 yuv_adjust = yuv_adjust_constrained; |
| 408 break; |
| 409 |
| 410 case media::VideoFrame::YV12HD: // 420 |
| 411 uv_height_divisor_ = 2; |
| 412 yuv_matrix = yuv_to_rgb_rec709; |
| 413 yuv_adjust = yuv_adjust_constrained; |
| 414 break; |
| 415 |
| 416 case media::VideoFrame::YV12J: // 420 |
| 417 uv_height_divisor_ = 2; |
| 418 yuv_matrix = yuv_to_rgb_jpeg; |
| 419 yuv_adjust = yuv_adjust_full; |
| 420 break; |
| 421 |
| 422 case media::VideoFrame::YV16: // 422 |
| 423 case media::VideoFrame::YV24: // 444 |
| 424 uv_height_divisor_ = 1; |
| 425 yuv_matrix = yuv_to_rgb_rec601; |
| 426 yuv_adjust = yuv_adjust_constrained; |
| 427 break; |
| 428 |
| 429 default: |
| 430 NOTREACHED(); |
| 431 } |
| 432 |
| 433 video_format_ = frame->format(); |
| 434 |
| 435 // Zero these so everything is reset below. |
| 436 y_width_ = y_height_ = 0; |
| 437 } |
| 438 |
| 439 gl_->PushGroupMarkerEXT(0, "YUVConverterContext"); |
| 440 |
| 441 bool set_clamp = false; |
| 442 |
| 443 uint32_t ywidth = frame->coded_size().width(); |
| 444 uint32_t yheight = frame->coded_size().height(); |
| 445 |
| 446 DCHECK_EQ(frame->stride(media::VideoFrame::kUPlane), |
| 447 frame->stride(media::VideoFrame::kVPlane)); |
| 448 |
| 449 uint32_t ystride = frame->stride(media::VideoFrame::kYPlane); |
| 450 uint32_t uvstride = frame->stride(media::VideoFrame::kUPlane); |
| 451 |
| 452 // The following code assumes that extended GLES 2.0 state like |
| 453 // UNPACK_SKIP* and UNPACK_ROW_LENGTH (if available) are set to defaults. |
| 454 gl_->PixelStorei(GL_UNPACK_ALIGNMENT, 1); |
| 455 |
| 456 if (ystride != y_width_ || yheight != y_height_) { |
| 457 // Choose width based on the stride. Clamp texcoords below. |
| 458 y_width_ = ystride; |
| 459 y_height_ = yheight; |
| 460 |
| 461 uv_width_ = uvstride; |
| 462 uv_height_ = y_height_ / uv_height_divisor_; |
| 463 |
| 464 SetTexcoordClamp(ystride, ywidth); |
| 465 set_clamp = true; |
| 466 |
| 467 // Re-create to resize the textures and upload data. |
| 468 gl_->ActiveTexture(GL_TEXTURE0); |
| 469 gl_->BindTexture(GL_TEXTURE_2D, y_texture_); |
| 470 gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, y_width_, y_height_, 0, |
| 471 format_, GL_UNSIGNED_BYTE, |
| 472 frame->data(media::VideoFrame::kYPlane)); |
| 473 |
| 474 gl_->ActiveTexture(GL_TEXTURE1); |
| 475 gl_->BindTexture(GL_TEXTURE_2D, u_texture_); |
| 476 gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, uv_width_, uv_height_, |
| 477 0, format_, GL_UNSIGNED_BYTE, |
| 478 frame->data(media::VideoFrame::kUPlane)); |
| 479 |
| 480 gl_->ActiveTexture(GL_TEXTURE2); |
| 481 gl_->BindTexture(GL_TEXTURE_2D, v_texture_); |
| 482 gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, uv_width_, uv_height_, |
| 483 0, format_, GL_UNSIGNED_BYTE, |
| 484 frame->data(media::VideoFrame::kVPlane)); |
| 485 |
| 486 if (video_format_ == media::VideoFrame::YV12A) { |
| 487 DCHECK_EQ(frame->stride(media::VideoFrame::kYPlane), |
| 488 frame->stride(media::VideoFrame::kAPlane)); |
| 489 gl_->ActiveTexture(GL_TEXTURE3); |
| 490 gl_->BindTexture(GL_TEXTURE_2D, a_texture_); |
| 491 gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, y_width_, y_height_, |
| 492 0, format_, GL_UNSIGNED_BYTE, |
| 493 frame->data(media::VideoFrame::kAPlane)); |
| 494 } else { |
| 495 // If there is no alpha channel, then create a 2x2 texture with full |
| 496 // alpha. |
| 497 const uint8_t alpha[4] = {0xff, 0xff, 0xff, 0xff}; |
| 498 gl_->ActiveTexture(GL_TEXTURE3); |
| 499 gl_->BindTexture(GL_TEXTURE_2D, a_texture_); |
| 500 gl_->TexImage2D(GL_TEXTURE_2D, 0, internal_format_, 2, 2, 0, format_, |
| 501 GL_UNSIGNED_BYTE, alpha); |
| 502 } |
| 503 } else { |
| 504 // Width may have changed even though stride remained the same. |
| 505 if (clamp_width_ != ywidth) { |
| 506 SetTexcoordClamp(ystride, ywidth); |
| 507 set_clamp = true; |
| 508 } |
| 509 |
| 510 // Bind textures and upload texture data |
| 511 gl_->ActiveTexture(GL_TEXTURE0); |
| 512 gl_->BindTexture(GL_TEXTURE_2D, y_texture_); |
| 513 gl_->TexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, y_width_, y_height_, format_, |
| 514 GL_UNSIGNED_BYTE, |
| 515 frame->data(media::VideoFrame::kYPlane)); |
| 516 |
| 517 gl_->ActiveTexture(GL_TEXTURE1); |
| 518 gl_->BindTexture(GL_TEXTURE_2D, u_texture_); |
| 519 gl_->TexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, uv_width_, uv_height_, format_, |
| 520 GL_UNSIGNED_BYTE, |
| 521 frame->data(media::VideoFrame::kUPlane)); |
| 522 |
| 523 gl_->ActiveTexture(GL_TEXTURE2); |
| 524 gl_->BindTexture(GL_TEXTURE_2D, v_texture_); |
| 525 gl_->TexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, uv_width_, uv_height_, format_, |
| 526 GL_UNSIGNED_BYTE, |
| 527 frame->data(media::VideoFrame::kVPlane)); |
| 528 |
| 529 if (video_format_ == media::VideoFrame::YV12A) { |
| 530 DCHECK_EQ(frame->stride(media::VideoFrame::kYPlane), |
| 531 frame->stride(media::VideoFrame::kAPlane)); |
| 532 gl_->ActiveTexture(GL_TEXTURE3); |
| 533 gl_->BindTexture(GL_TEXTURE_2D, a_texture_); |
| 534 gl_->TexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, y_width_, y_height_, format_, |
| 535 GL_UNSIGNED_BYTE, |
| 536 frame->data(media::VideoFrame::kAPlane)); |
| 537 } else { |
| 538 gl_->ActiveTexture(GL_TEXTURE3); |
| 539 gl_->BindTexture(GL_TEXTURE_2D, a_texture_); |
| 540 } |
| 541 } |
| 542 |
| 543 gl_->BindFramebuffer(GL_FRAMEBUFFER, frame_buffer_); |
| 544 gl_->FramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, |
| 545 tex_out, 0); |
| 546 |
| 547 #ifndef NDEBUG |
| 548 // We should probably check for framebuffer completeness here, but that |
| 549 // would slow this method down, so check only in debug mode. |
| 550 GLint status = gl_->CheckFramebufferStatus(GL_FRAMEBUFFER); |
| 551 if (status != GL_FRAMEBUFFER_COMPLETE) { |
| 552 return; |
| 553 } |
| 554 #endif |
| 555 |
| 556 gl_->Viewport(0, 0, ywidth, yheight); |
| 557 |
| 558 gl_->UseProgram(program_); |
| 559 |
| 560 if (set_clamp) { |
| 561 gl_->Uniform1f(clamp_width_loc_, clamp_value_); |
| 562 } |
| 563 |
| 564 if (yuv_matrix) { |
| 565 gl_->UniformMatrix3fv(yuv_matrix_loc_, 1, 0, yuv_matrix); |
| 566 gl_->Uniform3fv(yuv_adjust_loc_, 1, yuv_adjust); |
| 567 } |
| 568 |
| 569 gl_->BindBuffer(GL_ARRAY_BUFFER, vertex_buffer_); |
| 570 gl_->EnableVertexAttribArray(0); |
| 571 gl_->VertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(GLfloat), |
| 572 static_cast<const void*>(0)); |
| 573 |
| 574 gl_->DrawArrays(GL_TRIANGLE_STRIP, 0, 4); |
| 575 |
| 576 // The YUVConverter shares the context with Skia and possibly other modules |
| 577 // that may make OpenGL calls. To be a "good OpenGL citizen" for other |
| 578 // (non-Skia) modules that may share this context we restore |
| 579 // buffer/texture/state bindings to OpenGL defaults here. If we were only |
| 580 // sharing the context with Skia this may not be necessary as we also |
| 581 // Invalidate the GrContext below so that Skia is aware that its state |
| 582 // caches need to be reset. |
| 583 |
| 584 gl_->BindBuffer(GL_ARRAY_BUFFER, 0); |
| 585 gl_->DisableVertexAttribArray(0); |
| 586 gl_->UseProgram(0); |
| 587 gl_->BindFramebuffer(GL_FRAMEBUFFER, 0); |
| 588 |
| 589 gl_->BindTexture(GL_TEXTURE_2D, 0); |
| 590 |
| 591 gl_->ActiveTexture(GL_TEXTURE2); |
| 592 gl_->BindTexture(GL_TEXTURE_2D, 0); |
| 593 |
| 594 gl_->ActiveTexture(GL_TEXTURE1); |
| 595 gl_->BindTexture(GL_TEXTURE_2D, 0); |
| 596 |
| 597 gl_->ActiveTexture(GL_TEXTURE0); |
| 598 gl_->BindTexture(GL_TEXTURE_2D, 0); |
| 599 |
| 600 gl_->PopGroupMarkerEXT(); |
| 601 |
| 602 context_provider_->InvalidateGrContext(kGrInvalidateState); |
| 603 } |
| 604 |
31 struct VideoDecoderShim::PendingDecode { | 605 struct VideoDecoderShim::PendingDecode { |
32 PendingDecode(uint32_t decode_id, | 606 PendingDecode(uint32_t decode_id, |
33 const scoped_refptr<media::DecoderBuffer>& buffer); | 607 const scoped_refptr<media::DecoderBuffer>& buffer); |
34 ~PendingDecode(); | 608 ~PendingDecode(); |
35 | 609 |
36 const uint32_t decode_id; | 610 const uint32_t decode_id; |
37 const scoped_refptr<media::DecoderBuffer> buffer; | 611 const scoped_refptr<media::DecoderBuffer> buffer; |
38 }; | 612 }; |
39 | 613 |
40 VideoDecoderShim::PendingDecode::PendingDecode( | 614 VideoDecoderShim::PendingDecode::PendingDecode( |
41 uint32_t decode_id, | 615 uint32_t decode_id, |
42 const scoped_refptr<media::DecoderBuffer>& buffer) | 616 const scoped_refptr<media::DecoderBuffer>& buffer) |
43 : decode_id(decode_id), buffer(buffer) { | 617 : decode_id(decode_id), buffer(buffer) { |
44 } | 618 } |
45 | 619 |
46 VideoDecoderShim::PendingDecode::~PendingDecode() { | 620 VideoDecoderShim::PendingDecode::~PendingDecode() { |
47 } | 621 } |
48 | 622 |
49 struct VideoDecoderShim::PendingFrame { | 623 struct VideoDecoderShim::PendingFrame { |
50 explicit PendingFrame(uint32_t decode_id); | 624 explicit PendingFrame(uint32_t decode_id); |
51 PendingFrame(uint32_t decode_id, | 625 PendingFrame(uint32_t decode_id, |
52 const gfx::Size& coded_size, | 626 const scoped_refptr<media::VideoFrame>& frame); |
53 const gfx::Rect& visible_rect); | |
54 ~PendingFrame(); | 627 ~PendingFrame(); |
55 | 628 |
56 const uint32_t decode_id; | 629 const uint32_t decode_id; |
57 const gfx::Size coded_size; | 630 scoped_refptr<media::VideoFrame> video_frame; |
58 const gfx::Rect visible_rect; | |
59 std::vector<uint8_t> argb_pixels; | |
60 | 631 |
61 private: | 632 private: |
62 // This could be expensive to copy, so guard against that. | 633 // This could be expensive to copy, so guard against that. |
63 DISALLOW_COPY_AND_ASSIGN(PendingFrame); | 634 DISALLOW_COPY_AND_ASSIGN(PendingFrame); |
64 }; | 635 }; |
65 | 636 |
66 VideoDecoderShim::PendingFrame::PendingFrame(uint32_t decode_id) | 637 VideoDecoderShim::PendingFrame::PendingFrame(uint32_t decode_id) |
67 : decode_id(decode_id) { | 638 : decode_id(decode_id) { |
68 } | 639 } |
69 | 640 |
70 VideoDecoderShim::PendingFrame::PendingFrame(uint32_t decode_id, | 641 VideoDecoderShim::PendingFrame::PendingFrame( |
71 const gfx::Size& coded_size, | 642 uint32_t decode_id, |
72 const gfx::Rect& visible_rect) | 643 const scoped_refptr<media::VideoFrame>& frame) |
73 : decode_id(decode_id), | 644 : decode_id(decode_id), video_frame(frame) { |
74 coded_size(coded_size), | |
75 visible_rect(visible_rect), | |
76 argb_pixels(coded_size.width() * coded_size.height() * 4) { | |
77 } | 645 } |
78 | 646 |
79 VideoDecoderShim::PendingFrame::~PendingFrame() { | 647 VideoDecoderShim::PendingFrame::~PendingFrame() { |
80 } | 648 } |
81 | 649 |
82 // DecoderImpl runs the underlying VideoDecoder on the media thread, receiving | 650 // DecoderImpl runs the underlying VideoDecoder on the media thread, receiving |
83 // calls from the VideoDecoderShim on the main thread and sending results back. | 651 // calls from the VideoDecoderShim on the main thread and sending results back. |
84 // This class is constructed on the main thread, but used and destructed on the | 652 // This class is constructed on the main thread, but used and destructed on the |
85 // media thread. | 653 // media thread. |
86 class VideoDecoderShim::DecoderImpl { | 654 class VideoDecoderShim::DecoderImpl { |
(...skipping 175 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
262 } | 830 } |
263 | 831 |
264 void VideoDecoderShim::DecoderImpl::OnOutputComplete( | 832 void VideoDecoderShim::DecoderImpl::OnOutputComplete( |
265 const scoped_refptr<media::VideoFrame>& frame) { | 833 const scoped_refptr<media::VideoFrame>& frame) { |
266 // Software decoders are expected to generate frames only when a Decode() | 834 // Software decoders are expected to generate frames only when a Decode() |
267 // call is pending. | 835 // call is pending. |
268 DCHECK(awaiting_decoder_); | 836 DCHECK(awaiting_decoder_); |
269 | 837 |
270 scoped_ptr<PendingFrame> pending_frame; | 838 scoped_ptr<PendingFrame> pending_frame; |
271 if (!frame->end_of_stream()) { | 839 if (!frame->end_of_stream()) { |
272 pending_frame.reset(new PendingFrame( | 840 pending_frame.reset(new PendingFrame(decode_id_, frame)); |
273 decode_id_, frame->coded_size(), frame->visible_rect())); | |
274 // Convert the VideoFrame pixels to ABGR to match VideoDecodeAccelerator. | |
275 media::SkCanvasVideoRenderer::ConvertVideoFrameToRGBPixels( | |
276 frame, | |
277 &pending_frame->argb_pixels.front(), | |
278 frame->coded_size().width() * 4); | |
279 } else { | 841 } else { |
280 pending_frame.reset(new PendingFrame(decode_id_)); | 842 pending_frame.reset(new PendingFrame(decode_id_)); |
281 } | 843 } |
282 | 844 |
283 main_message_loop_->PostTask(FROM_HERE, | 845 main_message_loop_->PostTask(FROM_HERE, |
284 base::Bind(&VideoDecoderShim::OnOutputComplete, | 846 base::Bind(&VideoDecoderShim::OnOutputComplete, |
285 shim_, | 847 shim_, |
286 base::Passed(&pending_frame))); | 848 base::Passed(&pending_frame))); |
287 } | 849 } |
288 | 850 |
289 void VideoDecoderShim::DecoderImpl::OnResetComplete() { | 851 void VideoDecoderShim::DecoderImpl::OnResetComplete() { |
290 main_message_loop_->PostTask( | 852 main_message_loop_->PostTask( |
291 FROM_HERE, base::Bind(&VideoDecoderShim::OnResetComplete, shim_)); | 853 FROM_HERE, base::Bind(&VideoDecoderShim::OnResetComplete, shim_)); |
292 } | 854 } |
293 | 855 |
294 VideoDecoderShim::VideoDecoderShim(PepperVideoDecoderHost* host) | 856 VideoDecoderShim::VideoDecoderShim(PepperVideoDecoderHost* host) |
295 : state_(UNINITIALIZED), | 857 : state_(UNINITIALIZED), |
296 host_(host), | 858 host_(host), |
297 media_task_runner_( | 859 media_task_runner_( |
298 RenderThreadImpl::current()->GetMediaThreadTaskRunner()), | 860 RenderThreadImpl::current()->GetMediaThreadTaskRunner()), |
299 context_provider_( | 861 context_provider_( |
300 RenderThreadImpl::current()->SharedMainThreadContextProvider()), | 862 RenderThreadImpl::current()->SharedMainThreadContextProvider()), |
301 texture_pool_size_(0), | 863 texture_pool_size_(0), |
302 num_pending_decodes_(0), | 864 num_pending_decodes_(0), |
| 865 yuv_converter_(new YUVConverter(context_provider_)), |
303 weak_ptr_factory_(this) { | 866 weak_ptr_factory_(this) { |
304 DCHECK(host_); | 867 DCHECK(host_); |
305 DCHECK(media_task_runner_.get()); | 868 DCHECK(media_task_runner_.get()); |
306 DCHECK(context_provider_.get()); | 869 DCHECK(context_provider_.get()); |
307 decoder_impl_.reset(new DecoderImpl(weak_ptr_factory_.GetWeakPtr())); | 870 decoder_impl_.reset(new DecoderImpl(weak_ptr_factory_.GetWeakPtr())); |
308 } | 871 } |
309 | 872 |
310 VideoDecoderShim::~VideoDecoderShim() { | 873 VideoDecoderShim::~VideoDecoderShim() { |
311 DCHECK(RenderThreadImpl::current()); | 874 DCHECK(RenderThreadImpl::current()); |
312 // Delete any remaining textures. | 875 // Delete any remaining textures. |
(...skipping 23 matching lines...) Expand all Loading... |
336 DCHECK_EQ(state_, UNINITIALIZED); | 899 DCHECK_EQ(state_, UNINITIALIZED); |
337 media::VideoCodec codec = media::kUnknownVideoCodec; | 900 media::VideoCodec codec = media::kUnknownVideoCodec; |
338 if (profile <= media::H264PROFILE_MAX) | 901 if (profile <= media::H264PROFILE_MAX) |
339 codec = media::kCodecH264; | 902 codec = media::kCodecH264; |
340 else if (profile <= media::VP8PROFILE_MAX) | 903 else if (profile <= media::VP8PROFILE_MAX) |
341 codec = media::kCodecVP8; | 904 codec = media::kCodecVP8; |
342 else if (profile <= media::VP9PROFILE_MAX) | 905 else if (profile <= media::VP9PROFILE_MAX) |
343 codec = media::kCodecVP9; | 906 codec = media::kCodecVP9; |
344 DCHECK_NE(codec, media::kUnknownVideoCodec); | 907 DCHECK_NE(codec, media::kUnknownVideoCodec); |
345 | 908 |
| 909 if (!yuv_converter_->Initialize()) { |
| 910 return false; |
| 911 } |
| 912 |
346 media::VideoDecoderConfig config( | 913 media::VideoDecoderConfig config( |
347 codec, | 914 codec, |
348 profile, | 915 profile, |
349 media::VideoFrame::YV12, | 916 media::VideoFrame::YV12, |
350 gfx::Size(32, 24), // Small sizes that won't fail. | 917 gfx::Size(32, 24), // Small sizes that won't fail. |
351 gfx::Rect(32, 24), | 918 gfx::Rect(32, 24), |
352 gfx::Size(32, 24), | 919 gfx::Size(32, 24), |
353 NULL /* extra_data */, // TODO(bbudge) Verify this isn't needed. | 920 NULL /* extra_data */, // TODO(bbudge) Verify this isn't needed. |
354 0 /* extra_data_size */, | 921 0 /* extra_data_size */, |
355 false /* decryption */); | 922 false /* decryption */); |
(...skipping 112 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
468 // the host that decode has completed. This exerts "back pressure" to keep | 1035 // the host that decode has completed. This exerts "back pressure" to keep |
469 // the host from sending buffers that will cause pending_frames_ to grow. | 1036 // the host from sending buffers that will cause pending_frames_ to grow. |
470 if (pending_frames_.empty()) | 1037 if (pending_frames_.empty()) |
471 NotifyCompletedDecodes(); | 1038 NotifyCompletedDecodes(); |
472 } | 1039 } |
473 | 1040 |
474 void VideoDecoderShim::OnOutputComplete(scoped_ptr<PendingFrame> frame) { | 1041 void VideoDecoderShim::OnOutputComplete(scoped_ptr<PendingFrame> frame) { |
475 DCHECK(RenderThreadImpl::current()); | 1042 DCHECK(RenderThreadImpl::current()); |
476 DCHECK(host_); | 1043 DCHECK(host_); |
477 | 1044 |
478 if (!frame->argb_pixels.empty()) { | 1045 if (frame->video_frame) { |
479 if (texture_size_ != frame->coded_size) { | 1046 if (texture_size_ != frame->video_frame->coded_size()) { |
480 // If the size has changed, all current textures must be dismissed. Add | 1047 // If the size has changed, all current textures must be dismissed. Add |
481 // all textures to |textures_to_dismiss_| and dismiss any that aren't in | 1048 // all textures to |textures_to_dismiss_| and dismiss any that aren't in |
482 // use by the plugin. We will dismiss the rest as they are recycled. | 1049 // use by the plugin. We will dismiss the rest as they are recycled. |
483 for (TextureIdMap::const_iterator it = texture_id_map_.begin(); | 1050 for (TextureIdMap::const_iterator it = texture_id_map_.begin(); |
484 it != texture_id_map_.end(); | 1051 it != texture_id_map_.end(); |
485 ++it) { | 1052 ++it) { |
486 textures_to_dismiss_.insert(it->first); | 1053 textures_to_dismiss_.insert(it->first); |
487 } | 1054 } |
488 for (TextureIdSet::const_iterator it = available_textures_.begin(); | 1055 for (TextureIdSet::const_iterator it = available_textures_.begin(); |
489 it != available_textures_.end(); | 1056 it != available_textures_.end(); |
490 ++it) { | 1057 ++it) { |
491 DismissTexture(*it); | 1058 DismissTexture(*it); |
492 } | 1059 } |
493 available_textures_.clear(); | 1060 available_textures_.clear(); |
494 FlushCommandBuffer(); | 1061 FlushCommandBuffer(); |
495 | 1062 |
496 DCHECK(pending_texture_mailboxes_.empty()); | 1063 DCHECK(pending_texture_mailboxes_.empty()); |
497 for (uint32_t i = 0; i < texture_pool_size_; i++) | 1064 for (uint32_t i = 0; i < texture_pool_size_; i++) |
498 pending_texture_mailboxes_.push_back(gpu::Mailbox::Generate()); | 1065 pending_texture_mailboxes_.push_back(gpu::Mailbox::Generate()); |
499 | 1066 |
500 host_->RequestTextures(texture_pool_size_, | 1067 host_->RequestTextures(texture_pool_size_, |
501 frame->coded_size, | 1068 frame->video_frame->coded_size(), GL_TEXTURE_2D, |
502 GL_TEXTURE_2D, | |
503 pending_texture_mailboxes_); | 1069 pending_texture_mailboxes_); |
504 texture_size_ = frame->coded_size; | 1070 texture_size_ = frame->video_frame->coded_size(); |
505 } | 1071 } |
506 | 1072 |
507 pending_frames_.push(linked_ptr<PendingFrame>(frame.release())); | 1073 pending_frames_.push(linked_ptr<PendingFrame>(frame.release())); |
508 SendPictures(); | 1074 SendPictures(); |
509 } | 1075 } |
510 } | 1076 } |
511 | 1077 |
512 void VideoDecoderShim::SendPictures() { | 1078 void VideoDecoderShim::SendPictures() { |
513 DCHECK(RenderThreadImpl::current()); | 1079 DCHECK(RenderThreadImpl::current()); |
514 DCHECK(host_); | 1080 DCHECK(host_); |
515 while (!pending_frames_.empty() && !available_textures_.empty()) { | 1081 while (!pending_frames_.empty() && !available_textures_.empty()) { |
516 const linked_ptr<PendingFrame>& frame = pending_frames_.front(); | 1082 const linked_ptr<PendingFrame>& frame = pending_frames_.front(); |
517 | 1083 |
518 TextureIdSet::iterator it = available_textures_.begin(); | 1084 TextureIdSet::iterator it = available_textures_.begin(); |
519 uint32_t texture_id = *it; | 1085 uint32_t texture_id = *it; |
520 available_textures_.erase(it); | 1086 available_textures_.erase(it); |
521 | 1087 |
522 uint32_t local_texture_id = texture_id_map_[texture_id]; | 1088 uint32_t local_texture_id = texture_id_map_[texture_id]; |
523 gpu::gles2::GLES2Interface* gles2 = context_provider_->ContextGL(); | 1089 |
524 gles2->ActiveTexture(GL_TEXTURE0); | 1090 yuv_converter_->Convert(frame->video_frame, local_texture_id); |
525 gles2->BindTexture(GL_TEXTURE_2D, local_texture_id); | |
526 #if !defined(OS_ANDROID) | |
527 // BGRA is the native texture format, except on Android, where textures | |
528 // would be uploaded as GL_RGBA. | |
529 gles2->TexImage2D(GL_TEXTURE_2D, | |
530 0, | |
531 GL_BGRA_EXT, | |
532 texture_size_.width(), | |
533 texture_size_.height(), | |
534 0, | |
535 GL_BGRA_EXT, | |
536 GL_UNSIGNED_BYTE, | |
537 &frame->argb_pixels.front()); | |
538 #else | |
539 #error Not implemented. | |
540 #endif | |
541 | 1091 |
542 host_->PictureReady(media::Picture(texture_id, frame->decode_id, | 1092 host_->PictureReady(media::Picture(texture_id, frame->decode_id, |
543 frame->visible_rect, false)); | 1093 frame->video_frame->visible_rect(), |
| 1094 false)); |
544 pending_frames_.pop(); | 1095 pending_frames_.pop(); |
545 } | 1096 } |
546 | 1097 |
547 FlushCommandBuffer(); | 1098 FlushCommandBuffer(); |
548 | 1099 |
549 if (pending_frames_.empty()) { | 1100 if (pending_frames_.empty()) { |
550 // If frames aren't backing up, notify the host of any completed decodes so | 1101 // If frames aren't backing up, notify the host of any completed decodes so |
551 // it can send more buffers. | 1102 // it can send more buffers. |
552 NotifyCompletedDecodes(); | 1103 NotifyCompletedDecodes(); |
553 | 1104 |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
593 void VideoDecoderShim::DeleteTexture(uint32_t texture_id) { | 1144 void VideoDecoderShim::DeleteTexture(uint32_t texture_id) { |
594 gpu::gles2::GLES2Interface* gles2 = context_provider_->ContextGL(); | 1145 gpu::gles2::GLES2Interface* gles2 = context_provider_->ContextGL(); |
595 gles2->DeleteTextures(1, &texture_id); | 1146 gles2->DeleteTextures(1, &texture_id); |
596 } | 1147 } |
597 | 1148 |
598 void VideoDecoderShim::FlushCommandBuffer() { | 1149 void VideoDecoderShim::FlushCommandBuffer() { |
599 context_provider_->ContextGL()->Flush(); | 1150 context_provider_->ContextGL()->Flush(); |
600 } | 1151 } |
601 | 1152 |
602 } // namespace content | 1153 } // namespace content |
OLD | NEW |