OLD | NEW |
1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/common/gpu/media/android_video_decode_accelerator.h" | 5 #include "content/common/gpu/media/android_video_decode_accelerator.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/logging.h" | 8 #include "base/logging.h" |
9 #include "base/message_loop/message_loop.h" | 9 #include "base/message_loop/message_loop.h" |
10 #include "base/metrics/histogram.h" | 10 #include "base/metrics/histogram.h" |
11 #include "content/common/gpu/gpu_channel.h" | 11 #include "content/common/gpu/gpu_channel.h" |
12 #include "gpu/command_buffer/service/gles2_cmd_decoder.h" | 12 #include "gpu/command_buffer/service/gles2_cmd_decoder.h" |
13 #include "media/base/bitstream_buffer.h" | 13 #include "media/base/bitstream_buffer.h" |
14 #include "media/base/limits.h" | 14 #include "media/base/limits.h" |
15 #include "media/base/video_decoder_config.h" | 15 #include "media/base/video_decoder_config.h" |
16 #include "media/video/picture.h" | 16 #include "media/video/picture.h" |
17 #include "ui/gl/android/scoped_java_surface.h" | 17 #include "ui/gl/android/scoped_java_surface.h" |
18 #include "ui/gl/android/surface_texture.h" | 18 #include "ui/gl/android/surface_texture.h" |
19 #include "ui/gl/gl_bindings.h" | 19 #include "ui/gl/gl_bindings.h" |
20 | 20 |
21 namespace content { | 21 namespace content { |
22 | 22 |
23 // Helper macro for dealing with failure. If |result| evaluates to false, | |
24 // emit |log| to ERROR, register |error| with the decoder, and return. | |
25 #define RETURN_ON_FAILURE(result, log, error) \ | |
26 do { \ | |
27 if (!(result)) { \ | |
28 DLOG(ERROR) << log; \ | |
29 base::MessageLoop::current()->PostTask( \ | |
30 FROM_HERE, \ | |
31 base::Bind(&AndroidVideoDecodeAccelerator::NotifyError, \ | |
32 weak_this_factory_.GetWeakPtr(), \ | |
33 error)); \ | |
34 state_ = ERROR; \ | |
35 return; \ | |
36 } \ | |
37 } while (0) | |
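For reference, a typical invocation of this macro from later in this file — |result| is the success condition, |log| is streamed to DLOG(ERROR), and |error| is the media::VideoDecodeAccelerator::Error reported to the client:

    RETURN_ON_FAILURE(make_context_current_.Run(),
                      "Failed to make this decoder's GL context current.",
                      PLATFORM_FAILURE);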
38 | |
39 // TODO(dwkang): We only need kMaxVideoFrames to pass the media stack's | 23 // TODO(dwkang): We only need kMaxVideoFrames to pass the media stack's |
40 // prerolling phase, but 1 is added due to crbug.com/176036. This should be | 24 // prerolling phase, but 1 is added due to crbug.com/176036. This should be |
41 // tuned when we have an actual use case. | 25 // tuned when we have an actual use case. |
42 enum { kNumPictureBuffers = media::limits::kMaxVideoFrames + 1 }; | 26 enum { kNumPictureBuffers = media::limits::kMaxVideoFrames + 1 }; |
43 | 27 |
44 // Max number of bitstreams notified to the client with | |
45 // NotifyEndOfBitstreamBuffer() before getting output from the bitstream. | |
46 enum { kMaxBitstreamsNotifiedInAdvance = 32 }; | |
47 | |
48 #if defined(ENABLE_MEDIA_PIPELINE_ON_ANDROID) | |
49 // MediaCodec is only guaranteed to support baseline, but some devices may | |
50 // support others. Advertise support for all H264 profiles and let the | |
51 // MediaCodec fail when decoding if it's not actually supported. It's assumed | |
52 // that consumers won't have software fallback for H264 on Android anyway. | |
53 static const media::VideoCodecProfile kSupportedH264Profiles[] = { | |
54 media::H264PROFILE_BASELINE, | |
55 media::H264PROFILE_MAIN, | |
56 media::H264PROFILE_EXTENDED, | |
57 media::H264PROFILE_HIGH, | |
58 media::H264PROFILE_HIGH10PROFILE, | |
59 media::H264PROFILE_HIGH422PROFILE, | |
60 media::H264PROFILE_HIGH444PREDICTIVEPROFILE, | |
61 media::H264PROFILE_SCALABLEBASELINE, | |
62 media::H264PROFILE_SCALABLEHIGH, | |
63 media::H264PROFILE_STEREOHIGH, | |
64 media::H264PROFILE_MULTIVIEWHIGH | |
65 }; | |
66 #endif | |
67 | |
68 // Because MediaCodec is thread-hostile (must be poked on a single thread) and | |
69 // has no callback mechanism (b/11990118), we must drive it by polling for | |
70 // complete frames (and available input buffers, when the codec is fully | |
71 // saturated). This function defines the polling delay. The value used is an | |
72 // arbitrary choice that trades off CPU utilization (spinning) against latency. | |
73 // Mirrors android_video_encode_accelerator.cc:EncodePollDelay(). | |
74 static inline const base::TimeDelta DecodePollDelay() { | |
75 // An alternative to this polling scheme could be to dedicate a new thread | |
76 // (instead of using the ChildThread) to run the MediaCodec, and make that | |
77 // thread use the timeout-based flavor of MediaCodec's dequeue methods when it | |
78 // believes the codec should complete "soon" (e.g. waiting for an input | |
79 // buffer, or waiting for a picture when it knows enough complete input | |
80 // pictures have been fed to saturate any internal buffering). This is | |
81 // speculative and it's unclear that this would be a win (nor that there's a | |
82 // reasonably device-agnostic way to fill in the "believes" above). | |
83 return base::TimeDelta::FromMilliseconds(10); | |
84 } | |
85 | |
86 static inline const base::TimeDelta NoWaitTimeOut() { | |
87 return base::TimeDelta::FromMicroseconds(0); | |
88 } | |
89 | |
90 AndroidVideoDecodeAccelerator::AndroidVideoDecodeAccelerator( | 28 AndroidVideoDecodeAccelerator::AndroidVideoDecodeAccelerator( |
91 const base::WeakPtr<gpu::gles2::GLES2Decoder> decoder, | 29 const base::WeakPtr<gpu::gles2::GLES2Decoder> decoder, |
92 const base::Callback<bool(void)>& make_context_current) | 30 const base::Callback<bool(void)>& make_context_current) |
93 : client_(NULL), | 31 : AndroidVideoDecodeAcceleratorBase(decoder, make_context_current) { |
94 make_context_current_(make_context_current), | 32 } |
95 codec_(media::kCodecH264), | |
96 state_(NO_ERROR), | |
97 surface_texture_id_(0), | |
98 picturebuffers_requested_(false), | |
99 gl_decoder_(decoder), | |
100 weak_this_factory_(this) {} | |
101 | 33 |
102 AndroidVideoDecodeAccelerator::~AndroidVideoDecodeAccelerator() { | 34 AndroidVideoDecodeAccelerator::~AndroidVideoDecodeAccelerator() { |
103 DCHECK(thread_checker_.CalledOnValidThread()); | 35 DCHECK(ThreadChecker().CalledOnValidThread()); |
104 } | |
105 | |
106 bool AndroidVideoDecodeAccelerator::Initialize(media::VideoCodecProfile profile, | |
107 Client* client) { | |
108 DCHECK(!media_codec_); | |
109 DCHECK(thread_checker_.CalledOnValidThread()); | |
110 | |
111 client_ = client; | |
112 codec_ = VideoCodecProfileToVideoCodec(profile); | |
113 | |
114 bool profile_supported = codec_ == media::kCodecVP8; | |
115 #if defined(ENABLE_MEDIA_PIPELINE_ON_ANDROID) | |
116 profile_supported |= | |
117 (codec_ == media::kCodecVP9 || codec_ == media::kCodecH264); | |
118 #endif | |
119 | |
120 if (!profile_supported) { | |
121 LOG(ERROR) << "Unsupported profile: " << profile; | |
122 return false; | |
123 } | |
124 | |
125 // Only use MediaCodec for VP8/9 if it's likely backed by hardware. | |
126 if ((codec_ == media::kCodecVP8 || codec_ == media::kCodecVP9) && | |
127 media::VideoCodecBridge::IsKnownUnaccelerated( | |
128 codec_, media::MEDIA_CODEC_DECODER)) { | |
129 DVLOG(1) << "Initialization failed: " | |
130 << (codec_ == media::kCodecVP8 ? "vp8" : "vp9") | |
131 << " is not hardware accelerated"; | |
132 return false; | |
133 } | |
134 | |
135 if (!make_context_current_.Run()) { | |
136 LOG(ERROR) << "Failed to make this decoder's GL context current."; | |
137 return false; | |
138 } | |
139 | |
140 if (!gl_decoder_) { | |
141 LOG(ERROR) << "Failed to get gles2 decoder instance."; | |
142 return false; | |
143 } | |
144 glGenTextures(1, &surface_texture_id_); | |
145 glActiveTexture(GL_TEXTURE0); | |
146 glBindTexture(GL_TEXTURE_EXTERNAL_OES, surface_texture_id_); | |
147 | |
148 glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_NEAREST); | |
149 glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_NEAREST); | |
150 glTexParameteri(GL_TEXTURE_EXTERNAL_OES, | |
151 GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); | |
152 glTexParameteri(GL_TEXTURE_EXTERNAL_OES, | |
153 GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); | |
154 gl_decoder_->RestoreTextureUnitBindings(0); | |
155 gl_decoder_->RestoreActiveTexture(); | |
156 | |
157 surface_texture_ = gfx::SurfaceTexture::Create(surface_texture_id_); | |
158 | |
159 if (!ConfigureMediaCodec()) { | |
160 LOG(ERROR) << "Failed to create MediaCodec instance."; | |
161 return false; | |
162 } | |
163 | |
164 return true; | |
165 } | |
166 | |
167 void AndroidVideoDecodeAccelerator::DoIOTask() { | |
168 DCHECK(thread_checker_.CalledOnValidThread()); | |
169 if (state_ == ERROR) { | |
170 return; | |
171 } | |
172 | |
173 QueueInput(); | |
174 DequeueOutput(); | |
175 } | |
176 | |
177 void AndroidVideoDecodeAccelerator::QueueInput() { | |
178 DCHECK(thread_checker_.CalledOnValidThread()); | |
179 if (bitstreams_notified_in_advance_.size() > kMaxBitstreamsNotifiedInAdvance) | |
180 return; | |
181 if (pending_bitstream_buffers_.empty()) | |
182 return; | |
183 | |
184 int input_buf_index = 0; | |
185 media::MediaCodecStatus status = media_codec_->DequeueInputBuffer( | |
186 NoWaitTimeOut(), &input_buf_index); | |
187 if (status != media::MEDIA_CODEC_OK) { | |
188 DCHECK(status == media::MEDIA_CODEC_DEQUEUE_INPUT_AGAIN_LATER || | |
189 status == media::MEDIA_CODEC_ERROR); | |
190 return; | |
191 } | |
192 | |
193 base::Time queued_time = pending_bitstream_buffers_.front().second; | |
194 UMA_HISTOGRAM_TIMES("Media.AVDA.InputQueueTime", | |
195 base::Time::Now() - queued_time); | |
196 media::BitstreamBuffer bitstream_buffer = | |
197 pending_bitstream_buffers_.front().first; | |
198 pending_bitstream_buffers_.pop(); | |
199 | |
200 if (bitstream_buffer.id() == -1) { | |
201 media_codec_->QueueEOS(input_buf_index); | |
202 return; | |
203 } | |
204 | |
205 // Abuse the presentation time argument to propagate the bitstream | |
206 // buffer ID to the output, so we can report it back to the client in | |
207 // PictureReady(). | |
208 base::TimeDelta timestamp = | |
209 base::TimeDelta::FromMicroseconds(bitstream_buffer.id()); | |
210 | |
211 scoped_ptr<base::SharedMemory> shm( | |
212 new base::SharedMemory(bitstream_buffer.handle(), true)); | |
213 | |
214 RETURN_ON_FAILURE(shm->Map(bitstream_buffer.size()), | |
215 "Failed to SharedMemory::Map()", | |
216 UNREADABLE_INPUT); | |
217 | |
218 status = | |
219 media_codec_->QueueInputBuffer(input_buf_index, | |
220 static_cast<const uint8*>(shm->memory()), | |
221 bitstream_buffer.size(), | |
222 timestamp); | |
223 RETURN_ON_FAILURE(status == media::MEDIA_CODEC_OK, | |
224 "Failed to QueueInputBuffer: " << status, | |
225 PLATFORM_FAILURE); | |
226 | |
227 // We should call NotifyEndOfBitstreamBuffer() when no more decoded output | |
228 // will be returned from the bitstream buffer. However, the MediaCodec API | |
229 // is not sufficient to guarantee that. | |
230 // So we call NotifyEndOfBitstreamBuffer() here, in advance, in order to | |
231 // keep getting more bitstreams from the client, and throttle them by using | |
232 // |bitstreams_notified_in_advance_|. | |
233 // TODO(dwkang): check if there is a way to remove this workaround. | |
234 base::MessageLoop::current()->PostTask( | |
235 FROM_HERE, | |
236 base::Bind(&AndroidVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer, | |
237 weak_this_factory_.GetWeakPtr(), | |
238 bitstream_buffer.id())); | |
239 bitstreams_notified_in_advance_.push_back(bitstream_buffer.id()); | |
240 } | |
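Taken together with DequeueOutput() below, the bitstream ID makes a round trip through the codec inside the presentation timestamp; condensed from the two functions in this file (same calls, surrounding details elided):

    // QueueInput(): smuggle the bitstream buffer ID in as the timestamp.
    base::TimeDelta timestamp =
        base::TimeDelta::FromMicroseconds(bitstream_buffer.id());
    media_codec_->QueueInputBuffer(input_buf_index,
                                   static_cast<const uint8*>(shm->memory()),
                                   bitstream_buffer.size(),
                                   timestamp);

    // DequeueOutput(): MediaCodec echoes the input timestamp on the matching
    // output frame, so the ID can be recovered and reported to the client.
    SendCurrentSurfaceToClient(static_cast<int32>(timestamp.InMicroseconds()));

Note this depends on MediaCodec preserving input timestamps per frame; with reordering the mapping is per-frame rather than FIFO, which is why |bitstreams_notified_in_advance_| is only an approximation.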
241 | |
242 void AndroidVideoDecodeAccelerator::DequeueOutput() { | |
243 DCHECK(thread_checker_.CalledOnValidThread()); | |
244 if (picturebuffers_requested_ && output_picture_buffers_.empty()) | |
245 return; | |
246 | |
247 if (!output_picture_buffers_.empty() && free_picture_ids_.empty()) { | |
248 // We don't have any picture buffers to send; need to wait for more. | |
249 return; | |
250 } | |
251 | |
252 bool eos = false; | |
253 base::TimeDelta timestamp; | |
254 int32 buf_index = 0; | |
255 do { | |
256 size_t offset = 0; | |
257 size_t size = 0; | |
258 | |
259 media::MediaCodecStatus status = media_codec_->DequeueOutputBuffer( | |
260 NoWaitTimeOut(), &buf_index, &offset, &size, ×tamp, &eos, NULL); | |
261 switch (status) { | |
262 case media::MEDIA_CODEC_DEQUEUE_OUTPUT_AGAIN_LATER: | |
263 case media::MEDIA_CODEC_ERROR: | |
264 return; | |
265 | |
266 case media::MEDIA_CODEC_OUTPUT_FORMAT_CHANGED: { | |
267 int32 width, height; | |
268 media_codec_->GetOutputFormat(&width, &height); | |
269 | |
270 if (!picturebuffers_requested_) { | |
271 picturebuffers_requested_ = true; | |
272 size_ = gfx::Size(width, height); | |
273 base::MessageLoop::current()->PostTask( | |
274 FROM_HERE, | |
275 base::Bind(&AndroidVideoDecodeAccelerator::RequestPictureBuffers, | |
276 weak_this_factory_.GetWeakPtr())); | |
277 } else { | |
278 // Dynamic resolution change support is not specified by the Android | |
279 // platform at and before JB-MR1, so it's not possible to smoothly | |
280 // continue playback at this point. Instead, error out immediately, | |
281 // expecting clients to Reset() as appropriate to avoid this. | |
282 // b/7093648 | |
283 RETURN_ON_FAILURE(size_ == gfx::Size(width, height), | |
284 "Dynamic resolution change is not supported.", | |
285 PLATFORM_FAILURE); | |
286 } | |
287 return; | |
288 } | |
289 | |
290 case media::MEDIA_CODEC_OUTPUT_BUFFERS_CHANGED: | |
291 break; | |
292 | |
293 case media::MEDIA_CODEC_OK: | |
294 DCHECK_GE(buf_index, 0); | |
295 break; | |
296 | |
297 default: | |
298 NOTREACHED(); | |
299 break; | |
300 } | |
301 } while (buf_index < 0); | |
302 | |
303 // This ignores the emitted ByteBuffer and instead relies on rendering to the | |
304 // codec's SurfaceTexture and then copying from that texture to the client's | |
305 // PictureBuffer's texture. This means that each picture's data is written | |
306 // three times: once to the ByteBuffer, once to the SurfaceTexture, and once | |
307 // to the client's texture. It would be nicer to either: | |
308 // 1) Render directly to the client's texture from MediaCodec (one write); or | |
309 // 2) Upload the ByteBuffer to the client's texture (two writes). | |
310 // Unfortunately neither is possible: | |
311 // 1) MediaCodec's use of SurfaceTexture is a singleton, and the texture | |
312 // written to can't change during the codec's lifetime. b/11990461 | |
313 // 2) The ByteBuffer is likely to contain the pixels in a vendor-specific, | |
314 // opaque/non-standard format. It's not possible to negotiate the decoder | |
315 // to emit a specific colorspace, even using HW CSC. b/10706245 | |
316 // So, we live with these two extra copies per picture :( | |
317 media_codec_->ReleaseOutputBuffer(buf_index, true); | |
318 | |
319 if (eos) { | |
320 base::MessageLoop::current()->PostTask( | |
321 FROM_HERE, | |
322 base::Bind(&AndroidVideoDecodeAccelerator::NotifyFlushDone, | |
323 weak_this_factory_.GetWeakPtr())); | |
324 } else { | |
325 int64 bitstream_buffer_id = timestamp.InMicroseconds(); | |
326 SendCurrentSurfaceToClient(static_cast<int32>(bitstream_buffer_id)); | |
327 | |
328 // Remove ids up to and including this one from the list. Note that | |
329 // |bitstreams_notified_in_advance_| does not track the bitstream ids in the | |
330 // decoder exactly, because of frame reordering; we only maintain it roughly | |
331 // and use it for throttling. | |
332 std::list<int32>::iterator it; | |
333 for (it = bitstreams_notified_in_advance_.begin(); | |
334 it != bitstreams_notified_in_advance_.end(); | |
335 ++it) { | |
336 if (*it == bitstream_buffer_id) { | |
337 bitstreams_notified_in_advance_.erase( | |
338 bitstreams_notified_in_advance_.begin(), ++it); | |
339 break; | |
340 } | |
341 } | |
342 } | |
343 } | |
344 | |
345 void AndroidVideoDecodeAccelerator::SendCurrentSurfaceToClient( | |
346 int32 bitstream_id) { | |
347 DCHECK(thread_checker_.CalledOnValidThread()); | |
348 DCHECK_NE(bitstream_id, -1); | |
349 DCHECK(!free_picture_ids_.empty()); | |
350 | |
351 RETURN_ON_FAILURE(make_context_current_.Run(), | |
352 "Failed to make this decoder's GL context current.", | |
353 PLATFORM_FAILURE); | |
354 | |
355 int32 picture_buffer_id = free_picture_ids_.front(); | |
356 free_picture_ids_.pop(); | |
357 | |
358 float transform_matrix[16]; | |
359 surface_texture_->UpdateTexImage(); | |
360 surface_texture_->GetTransformMatrix(transform_matrix); | |
361 | |
362 OutputBufferMap::const_iterator i = | |
363 output_picture_buffers_.find(picture_buffer_id); | |
364 RETURN_ON_FAILURE(i != output_picture_buffers_.end(), | |
365 "Can't find a PictureBuffer for " << picture_buffer_id, | |
366 PLATFORM_FAILURE); | |
367 uint32 picture_buffer_texture_id = i->second.texture_id(); | |
368 | |
369 RETURN_ON_FAILURE(gl_decoder_.get(), | |
370 "Failed to get gles2 decoder instance.", | |
371 ILLEGAL_STATE); | |
372 // Defer initializing the CopyTextureCHROMIUMResourceManager until it is | |
373 // needed because it takes tens of milliseconds to initialize. | |
374 if (!copier_) { | |
375 copier_.reset(new gpu::CopyTextureCHROMIUMResourceManager()); | |
376 copier_->Initialize(gl_decoder_.get()); | |
377 } | |
378 | |
379 // Here, we copy |surface_texture_id_| to the picture buffer instead of | |
380 // setting a new texture on |surface_texture_| via attachToGLContext(), | |
381 // because: | |
382 // 1. Once we call detachFromGLContext(), it deletes the previously | |
383 // attached texture. | |
384 // 2. SurfaceTexture requires us to apply a transform matrix when we show | |
385 // the texture. | |
386 // TODO(hkuang): get the StreamTexture transform matrix in GPU process | |
387 // instead of using default matrix crbug.com/226218. | |
388 const static GLfloat default_matrix[16] = {1.0f, 0.0f, 0.0f, 0.0f, | |
389 0.0f, 1.0f, 0.0f, 0.0f, | |
390 0.0f, 0.0f, 1.0f, 0.0f, | |
391 0.0f, 0.0f, 0.0f, 1.0f}; | |
392 copier_->DoCopyTextureWithTransform(gl_decoder_.get(), | |
393 GL_TEXTURE_EXTERNAL_OES, | |
394 surface_texture_id_, | |
395 picture_buffer_texture_id, | |
396 size_.width(), | |
397 size_.height(), | |
398 false, | |
399 false, | |
400 false, | |
401 default_matrix); | |
402 | |
403 // TODO(henryhsu): Passing (0, 0) as the visible size makes several tests | |
404 // fail. Make sure whether |size_| is the coded size or the visible size. | |
405 base::MessageLoop::current()->PostTask( | |
406 FROM_HERE, base::Bind(&AndroidVideoDecodeAccelerator::NotifyPictureReady, | |
407 weak_this_factory_.GetWeakPtr(), | |
408 media::Picture(picture_buffer_id, bitstream_id, | |
409 gfx::Rect(size_), false))); | |
410 } | |
411 | |
412 void AndroidVideoDecodeAccelerator::Decode( | |
413 const media::BitstreamBuffer& bitstream_buffer) { | |
414 DCHECK(thread_checker_.CalledOnValidThread()); | |
415 if (bitstream_buffer.id() != -1 && bitstream_buffer.size() == 0) { | |
416 base::MessageLoop::current()->PostTask( | |
417 FROM_HERE, | |
418 base::Bind(&AndroidVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer, | |
419 weak_this_factory_.GetWeakPtr(), | |
420 bitstream_buffer.id())); | |
421 return; | |
422 } | |
423 | |
424 pending_bitstream_buffers_.push( | |
425 std::make_pair(bitstream_buffer, base::Time::Now())); | |
426 | |
427 DoIOTask(); | |
428 } | |
429 | |
430 void AndroidVideoDecodeAccelerator::AssignPictureBuffers( | |
431 const std::vector<media::PictureBuffer>& buffers) { | |
432 DCHECK(thread_checker_.CalledOnValidThread()); | |
433 DCHECK(output_picture_buffers_.empty()); | |
434 DCHECK(free_picture_ids_.empty()); | |
435 | |
436 for (size_t i = 0; i < buffers.size(); ++i) { | |
437 RETURN_ON_FAILURE(buffers[i].size() == size_, | |
438 "Invalid picture buffer size was passed.", | |
439 INVALID_ARGUMENT); | |
440 int32 id = buffers[i].id(); | |
441 output_picture_buffers_.insert(std::make_pair(id, buffers[i])); | |
442 free_picture_ids_.push(id); | |
443 // Since the client might be re-using |picture_buffer_id| values, forget | |
444 // about previously-dismissed IDs now. See ReusePictureBuffer() comment | |
445 // about "zombies" for why we maintain this set in the first place. | |
446 dismissed_picture_ids_.erase(id); | |
447 } | |
448 | |
449 RETURN_ON_FAILURE(output_picture_buffers_.size() >= kNumPictureBuffers, | |
450 "Invalid picture buffers were passed.", | |
451 INVALID_ARGUMENT); | |
452 | |
453 DoIOTask(); | |
454 } | |
455 | |
456 void AndroidVideoDecodeAccelerator::ReusePictureBuffer( | |
457 int32 picture_buffer_id) { | |
458 DCHECK(thread_checker_.CalledOnValidThread()); | |
459 | |
460 // This ReusePictureBuffer() might have been in a pipe somewhere (queued in | |
461 // IPC, or in a PostTask either at the sender or receiver) when we sent a | |
462 // DismissPictureBuffer() for this |picture_buffer_id|. Account for such | |
463 // potential "zombie" IDs here. | |
464 if (dismissed_picture_ids_.erase(picture_buffer_id)) | |
465 return; | |
466 | |
467 free_picture_ids_.push(picture_buffer_id); | |
468 | |
469 DoIOTask(); | |
470 } | |
471 | |
472 void AndroidVideoDecodeAccelerator::Flush() { | |
473 DCHECK(thread_checker_.CalledOnValidThread()); | |
474 | |
475 Decode(media::BitstreamBuffer(-1, base::SharedMemoryHandle(), 0)); | |
476 } | |
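The EOS plumbing is split across three functions in this file; condensed, the sentinel queued here flows like this:

    // Flush(): queue a sentinel buffer with id -1 (above).
    Decode(media::BitstreamBuffer(-1, base::SharedMemoryHandle(), 0));

    // QueueInput(): an id of -1 becomes an end-of-stream marker.
    if (bitstream_buffer.id() == -1)
      media_codec_->QueueEOS(input_buf_index);

    // DequeueOutput(): when |eos| comes back true, NotifyFlushDone() is
    // posted back to the client.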
477 | |
478 bool AndroidVideoDecodeAccelerator::ConfigureMediaCodec() { | |
479 DCHECK(thread_checker_.CalledOnValidThread()); | |
480 DCHECK(surface_texture_.get()); | |
481 | |
482 gfx::ScopedJavaSurface surface(surface_texture_.get()); | |
483 | |
484 // Pass a dummy 320x240 canvas size and let the codec signal the real size | |
485 // when it's known from the bitstream. | |
486 media_codec_.reset(media::VideoCodecBridge::CreateDecoder( | |
487 codec_, false, gfx::Size(320, 240), surface.j_surface().obj(), NULL)); | |
488 if (!media_codec_) | |
489 return false; | |
490 | |
491 io_timer_.Start(FROM_HERE, | |
492 DecodePollDelay(), | |
493 this, | |
494 &AndroidVideoDecodeAccelerator::DoIOTask); | |
495 return true; | |
496 } | |
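The type of |io_timer_| is declared in the header, which is not part of this diff; given the Start(FROM_HERE, delay, receiver, method) overload used above, it is presumably the receiver-based repeating timer of this era of base/ (an assumption, shown here for orientation):

    // In android_video_decode_accelerator.h (assumed, not in this diff):
    #include "base/timer/timer.h"
    base::RepeatingTimer<AndroidVideoDecodeAccelerator> io_timer_;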
497 | |
498 void AndroidVideoDecodeAccelerator::Reset() { | |
499 DCHECK(thread_checker_.CalledOnValidThread()); | |
500 | |
501 while (!pending_bitstream_buffers_.empty()) { | |
502 int32 bitstream_buffer_id = pending_bitstream_buffers_.front().first.id(); | |
503 pending_bitstream_buffers_.pop(); | |
504 | |
505 if (bitstream_buffer_id != -1) { | |
506 base::MessageLoop::current()->PostTask( | |
507 FROM_HERE, | |
508 base::Bind(&AndroidVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer, | |
509 weak_this_factory_.GetWeakPtr(), | |
510 bitstream_buffer_id)); | |
511 } | |
512 } | |
513 bitstreams_notified_in_advance_.clear(); | |
514 | |
515 for (OutputBufferMap::iterator it = output_picture_buffers_.begin(); | |
516 it != output_picture_buffers_.end(); | |
517 ++it) { | |
518 client_->DismissPictureBuffer(it->first); | |
519 dismissed_picture_ids_.insert(it->first); | |
520 } | |
521 output_picture_buffers_.clear(); | |
522 std::queue<int32> empty; | |
523 std::swap(free_picture_ids_, empty); | |
524 CHECK(free_picture_ids_.empty()); | |
525 picturebuffers_requested_ = false; | |
526 | |
527 // On some devices, and up to at least JB-MR1, | |
528 // - flush() can fail after EOS (b/8125974); and | |
529 // - mid-stream resolution change is unsupported (b/7093648). | |
530 // To cope with these facts, we always stop & restart the codec on Reset(). | |
531 io_timer_.Stop(); | |
532 media_codec_->Stop(); | |
533 ConfigureMediaCodec(); | |
534 state_ = NO_ERROR; | |
535 | |
536 base::MessageLoop::current()->PostTask( | |
537 FROM_HERE, | |
538 base::Bind(&AndroidVideoDecodeAccelerator::NotifyResetDone, | |
539 weak_this_factory_.GetWeakPtr())); | |
540 } | 36 } |
541 | 37 |
542 void AndroidVideoDecodeAccelerator::Destroy() { | 38 void AndroidVideoDecodeAccelerator::Destroy() { |
543 DCHECK(thread_checker_.CalledOnValidThread()); | 39 if (copier_) |
544 | 40 copier_->Destroy(); |
545 weak_this_factory_.InvalidateWeakPtrs(); | 41 AndroidVideoDecodeAcceleratorBase::Destroy(); |
546 if (media_codec_) { | |
547 io_timer_.Stop(); | |
548 media_codec_->Stop(); | |
549 } | |
550 if (surface_texture_id_) | |
551 glDeleteTextures(1, &surface_texture_id_); | |
552 if (copier_) | |
553 copier_->Destroy(); | |
554 delete this; | |
555 } | 42 } |
556 | 43 |
557 bool AndroidVideoDecodeAccelerator::CanDecodeOnIOThread() { | 44 uint32 AndroidVideoDecodeAccelerator::GetNumPictureBuffers() const { |
558 return false; | 45 return kNumPictureBuffers; |
559 } | 46 } |
560 | 47 |
561 void AndroidVideoDecodeAccelerator::RequestPictureBuffers() { | 48 uint32 AndroidVideoDecodeAccelerator::GetTextureTarget() const { |
562 client_->ProvidePictureBuffers(kNumPictureBuffers, size_, GL_TEXTURE_2D); | 49 return GL_TEXTURE_2D; |
563 } | 50 } |
564 | 51 |
565 void AndroidVideoDecodeAccelerator::NotifyPictureReady( | 52 void AndroidVideoDecodeAccelerator::AssignCurrentSurfaceToPictureBuffer( |
566 const media::Picture& picture) { | 53 int32 codec_buf_index, const media::PictureBuffer& picture_buffer) { |
567 client_->PictureReady(picture); | |
568 } | |
569 | 54 |
570 void AndroidVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer( | 55 // Render the codec buffer into |surface_texture_|, and switch it to be |
571 int input_buffer_id) { | 56 // the front buffer. |
572 client_->NotifyEndOfBitstreamBuffer(input_buffer_id); | 57 // This ignores the emitted ByteBuffer and instead relies on rendering to |
573 } | 58 // the codec's SurfaceTexture and then copying from that texture to the |
| 59 // client's PictureBuffer's texture. This means that each picture's data |
| 60 // is written three times: once to the ByteBuffer, once to the |
| 61 // SurfaceTexture, and once to the client's texture. It would be nicer to |
| 62 // either: |
| 63 // 1) Render directly to the client's texture from MediaCodec (one write); |
| 64 // or |
| 65 // 2) Upload the ByteBuffer to the client's texture (two writes). |
| 66 // Unfortunately neither is possible: |
| 67 // 1) MediaCodec's use of SurfaceTexture is a singleton, and the texture |
| 68 // written to can't change during the codec's lifetime. b/11990461 |
| 69 // 2) The ByteBuffer is likely to contain the pixels in a vendor-specific, |
| 70 // opaque/non-standard format. It's not possible to negotiate the |
| 71 // decoder to emit a specific colorspace, even using HW CSC. b/10706245 |
| 72 // So, we live with these two extra copies per picture :( |
| 73 GetMediaCodec()->ReleaseOutputBuffer(codec_buf_index, true); |
| 74 GetSurfaceTexture()->UpdateTexImage(); |
574 | 75 |
575 void AndroidVideoDecodeAccelerator::NotifyFlushDone() { | 76 float transform_matrix[16]; |
576 client_->NotifyFlushDone(); | 77 GetSurfaceTexture()->GetTransformMatrix(transform_matrix); |
577 } | |
578 | 78 |
579 void AndroidVideoDecodeAccelerator::NotifyResetDone() { | 79 uint32 picture_buffer_texture_id = picture_buffer.texture_id(); |
580 client_->NotifyResetDone(); | |
581 } | |
582 | 80 |
583 void AndroidVideoDecodeAccelerator::NotifyError( | 81 // Defer initializing the CopyTextureCHROMIUMResourceManager until it is |
584 media::VideoDecodeAccelerator::Error error) { | 82 // needed because it takes tens of milliseconds to initialize. |
585 client_->NotifyError(error); | 83 if (!copier_) { |
586 } | 84 copier_.reset(new gpu::CopyTextureCHROMIUMResourceManager()); |
| 85 copier_->Initialize(GetGlDecoder()); |
| 86 } |
587 | 87 |
588 // static | 88 // Here, we copy |surface_texture_id_| to the picture buffer instead of |
589 media::VideoDecodeAccelerator::SupportedProfiles | 89 // setting a new texture on |surface_texture_| via attachToGLContext(), |
590 AndroidVideoDecodeAccelerator::GetSupportedProfiles() { | 90 // because: |
591 SupportedProfiles profiles; | 91 // 1. Once we call detachFromGLContext(), it deletes the previously |
592 | 92 // attached texture. |
593 if (!media::VideoCodecBridge::IsKnownUnaccelerated( | 93 // 2. SurfaceTexture requires us to apply a transform matrix when we show |
594 media::kCodecVP8, media::MEDIA_CODEC_DECODER)) { | 94 // the texture. |
595 SupportedProfile profile; | 95 // TODO(hkuang): get the StreamTexture transform matrix in GPU process |
596 profile.profile = media::VP8PROFILE_ANY; | 96 // instead of using default matrix crbug.com/226218. |
597 profile.min_resolution.SetSize(0, 0); | 97 const static GLfloat default_matrix[16] = {1.0f, 0.0f, 0.0f, 0.0f, |
598 profile.max_resolution.SetSize(1920, 1088); | 98 0.0f, 1.0f, 0.0f, 0.0f, |
599 profiles.push_back(profile); | 99 0.0f, 0.0f, 1.0f, 0.0f, |
600 } | 100 0.0f, 0.0f, 0.0f, 1.0f}; |
601 | 101 copier_->DoCopyTextureWithTransform(GetGlDecoder(), |
602 #if defined(ENABLE_MEDIA_PIPELINE_ON_ANDROID) | 102 GL_TEXTURE_EXTERNAL_OES, |
603 if (!media::VideoCodecBridge::IsKnownUnaccelerated( | 103 GetSurfaceTextureId(), |
604 media::kCodecVP9, media::MEDIA_CODEC_DECODER)) { | 104 picture_buffer_texture_id, |
605 SupportedProfile profile; | 105 GetSize().width(), |
606 profile.profile = media::VP9PROFILE_ANY; | 106 GetSize().height(), |
607 profile.min_resolution.SetSize(0, 0); | 107 false, |
608 profile.max_resolution.SetSize(1920, 1088); | 108 false, |
609 profiles.push_back(profile); | 109 false, |
610 } | 110 default_matrix); |
611 | |
612 for (const auto& supported_profile : kSupportedH264Profiles) { | |
613 SupportedProfile profile; | |
614 profile.profile = supported_profile; | |
615 profile.min_resolution.SetSize(0, 0); | |
616 // Advertise support for 4k and let the MediaCodec fail when decoding if it | |
617 // doesn't support the resolution. It's assumed that consumers won't have | |
618 // software fallback for H264 on Android anyway. | |
619 profile.max_resolution.SetSize(3840, 2160); | |
620 profiles.push_back(profile); | |
621 } | |
622 #endif | |
623 | |
624 return profiles; | |
625 } | 111 } |
626 | 112 |
627 } // namespace content | 113 } // namespace content |
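Editor's note on the NEW side: most state (codec, surface texture, GL decoder, thread checker) has moved into AndroidVideoDecodeAcceleratorBase, which is not shown in this diff. Inferred from the calls the new code makes, the base class exposes roughly the following; the method names come from the diff, but the signatures and the virtual/non-virtual split are guesses:

    // Hypothetical sketch of the base class interface (not the real header).
    class AndroidVideoDecodeAcceleratorBase
        : public media::VideoDecodeAccelerator {
     public:
      AndroidVideoDecodeAcceleratorBase(
          const base::WeakPtr<gpu::gles2::GLES2Decoder> decoder,
          const base::Callback<bool(void)>& make_context_current);
      virtual void Destroy() OVERRIDE;

     protected:
      // Accessors used by the subclass above.
      media::VideoCodecBridge* GetMediaCodec();
      gfx::SurfaceTexture* GetSurfaceTexture();
      uint32 GetSurfaceTextureId() const;
      gpu::gles2::GLES2Decoder* GetGlDecoder();
      gfx::Size GetSize() const;
      const base::ThreadChecker& ThreadChecker() const;

      // Customization points implemented by the subclass.
      virtual uint32 GetNumPictureBuffers() const = 0;
      virtual uint32 GetTextureTarget() const = 0;
      virtual void AssignCurrentSurfaceToPictureBuffer(
          int32 codec_buf_index,
          const media::PictureBuffer& picture_buffer) = 0;
    };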