// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/media/dxva_video_decode_accelerator.h"

#include <ks.h>
#include <codecapi.h>
#include <d3dx9tex.h>
#include <mfapi.h>
#include <mferror.h>
#include <wmcodecdsp.h>

#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_handle.h"
#include "base/memory/scoped_ptr.h"
#include "base/shared_memory.h"
#include "base/time.h"
#include "media/video/video_decode_accelerator.h"
#include "third_party/angle/include/EGL/egl.h"
// [Ami GONE FROM CHROMIUM, 2011/12/13 02:37:12] minimize headers?
// [ananta, 2011/12/13 02:51:18] Done.
#include "third_party/angle/include/GLES2/gl2.h"
#include "third_party/angle/include/GLES2/gl2ext.h"

namespace {

static const int kNumPictureBuffers = 5;
static const int kNumInputs = 100;
IMFSample* CreateEmptySample() {
  HRESULT hr = E_FAIL;
  base::win::ScopedComPtr<IMFSample> sample;
  hr = MFCreateSample(sample.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "Unable to create an empty sample";
    return NULL;
  }
  return sample.Detach();
}

// Creates a Media Foundation sample with one buffer of length |buffer_length|
// on an |align|-byte boundary. |align| must be a power of 2 or 0.
// If |align| is 0, then no alignment is specified.
IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
  CHECK_GT(buffer_length, 0);
  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateEmptySample());
  if (!sample.get())
    return NULL;
  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
  HRESULT hr = E_FAIL;
  if (align == 0) {
    // Note that MFCreateMemoryBuffer is the same as
    // MFCreateAlignedMemoryBuffer with the align argument being 0.
    hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
  } else {
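    // MFCreateAlignedMemoryBuffer expects the desired alignment minus one,
    // matching the MF_xx_BYTE_ALIGNMENT constants (e.g. MF_16_BYTE_ALIGNMENT
    // is 15), hence the |align| - 1 below.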
    hr = MFCreateAlignedMemoryBuffer(buffer_length,
                                     align - 1,
                                     buffer.Receive());
  }
  if (FAILED(hr)) {
    NOTREACHED() << "Unable to create an empty buffer";
    return NULL;
  }
  hr = sample->AddBuffer(buffer.get());
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to add empty buffer to sample";
    return NULL;
  }
  return sample.Detach();
}

// Creates a Media Foundation sample with one buffer containing a copy of the
// given Annex B stream data.
// If duration and sample time are not known, provide 0.
// |min_size| specifies the minimum size of the buffer (the decoder may
// require this for input). The times here should be given in 100ns units.
// |alignment| specifies the alignment of the buffer in the sample. If no
// alignment is required, provide 0 or 1.
static IMFSample* CreateInputSample(const uint8* stream, int size,
                                    int64 timestamp, int64 duration,
                                    int min_size, int alignment) {
  CHECK(stream);
  CHECK_GT(size, 0);
  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
                                            alignment));
  if (!sample.get()) {
    NOTREACHED() << "Failed to create empty buffer for input";
    return NULL;
  }
  HRESULT hr = E_FAIL;
  if (duration > 0) {
    hr = sample->SetSampleDuration(duration);
    if (FAILED(hr)) {
      NOTREACHED() << "Failed to set sample duration";
      return NULL;
    }
  }
  if (timestamp > 0) {
    hr = sample->SetSampleTime(timestamp);
    if (FAILED(hr)) {
      NOTREACHED() << "Failed to set sample time";
      return NULL;
    }
  }
  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
  hr = sample->GetBufferByIndex(0, buffer.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to get buffer in sample";
    return NULL;
  }
  DWORD max_length = 0, current_length = 0;
  uint8* destination = NULL;
  hr = buffer->Lock(&destination, &max_length, &current_length);
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to lock buffer";
    return NULL;
  }
  CHECK_EQ(current_length, 0u);
  CHECK_GE(static_cast<int>(max_length), size);
  memcpy(destination, stream, size);
  CHECK(SUCCEEDED(buffer->Unlock()));
  hr = buffer->SetCurrentLength(size);
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to set current length to " << size;
    return NULL;
  }
  hr = sample->SetUINT32(MFSampleExtension_CleanPoint, TRUE);
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to mark sample as key sample";
    return NULL;
  }
  return sample.Detach();
}

}  // namespace

DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
    media::VideoDecodeAccelerator::Client* client,
    base::ProcessHandle renderer_process)
    : client_(client),
      message_loop_(MessageLoop::current()),
      surface_width_(0),
      surface_height_(0),
      state_(kUninitialized),
      input_stream_info_(),
      output_stream_info_(),
      pictures_requested_(false),
      renderer_process_(renderer_process) {
  input_buffer_frame_times_.reserve(kNumInputs);

#if !defined(NDEBUG)
  decode_start_time_ = 0;
  inputs_before_decode_ = 0;
#endif  // !defined(NDEBUG)
}

DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() {
  DCHECK_EQ(message_loop_, MessageLoop::current());
  client_ = NULL;
  message_loop_ = NULL;
}

bool DXVAVideoDecodeAccelerator::Initialize(Profile profile) {
  DCHECK_EQ(message_loop_, MessageLoop::current());

  if (state_ != kUninitialized) {
    NOTREACHED() << "Initialize: invalid state: " << state_;
    return false;
  }

  HRESULT hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
  if (FAILED(hr)) {
    NOTREACHED() << "MFStartup failed. Error: "
                 << std::hex << std::showbase << hr;
    return false;
  }
  if (!CreateD3DDevManager())
    return false;
  if (!InitDecoder())
    return false;
  if (!GetStreamsInfoAndBufferReqs())
    return false;
  if (SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0)) {
    state_ = DXVAVideoDecodeAccelerator::kNormal;
    client_->NotifyInitializeDone();
    return true;
  }
  return false;
}

void DXVAVideoDecodeAccelerator::Decode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DCHECK_EQ(message_loop_, MessageLoop::current());
  if (state_ == DXVAVideoDecodeAccelerator::kUninitialized) {
    NOTREACHED() << "Decode: invalid state";
    return;
  }

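  // The bitstream buffer handle belongs to the renderer process; duplicate
  // it into this process before the shared memory can be mapped here.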
  HANDLE shared_memory_handle = NULL;
  if (!::DuplicateHandle(renderer_process_,
                         bitstream_buffer.handle(),
                         ::GetCurrentProcess(),
                         &shared_memory_handle,
                         0,
                         FALSE,
                         DUPLICATE_SAME_ACCESS)) {
    NOTREACHED() << "Failed to open duplicate shared mem handle";
    return;
  }

  base::SharedMemory shm(shared_memory_handle, true);
  if (!shm.Map(bitstream_buffer.size())) {
    NOTREACHED() << "Failed in SharedMemory::Map()";
    return;
  }

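  // Media Foundation times are in 100ns units (see CreateInputSample above):
  // the current wall-clock second scaled by 10^7 serves as the sample time,
  // and 400000 (40ms) as a nominal frame duration.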
  base::Time::Exploded exploded;
  base::Time::Now().LocalExplode(&exploded);
  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateInputSample(reinterpret_cast<const uint8*>(shm.memory()),
                                  bitstream_buffer.size(),
                                  exploded.second * 10000000,
                                  400000,
                                  input_stream_info_.cbSize,
                                  input_stream_info_.cbAlignment));
  if (!sample.get()) {
    NOTREACHED() << "Failed to create an input sample";
    return;
  } else {
#if !defined(NDEBUG)
    inputs_before_decode_++;
    if (!decode_start_time_)
      decode_start_time_ = ::GetTickCount();
#endif  // !defined(NDEBUG)
    SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0);

    if (FAILED(decoder_->ProcessInput(0, sample.get(), 0))) {
      NOTREACHED() << "Failed to process input";
      return;
    }
  }
  if (state_ != DXVAVideoDecodeAccelerator::kEosDrain) {
    if (!SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0)) {
      NOTREACHED() << "Failed to send eos message to MFT";
    } else {
      state_ = DXVAVideoDecodeAccelerator::kEosDrain;
    }
  }
  input_buffer_frame_times_.push_back(bitstream_buffer.id());
  DoDecode();
  client_->NotifyEndOfBitstreamBuffer(bitstream_buffer.id());
}

void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
    const std::vector<media::PictureBuffer>& buffers) {
  DCHECK_EQ(message_loop_, MessageLoop::current());
  // Copy the picture buffers provided by the client to the available list,
  // and mark these buffers as available for use.
  for (size_t buffer_index = 0; buffer_index < buffers.size();
       ++buffer_index) {
    DXVAPictureBuffer picture_buffer;
    picture_buffer.available = true;
    picture_buffer.picture_buffer = buffers[buffer_index];

    DCHECK(available_pictures_.find(buffers[buffer_index].id()) ==
           available_pictures_.end());
    available_pictures_[buffers[buffer_index].id()] = picture_buffer;
  }

  ProcessPendingSamples();
}

void DXVAVideoDecodeAccelerator::ReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK_EQ(message_loop_, MessageLoop::current());
  DCHECK(available_pictures_.find(picture_buffer_id) !=
         available_pictures_.end());
  available_pictures_[picture_buffer_id].available = true;

  PendingOutputSamples::iterator sample_index =
      pending_output_samples_.begin();

  if (sample_index != pending_output_samples_.end()) {
    const PendingSampleInfo& sample_info = *sample_index;
    CopyOutputSampleDataToPictureBuffer(
        sample_info.surface,
        available_pictures_[picture_buffer_id].picture_buffer,
        sample_info.input_buffer_id);
    available_pictures_[picture_buffer_id].available = false;
    pending_output_samples_.erase(sample_index);
  }
}

void DXVAVideoDecodeAccelerator::Flush() {
  DCHECK_EQ(message_loop_, MessageLoop::current());

  LOG(INFO) << "DXVAVideoDecodeAccelerator::Flush";
  if (state_ == DXVAVideoDecodeAccelerator::kUninitialized) {
    NOTREACHED() << "Flush: invalid state";
    client_->NotifyFlushDone();
    return;
  }

  if (state_ == DXVAVideoDecodeAccelerator::kStopped) {
    LOG(INFO) << "No data available from the decoder";
    client_->NotifyFlushDone();
    return;
  }

  state_ = DXVAVideoDecodeAccelerator::kEosDrain;
  if (!SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0)) {
    LOG(WARNING) << "Failed to send drain message";
    state_ = DXVAVideoDecodeAccelerator::kStopped;
    client_->NotifyFlushDone();
    return;
  }

  // Per the MSDN docs, after the client sends this message it calls
  // IMFTransform::ProcessOutput in a loop until ProcessOutput returns the
  // error code MF_E_TRANSFORM_NEED_MORE_INPUT. DoDecode sets the state to
  // DXVAVideoDecodeAccelerator::kStopped when the decoder returns
  // MF_E_TRANSFORM_NEED_MORE_INPUT.
  while (state_ != DXVAVideoDecodeAccelerator::kStopped) {
    DoDecode();
  }
  client_->NotifyFlushDone();
}

void DXVAVideoDecodeAccelerator::Reset() {
  DCHECK_EQ(message_loop_, MessageLoop::current());
  LOG(INFO) << "DXVAVideoDecodeAccelerator::Reset";
  if (state_ != kNormal && state_ != kStopped) {
    NOTREACHED() << "Reset: invalid state";
    client_->NotifyResetDone();
    return;
  }

  state_ = DXVAVideoDecodeAccelerator::kResetting;
  if (!SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0)) {
    LOG(WARNING) << "DXVAVideoDecodeAccelerator::Reset failed to send message";
    client_->NotifyResetDone();
    return;
  }
  state_ = DXVAVideoDecodeAccelerator::kNormal;
  client_->NotifyResetDone();
  input_buffer_frame_times_.clear();
}

void DXVAVideoDecodeAccelerator::Destroy() {
  DCHECK_EQ(message_loop_, MessageLoop::current());
  OutputBuffers::iterator index;
  for (index = available_pictures_.begin();
       index != available_pictures_.end();
       ++index) {
    client_->DismissPictureBuffer(index->second.picture_buffer.id());
  }
  available_pictures_.clear();
  pending_output_samples_.clear();
  input_buffer_frame_times_.clear();
}

bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() {
  d3d9_.Attach(Direct3DCreate9(D3D_SDK_VERSION));
  if (d3d9_.get() == NULL) {
    NOTREACHED() << "Failed to create D3D9";
    return false;
  }

  D3DPRESENT_PARAMETERS present_params = {0};
  present_params.BackBufferWidth = 0;
  present_params.BackBufferHeight = 0;
  present_params.BackBufferFormat = D3DFMT_UNKNOWN;
  present_params.BackBufferCount = 1;
  present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
  present_params.hDeviceWindow = GetShellWindow();
  present_params.Windowed = TRUE;
  present_params.Flags = D3DPRESENTFLAG_VIDEO;
  present_params.FullScreen_RefreshRateInHz = 0;
  present_params.PresentationInterval = 0;

  HRESULT hr = d3d9_->CreateDevice(D3DADAPTER_DEFAULT,
                                   D3DDEVTYPE_HAL,
                                   GetShellWindow(),
                                   (D3DCREATE_HARDWARE_VERTEXPROCESSING |
                                    D3DCREATE_MULTITHREADED),
                                   &present_params,
                                   device_.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to create D3D device";
    return false;
  }

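  // The decoder MFT accesses the D3D device only through this device
  // manager; the reset token returned by DXVA2CreateDirect3DDeviceManager9
  // must be passed back to ResetDevice() to associate the device with the
  // manager.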
  UINT dev_manager_reset_token = 0;
  hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token,
                                         device_manager_.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "Couldn't create D3D device manager";
    return false;
  }

  hr = device_manager_->ResetDevice(device_.get(),
                                    dev_manager_reset_token);
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to set device to device manager";
    return false;
  }
  return true;
}

bool DXVAVideoDecodeAccelerator::InitDecoder() {
  HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT),
                                NULL,
                                CLSCTX_INPROC_SERVER,
                                __uuidof(IMFTransform),
                                reinterpret_cast<void**>(decoder_.Receive()));
  if (FAILED(hr) || !decoder_.get()) {
    NOTREACHED() << "CoCreateInstance failed "
                 << std::hex << std::showbase << hr;
    return false;
  }

  if (!CheckDecoderDxvaSupport())
    return false;
  hr = decoder_->ProcessMessage(
      MFT_MESSAGE_SET_D3D_MANAGER,
      reinterpret_cast<ULONG_PTR>(device_manager_.get()));
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to pass D3D9 device to decoder "
                 << std::hex << hr;
    return false;
  }
  return SetDecoderMediaTypes();
}

bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
  base::win::ScopedComPtr<IMFAttributes> attributes;
  HRESULT hr = decoder_->GetAttributes(attributes.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to get decoder attributes, hr = "
                 << std::hex << std::showbase << hr;
    return false;
  }
  UINT32 dxva = 0;
  hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
  if (FAILED(hr) || !dxva) {
    NOTREACHED() << "Failed to get DXVA attr. Error: "
                 << std::hex << std::showbase << hr
                 << ". This might not be the right decoder.";
    return false;
  }

  hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE);
  DCHECK(SUCCEEDED(hr));
  return true;
}

bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() {
  if (!SetDecoderInputMediaType())
    return false;
  return SetDecoderOutputMediaType(MFVideoFormat_NV12);
}

bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() {
  base::win::ScopedComPtr<IMFMediaType> media_type;
  HRESULT hr = MFCreateMediaType(media_type.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to create empty media type object";
    return false;
  }

  hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
  if (FAILED(hr)) {
    NOTREACHED() << "SetGUID for major type failed";
    return false;
  }

  hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
  if (FAILED(hr)) {
    NOTREACHED() << "SetGUID for subtype failed";
    return false;
  }

  hr = decoder_->SetInputType(0, media_type.get(), 0);  // No flags.
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to set decoder's input type";
    return false;
  }
  return true;
}

bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType(
    const GUID& subtype) {
  DWORD i = 0;
  IMFMediaType* out_media_type = NULL;
  while (SUCCEEDED(decoder_->GetOutputAvailableType(0, i, &out_media_type))) {
    GUID out_subtype = {0};
    HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
    if (FAILED(hr)) {
      NOTREACHED() << "Failed to GetGUID() on GetOutputAvailableType() " << i;
      out_media_type->Release();
      // Advance to the next type; continuing with the same index would
      // retry the same type forever.
      i++;
      continue;
    }
    if (out_subtype == subtype) {
      hr = decoder_->SetOutputType(0, out_media_type, 0);  // No flags.
      hr = MFGetAttributeSize(out_media_type, MF_MT_FRAME_SIZE,
                              reinterpret_cast<UINT32*>(&surface_width_),
                              reinterpret_cast<UINT32*>(&surface_height_));
      if (FAILED(hr)) {
        NOTREACHED() << "Failed to SetOutputType to |subtype| or obtain "
                     << "width/height " << std::hex << hr;
      }
      out_media_type->Release();
      return true;
    }
    i++;
    out_media_type->Release();
  }
  return false;
}

bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg,
                                                int32 param) {
  HRESULT hr = decoder_->ProcessMessage(msg, param);
  return SUCCEEDED(hr);
}

// Gets the minimum buffer sizes for input and output samples.
// The MFT will not allocate buffers for either input or output, so we have
// to do it ourselves and make sure they're the correct size. The exception
// is when DXVA is enabled: then the decoder allocates the output.
bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() {
  HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to get input stream info";
    return false;
  }
  LOG(INFO) << "Input stream info: ";
  LOG(INFO) << "Max latency: " << input_stream_info_.hnsMaxLatency;
  // There should be three flags, one for requiring a whole frame be in a
  // single sample, one for requiring there be one buffer only in a single
  // sample, and one that specifies a fixed sample size (as in cbSize).
  LOG(INFO) << "Flags: "
            << std::hex << std::showbase << input_stream_info_.dwFlags;
  CHECK_EQ(input_stream_info_.dwFlags, 0x7u);
  LOG(INFO) << "Min buffer size: " << input_stream_info_.cbSize;
  LOG(INFO) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
  LOG(INFO) << "Alignment: " << input_stream_info_.cbAlignment;

  hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to get output stream info";
    return false;
  }
  LOG(INFO) << "Output stream info: ";
  // The flags here should be the same and mean the same thing, except when
  // DXVA is enabled, there is an extra 0x100 flag meaning the decoder will
  // allocate its own sample.
  LOG(INFO) << "Flags: "
            << std::hex << std::showbase << output_stream_info_.dwFlags;
  CHECK_EQ(output_stream_info_.dwFlags, 0x107u);
  LOG(INFO) << "Min buffer size: " << output_stream_info_.cbSize;
  LOG(INFO) << "Alignment: " << output_stream_info_.cbAlignment;
  return true;
}

bool DXVAVideoDecodeAccelerator::DoDecode() {
  if (state_ != kNormal && state_ != kEosDrain) {
    NOTREACHED() << "DoDecode: not in normal or drain state";
    return false;
  }
  base::win::ScopedComPtr<IMFSample> output_sample;
  MFT_OUTPUT_DATA_BUFFER output_data_buffer;
  DWORD status = 0;

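  // In DXVA mode the decoder allocates its own output samples
  // (MFT_OUTPUT_STREAM_PROVIDES_SAMPLES, the 0x100 flag checked in
  // GetStreamsInfoAndBufferReqs), so pSample is effectively NULL here and
  // is filled in by ProcessOutput.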
  memset(&output_data_buffer, 0, sizeof(output_data_buffer));
  output_data_buffer.pSample = output_sample;
  status = 0;

  HRESULT hr = decoder_->ProcessOutput(0,  // No flags.
                                       1,  // # of out streams to pull from.
                                       &output_data_buffer,
                                       &status);
  IMFCollection* events = output_data_buffer.pEvents;
  if (events != NULL) {
    LOG(INFO) << "Got events from ProcessOutput, but discarding";
    events->Release();
  }
  if (FAILED(hr)) {
    if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
      // SetDecoderOutputMediaType returns a bool, not an HRESULT; check it
      // directly. (SUCCEEDED(false) would be true, since false converts to
      // S_OK.)
      if (SetDecoderOutputMediaType(MFVideoFormat_NV12))
        return true;
      NOTREACHED() << "Failed to set decoder output media type";
      return false;
    } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
      // No more output from the decoder. Notify EOS and stop playback.
      state_ = DXVAVideoDecodeAccelerator::kStopped;
      return false;
    } else {
      NOTREACHED() << "Unhandled error in DoDecode()";
      state_ = DXVAVideoDecodeAccelerator::kStopped;
      return false;
    }
  }

#if !defined(NDEBUG)
  DLOG(INFO) << "Number of input packets before successful decode: "
             << inputs_before_decode_;
  inputs_before_decode_ = 0;
  uint32 end_decode = GetTickCount();
  DLOG(INFO) << "Total time for decode: "
             << end_decode - decode_start_time_;
  decode_start_time_ = 0;
#endif  // !defined(NDEBUG)
  if (!ProcessOutputSample(output_data_buffer.pSample)) {
    state_ = DXVAVideoDecodeAccelerator::kStopped;
    return false;
  }

  input_buffer_frame_times_.clear();
  state_ = DXVAVideoDecodeAccelerator::kNormal;
  return true;
}

bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) {
  if (!sample) {
    NOTREACHED() << "ProcessOutput succeeded, but did not get a sample back";
    return false;
  }
  base::win::ScopedComPtr<IMFSample> output_sample;
  output_sample.Attach(sample);

  if (!input_buffer_frame_times_.size()) {
    DLOG(INFO) << "No input buffers left to process output";
    return false;
  }
  int32 input_buffer_id =
      input_buffer_frame_times_[input_buffer_frame_times_.size() - 1];
  input_buffer_frame_times_.clear();

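  // For a DXVA-backed media buffer, MFGetService with MR_BUFFER_SERVICE
  // retrieves the underlying Direct3D surface holding the decoded frame.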
  base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
  HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to get buffer from sample";
    return false;
  }
  base::win::ScopedComPtr<IDirect3DSurface9> surface;
  hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
                    IID_PPV_ARGS(surface.Receive()));
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to get surface from buffer";
    return false;
  }

  D3DSURFACE_DESC surface_desc;
  hr = surface->GetDesc(&surface_desc);
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to get surface description";
    return false;
  }

#if !defined(NDEBUG)
  DWORD start_surface_time = GetTickCount();
#endif  // !defined(NDEBUG)

  // TODO(ananta)
  // The code below may not be necessary once we have an ANGLE extension which
  // allows us to pass the Direct3D surface directly for rendering.

  // The decoded bits in the source Direct3D surface are in the YUV
  // format. ANGLE does not support that. As a workaround we create an
  // offscreen surface in the RGB format and copy the source surface
  // to this surface.
  base::win::ScopedComPtr<IDirect3DSurface9> dest_surface;
  hr = device_->CreateOffscreenPlainSurface(surface_desc.Width,
                                            surface_desc.Height,
                                            D3DFMT_A8R8G8B8,
                                            D3DPOOL_DEFAULT,
                                            dest_surface.Receive(),
                                            NULL);
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to create offscreen surface";
    return false;
  }

  hr = D3DXLoadSurfaceFromSurface(dest_surface, NULL, NULL, surface, NULL,
                                  NULL, 0, 0);
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to copy source surface to dest.";
    return false;
  }

#if !defined(NDEBUG)
  DWORD end_surface_time = GetTickCount();
  DLOG(INFO) << "Time to create and copy new surface is "
             << end_surface_time - start_surface_time;
#endif  // !defined(NDEBUG)

  PendingSampleInfo sample_info;
  sample_info.input_buffer_id = input_buffer_id;
  sample_info.surface = dest_surface;
  pending_output_samples_.push_back(sample_info);

  // If a picture buffer is available to copy the output data into, use the
  // first one and flag it as no longer available for use.
  if (available_pictures_.size()) {
    ProcessPendingSamples();
    return true;
  }

  if (pictures_requested_) {
    DLOG(INFO) << "Waiting for picture slots from the client.";
    return true;
  }
  // Go ahead and request picture buffers.
  client_->ProvidePictureBuffers(
      kNumPictureBuffers, gfx::Size(surface_desc.Width, surface_desc.Height));
  pictures_requested_ = true;
  return true;
}

bool DXVAVideoDecodeAccelerator::CopyOutputSampleDataToPictureBuffer(
    IDirect3DSurface9* dest_surface, media::PictureBuffer picture_buffer,
    int32 input_buffer_id) {
  DCHECK(dest_surface);

  // Get the currently loaded bitmap from the DC.
  HDC hdc = NULL;
  HRESULT hr = dest_surface->GetDC(&hdc);
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to get HDC for dest offscreen surface";
    return false;
  }

  HBITMAP bitmap =
      reinterpret_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP));
  if (!bitmap) {
    NOTREACHED() << "Failed to get bitmap from DC";
    return false;
  }
  // TODO(ananta)
  // The code below may not be necessary once we have an ANGLE extension which
  // allows us to pass the Direct3D surface directly for rendering.

  // The device-dependent bitmap is upside down for OpenGL. We convert the
  // bitmap to a DIB and render it on the texture instead.
  BITMAP bitmap_basic_info = {0};
  GetObject(bitmap, sizeof(BITMAP), &bitmap_basic_info);

  BITMAPINFO bitmap_info = {0};
  bitmap_info.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
  bitmap_info.bmiHeader.biWidth = bitmap_basic_info.bmWidth;
  bitmap_info.bmiHeader.biHeight = bitmap_basic_info.bmHeight;
  bitmap_info.bmiHeader.biPlanes = 1;
  bitmap_info.bmiHeader.biBitCount = bitmap_basic_info.bmBitsPixel;
  bitmap_info.bmiHeader.biCompression = BI_RGB;
  bitmap_info.bmiHeader.biSizeImage = 0;
  bitmap_info.bmiHeader.biClrUsed = 0;

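  // The first GetDIBits call, with zero scan lines, only fills in
  // bitmap_info (including biSizeImage); the second call below copies the
  // actual pixel data.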
  int ret = GetDIBits(hdc, bitmap, 0, 0, NULL, &bitmap_info, DIB_RGB_COLORS);
  if (bitmap_info.bmiHeader.biSizeImage <= 0) {
    NOTREACHED() << "Failed to read bitmap size";
    return false;
  }
  // Use scoped_array rather than scoped_ptr, since the memory is allocated
  // with new[].
  scoped_array<char> bits(new char[bitmap_info.bmiHeader.biSizeImage]);
  ret = GetDIBits(hdc, bitmap, 0, bitmap_basic_info.bmHeight, bits.get(),
                  &bitmap_info, DIB_RGB_COLORS);
  DCHECK_GT(ret, 0);

  D3DSURFACE_DESC surface_desc;
  hr = dest_surface->GetDesc(&surface_desc);
  DCHECK(SUCCEEDED(hr));

  glBindTexture(GL_TEXTURE_2D, picture_buffer.texture_id());
  glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT, surface_desc.Width,
               surface_desc.Height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE,
               reinterpret_cast<GLvoid*>(bits.get()));
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
  glBindTexture(GL_TEXTURE_2D, 0);
// [apatrick_chromium, 2011/12/13 02:00:13] You should restore the texture
// binding to whatever it was previously, rather than binding 0.
// [ananta, 2011/12/13 02:51:18] Dunno how to restore the binding. Will look
// into this.
// [ananta, 2011/12/13 23:29:15] Done.
  dest_surface->ReleaseDC(hdc);

  media::Picture output_picture(picture_buffer.id(), input_buffer_id);
  client_->PictureReady(output_picture);
  return true;
}

void DXVAVideoDecodeAccelerator::ProcessPendingSamples() {
  PendingOutputSamples::iterator sample_index =
      pending_output_samples_.begin();

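  // Pair each pending output sample with the first available picture
  // buffer; stop as soon as no free picture buffer remains.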
  while (sample_index != pending_output_samples_.end()) {
    const PendingSampleInfo& sample_info = *sample_index;
    OutputBuffers::iterator index;
    for (index = available_pictures_.begin();
         index != available_pictures_.end();
         ++index) {
      if (index->second.available) {
        CopyOutputSampleDataToPictureBuffer(sample_info.surface,
                                            index->second.picture_buffer,
                                            sample_info.input_buffer_id);
        index->second.available = false;
        sample_index = pending_output_samples_.erase(sample_index);
        break;
      }
    }
    if (index == available_pictures_.end()) {
      DLOG(INFO) << "No available picture slots for output";
      break;
    }
  }
}