OLD | NEW |
---|---|
(Empty) | |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "content/common/gpu/media/dxva_video_decode_accelerator.h" | |
6 | |
7 #if !defined(OS_WIN) | |
8 #error This file should only be built on Windows. | |
9 #endif // !defined(OS_WIN) | |
10 | |
11 #include <ks.h> | |
12 #include <codecapi.h> | |
13 #include <d3dx9tex.h> | |
14 #include <mfapi.h> | |
15 #include <mferror.h> | |
16 #include <wmcodecdsp.h> | |
17 | |
18 #include "base/bind.h" | |
19 #include "base/callback.h" | |
20 #include "base/debug/trace_event.h" | |
21 #include "base/logging.h" | |
22 #include "base/memory/scoped_handle.h" | |
23 #include "base/memory/scoped_ptr.h" | |
24 #include "base/message_loop.h" | |
25 #include "base/process_util.h" | |
26 #include "base/shared_memory.h" | |
27 #include "media/video/video_decode_accelerator.h" | |
28 #include "third_party/angle/include/GLES2/gl2.h" | |
29 #include "third_party/angle/include/GLES2/gl2ext.h" | |
30 | |
// We only request 5 picture buffers from the client which are used to hold the
// decoded samples. These buffers are then reused when the client tells us that
// it is done with the buffer.
static const int kNumPictureBuffers = 5;

// Logs |log| via DLOG and returns |ret| from the calling function when
// |result| evaluates to false.
#define RETURN_ON_FAILURE(result, log, ret)  \
  do {                                       \
    if (!(result)) {                         \
      DLOG(ERROR) << log;                    \
      return ret;                            \
    }                                        \
  } while (0)

// HRESULT flavor of RETURN_ON_FAILURE: fails when FAILED(result) and appends
// the HRESULT value to the log message.
// Note: |result| is evaluated twice, so only pass a plain variable.
// No trailing semicolon: the expansion must be a single statement so that
// brace-less if/else around call sites stays well-formed.
#define RETURN_ON_HR_FAILURE(result, log, ret)                          \
  RETURN_ON_FAILURE(SUCCEEDED(result),                                  \
                    log << ", HRESULT: 0x" << std::hex << result,       \
                    ret)

// As RETURN_ON_FAILURE, but additionally reports |error_code| to the client
// through StopOnError() before returning. Only usable inside members of
// DXVAVideoDecodeAccelerator (relies on StopOnError being in scope).
#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret)  \
  do {                                                              \
    if (!(result)) {                                                \
      DVLOG(1) << log;                                              \
      StopOnError(error_code);                                      \
      return ret;                                                   \
    }                                                               \
  } while (0)

// HRESULT flavor of RETURN_AND_NOTIFY_ON_FAILURE.
// Note: |result| is evaluated twice, so only pass a plain variable.
#define RETURN_AND_NOTIFY_ON_HR_FAILURE(result, log, error_code, ret)  \
  RETURN_AND_NOTIFY_ON_FAILURE(SUCCEEDED(result),                      \
      log << ", HRESULT: 0x" << std::hex << result,                    \
      error_code, ret)
62 | |
63 static IMFSample* CreateEmptySample() { | |
64 base::win::ScopedComPtr<IMFSample> sample; | |
65 HRESULT hr = MFCreateSample(sample.Receive()); | |
66 RETURN_ON_HR_FAILURE(hr, "MFCreateSample failed", NULL); | |
67 return sample.Detach(); | |
68 } | |
69 | |
70 // Creates a Media Foundation sample with one buffer of length |buffer_length| | |
71 // on a |align|-byte boundary. Alignment must be a perfect power of 2 or 0. | |
72 static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) { | |
73 CHECK_GT(buffer_length, 0); | |
74 | |
75 base::win::ScopedComPtr<IMFSample> sample; | |
76 sample.Attach(CreateEmptySample()); | |
77 | |
78 base::win::ScopedComPtr<IMFMediaBuffer> buffer; | |
79 HRESULT hr = E_FAIL; | |
80 if (align == 0) { | |
81 // Note that MFCreateMemoryBuffer is same as MFCreateAlignedMemoryBuffer | |
82 // with the align argument being 0. | |
83 hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive()); | |
84 } else { | |
85 hr = MFCreateAlignedMemoryBuffer(buffer_length, | |
86 align - 1, | |
87 buffer.Receive()); | |
88 } | |
89 RETURN_ON_HR_FAILURE(hr, "Failed to create memory buffer for sample", NULL); | |
90 | |
91 hr = sample->AddBuffer(buffer); | |
92 RETURN_ON_HR_FAILURE(hr, "Failed to add buffer to sample", NULL); | |
93 | |
94 return sample.Detach(); | |
95 } | |
96 | |
97 // Creates a Media Foundation sample with one buffer containing a copy of the | |
98 // given Annex B stream data. | |
99 // If duration and sample time are not known, provide 0. | |
100 // |min_size| specifies the minimum size of the buffer (might be required by | |
101 // the decoder for input). If no alignment is required, provide 0. | |
102 static IMFSample* CreateInputSample(const uint8* stream, int size, | |
103 int min_size, int alignment) { | |
104 CHECK(stream); | |
105 CHECK_GT(size, 0); | |
106 base::win::ScopedComPtr<IMFSample> sample; | |
107 sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size), | |
108 alignment)); | |
109 RETURN_ON_FAILURE(sample, "Failed to create empty sample", NULL); | |
110 | |
111 base::win::ScopedComPtr<IMFMediaBuffer> buffer; | |
112 HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive()); | |
113 RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from sample", NULL); | |
114 | |
115 DWORD max_length = 0; | |
116 DWORD current_length = 0; | |
117 uint8* destination = NULL; | |
118 hr = buffer->Lock(&destination, &max_length, ¤t_length); | |
119 RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", NULL); | |
120 | |
121 CHECK_EQ(current_length, 0u); | |
122 CHECK_GE(static_cast<int>(max_length), size); | |
123 memcpy(destination, stream, size); | |
124 | |
125 hr = buffer->Unlock(); | |
126 RETURN_ON_HR_FAILURE(hr, "Failed to unlock buffer", NULL); | |
127 | |
128 hr = buffer->SetCurrentLength(size); | |
129 RETURN_ON_HR_FAILURE(hr, "Failed to set buffer length", NULL); | |
130 | |
131 hr = sample->SetUINT32(MFSampleExtension_CleanPoint, TRUE); | |
132 RETURN_ON_HR_FAILURE(hr, "Failed to mark sample as key frame", NULL); | |
133 | |
134 return sample.Detach(); | |
135 } | |
136 | |
137 static IMFSample* CreateSampleFromInputBuffer( | |
138 const media::BitstreamBuffer& bitstream_buffer, | |
139 base::ProcessHandle renderer_process, | |
140 DWORD stream_size, | |
141 DWORD alignment) { | |
142 HANDLE shared_memory_handle = NULL; | |
143 RETURN_ON_FAILURE(::DuplicateHandle(renderer_process, | |
144 bitstream_buffer.handle(), | |
145 base::GetCurrentProcessHandle(), | |
146 &shared_memory_handle, | |
147 0, | |
148 FALSE, | |
149 DUPLICATE_SAME_ACCESS), | |
150 "Duplicate handle failed", NULL); | |
151 | |
152 base::SharedMemory shm(shared_memory_handle, true); | |
153 RETURN_ON_FAILURE(shm.Map(bitstream_buffer.size()), | |
154 "Failed in base::SharedMemory::Map", NULL); | |
155 | |
156 return CreateInputSample(reinterpret_cast<const uint8*>(shm.memory()), | |
157 bitstream_buffer.size(), | |
158 stream_size, | |
159 alignment); | |
160 } | |
161 | |
// A client-provided picture buffer starts out available for output.
DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer(
    const media::PictureBuffer& buffer)
    : available(true),
      picture_buffer(buffer) {
}

// Records a decoded surface together with the id of the input bitstream
// buffer it was produced from, until a picture buffer becomes available.
DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
    int32 buffer_id, IDirect3DSurface9* surface)
    : input_buffer_id(buffer_id),
      dest_surface(surface) {
}

DXVAVideoDecodeAccelerator::PendingSampleInfo::~PendingSampleInfo() {}
175 | |
176 // static | |
177 void DXVAVideoDecodeAccelerator::LoadDecodingDlls() { | |
178 static bool initialized_dlls = false; | |
179 if (initialized_dlls) | |
180 return; | |
181 | |
182 static wchar_t* decoding_dlls[] = { | |
183 L"d3d9.dll", | |
184 L"d3dx9_43.dll", | |
185 L"dxva2.dll", | |
186 L"mf.dll", | |
187 L"mfplat.dll", | |
188 L"msmpeg2vdec.dll", | |
189 }; | |
190 | |
191 for (int i = 0; i < arraysize(decoding_dlls); ++i) { | |
192 LoadLibrary(decoding_dlls[i]); | |
Ami GONE FROM CHROMIUM
2011/12/21 06:13:55
What about failure? Set some global var preventin
ananta
2011/12/21 07:04:55
Done. Failure to load here will prevent initializa
| |
193 } | |
194 initialized_dlls = true; | |
195 } | |
196 | |
// Constructs the accelerator in the kUninitialized state. |client| receives
// all notifications; |renderer_process| is used to duplicate shared-memory
// handles for incoming bitstream buffers.
DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
    media::VideoDecodeAccelerator::Client* client,
    base::ProcessHandle renderer_process)
    : client_(client),
      state_(kUninitialized),
      pictures_requested_(false),
      renderer_process_(renderer_process),
      dev_manager_reset_token_(0),
      last_input_buffer_id_(-1),
      inputs_before_decode_(0) {
  // Best effort to load required decoding dlls. A failure here surfaces
  // later when Initialize() cannot find the decoder module.
  LoadDecodingDlls();
}
210 | |
DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() {
  // Drop the client pointer so no further callbacks are attempted from
  // tasks that run after destruction begins.
  client_ = NULL;
}
214 | |
// Brings up the full decode pipeline: starts Media Foundation, creates the
// D3D device manager, instantiates and configures the H.264 decoder MFT,
// queries stream/buffer requirements and tells the MFT to begin streaming.
// On any failure the client is notified via StopOnError and false returned.
bool DXVAVideoDecodeAccelerator::Initialize(Profile) {
  DCHECK(CalledOnValidThread());

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kUninitialized),
      "Initialize: invalid state: " << state_, ILLEGAL_STATE, false);

  // Balanced by MFShutdown() in Invalidate().
  HRESULT hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "MFStartup failed.", PLATFORM_FAILURE,
      false);

  RETURN_AND_NOTIFY_ON_FAILURE(CreateD3DDevManager(),
      "Failed to create device manager", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(InitDecoder(),
      "Failed to initialize decoder", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(GetStreamsInfoAndBufferReqs(),
      "Failed to get input/output stream info.", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0),
      "Failed to start decoder", PLATFORM_FAILURE, false);

  state_ = kNormal;
  // Notify asynchronously so the client observes completion after this call
  // returns.
  MessageLoop::current()->PostTask(FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::NotifyInitializeDone, this));
  return true;
}
243 | |
244 void DXVAVideoDecodeAccelerator::Decode( | |
245 const media::BitstreamBuffer& bitstream_buffer) { | |
246 DCHECK(CalledOnValidThread()); | |
247 | |
248 RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped), | |
249 "Invalid state: " << state_, ILLEGAL_STATE,); | |
250 | |
251 base::win::ScopedComPtr<IMFSample> sample; | |
252 sample.Attach(CreateSampleFromInputBuffer(bitstream_buffer, | |
253 renderer_process_, | |
254 input_stream_info_.cbSize, | |
255 input_stream_info_.cbAlignment)); | |
256 RETURN_AND_NOTIFY_ON_FAILURE(sample, "Failed to create input sample", | |
257 PLATFORM_FAILURE,); | |
258 if (!inputs_before_decode_) { | |
259 TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.Decoding", this, ""); | |
260 } | |
261 inputs_before_decode_++; | |
262 | |
263 RETURN_AND_NOTIFY_ON_FAILURE( | |
264 SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0), | |
265 "Failed to create input sample", PLATFORM_FAILURE,); | |
266 | |
267 HRESULT hr = decoder_->ProcessInput(0, sample, 0); | |
268 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to process input sample", | |
269 PLATFORM_FAILURE,); | |
270 | |
271 RETURN_AND_NOTIFY_ON_FAILURE( | |
272 SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0), | |
273 "Failed to send eos message to MFT", PLATFORM_FAILURE,); | |
274 state_ = kEosDrain; | |
275 | |
276 last_input_buffer_id_ = bitstream_buffer.id(); | |
277 | |
278 DoDecode(); | |
279 | |
280 RETURN_AND_NOTIFY_ON_FAILURE((state_ == kStopped || state_ == kNormal), | |
281 "Failed to process output. Unexpected decoder state: " << state_, | |
282 ILLEGAL_STATE,); | |
283 | |
284 // The Microsoft Media foundation decoder internally buffers up to 30 frames | |
285 // before returning a decoded frame. We need to inform the client that this | |
286 // input buffer is processed as it may stop sending us further input. | |
287 // Note: This may break clients which expect every input buffer to be | |
288 // associated with a decoded output buffer. | |
289 // TODO(ananta) | |
290 // Do some more investigation into whether it is possible to get the MFT | |
291 // decoder to emit an output packet for every input packet. | |
292 // http://code.google.com/p/chromium/issues/detail?id=108121 | |
293 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( | |
294 &DXVAVideoDecodeAccelerator::NotifyInputBufferRead, this, | |
295 bitstream_buffer.id())); | |
296 } | |
297 | |
298 void DXVAVideoDecodeAccelerator::AssignPictureBuffers( | |
299 const std::vector<media::PictureBuffer>& buffers) { | |
300 DCHECK(CalledOnValidThread()); | |
301 // Copy the picture buffers provided by the client to the available list, | |
302 // and mark these buffers as available for use. | |
303 for (size_t buffer_index = 0; buffer_index < buffers.size(); | |
304 ++buffer_index) { | |
305 bool inserted = output_picture_buffers_.insert(std::make_pair( | |
306 buffers[buffer_index].id(), | |
307 DXVAPictureBuffer(buffers[buffer_index]))).second; | |
308 DCHECK(inserted); | |
309 } | |
310 ProcessPendingSamples(); | |
311 } | |
312 | |
313 void DXVAVideoDecodeAccelerator::ReusePictureBuffer( | |
314 int32 picture_buffer_id) { | |
315 DCHECK(CalledOnValidThread()); | |
316 | |
317 OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id); | |
318 RETURN_AND_NOTIFY_ON_FAILURE(it != output_picture_buffers_.end(), | |
319 "Invalid picture id: " << picture_buffer_id, INVALID_ARGUMENT,); | |
320 | |
321 it->second.available = true; | |
322 ProcessPendingSamples(); | |
323 } | |
324 | |
// Drains all frames buffered inside the decoder MFT and notifies the client
// when done. Must only be called in the kNormal or kStopped state.
void DXVAVideoDecodeAccelerator::Flush() {
  DCHECK(CalledOnValidThread());

  DVLOG(1) << "DXVAVideoDecodeAccelerator::Flush";

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
      "Unexpected decoder state: " << state_, ILLEGAL_STATE,);

  state_ = kEosDrain;

  RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0),
      "Failed to send drain message", PLATFORM_FAILURE,);

  // As per MSDN docs after the client sends this message, it calls
  // IMFTransform::ProcessOutput in a loop, until ProcessOutput returns the
  // error code MF_E_TRANSFORM_NEED_MORE_INPUT. The DoDecode function sets
  // the state to kStopped when the decoder returns
  // MF_E_TRANSFORM_NEED_MORE_INPUT.
  // The MFT decoder can buffer upto 30 frames worth of input before returning
  // an output frame. This loop here attempts to retrieve as many output frames
  // as possible from the buffered set.
  while (state_ != kStopped) {
    DoDecode();
  }

  // Posted task runs after this function returns, so the state reset below
  // is observed before the client sees NotifyFlushDone.
  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::NotifyFlushDone, this));

  state_ = kNormal;
}
355 | |
// Discards all frames buffered inside the decoder MFT and notifies the
// client asynchronously. Must only be called in kNormal or kStopped state.
void DXVAVideoDecodeAccelerator::Reset() {
  DCHECK(CalledOnValidThread());

  DVLOG(1) << "DXVAVideoDecodeAccelerator::Reset";

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
      "Reset: invalid state: " << state_, ILLEGAL_STATE,);

  state_ = kResetting;

  // MFT_MESSAGE_COMMAND_FLUSH drops any internally buffered input/output.
  RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0),
      "Reset: Failed to send message.", PLATFORM_FAILURE,);

  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::NotifyResetDone, this));

  state_ = DXVAVideoDecodeAccelerator::kNormal;
}
374 | |
// Tears down the decoder. All cleanup is delegated to Invalidate().
void DXVAVideoDecodeAccelerator::Destroy() {
  DCHECK(CalledOnValidThread());
  Invalidate();
}
379 | |
380 bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() { | |
381 base::win::ScopedComPtr<IDirect3D9Ex> d3d9; | |
382 | |
383 HRESULT hr = Direct3DCreate9Ex(D3D_SDK_VERSION, d3d9.Receive()); | |
384 RETURN_ON_HR_FAILURE(hr, "Direct3DCreate9Ex failed", false); | |
385 | |
386 D3DPRESENT_PARAMETERS present_params = {0}; | |
387 present_params.BackBufferWidth = 1; | |
388 present_params.BackBufferHeight = 1; | |
389 present_params.BackBufferFormat = D3DFMT_UNKNOWN; | |
390 present_params.BackBufferCount = 1; | |
391 present_params.SwapEffect = D3DSWAPEFFECT_DISCARD; | |
392 present_params.hDeviceWindow = GetShellWindow(); | |
393 present_params.Windowed = TRUE; | |
394 present_params.Flags = D3DPRESENTFLAG_VIDEO; | |
395 present_params.FullScreen_RefreshRateInHz = 0; | |
396 present_params.PresentationInterval = 0; | |
397 | |
398 hr = d3d9->CreateDeviceEx(D3DADAPTER_DEFAULT, | |
399 D3DDEVTYPE_HAL, | |
400 GetShellWindow(), | |
401 D3DCREATE_SOFTWARE_VERTEXPROCESSING, | |
402 &present_params, | |
403 NULL, | |
404 device_.Receive()); | |
405 RETURN_ON_HR_FAILURE(hr, "Failed to create D3D device", false); | |
406 | |
407 hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token_, | |
408 device_manager_.Receive()); | |
409 RETURN_ON_HR_FAILURE(hr, "DXVA2CreateDirect3DDeviceManager9 failed", false); | |
410 | |
411 hr = device_manager_->ResetDevice(device_, dev_manager_reset_token_); | |
412 RETURN_ON_HR_FAILURE(hr, "Failed to reset device", false); | |
413 return true; | |
414 } | |
415 | |
// Instantiates the Microsoft H.264 decoder MFT directly from its DLL,
// attaches the D3D device manager to it and negotiates media types.
// Requires LoadDecodingDlls() to have loaded msmpeg2vdec.dll already.
bool DXVAVideoDecodeAccelerator::InitDecoder() {
  // We cannot use CoCreateInstance to instantiate the decoder object as that
  // fails in the sandbox. We mimic the steps CoCreateInstance uses to
  // instantiate the object.
  HMODULE decoder_dll = GetModuleHandle(L"msmpeg2vdec.dll");
  RETURN_ON_FAILURE(decoder_dll,
                    "msmpeg2vdec.dll required for decoding is not loaded",
                    false);

  typedef HRESULT (WINAPI* GetClassObject)(const CLSID& clsid,
                                           const IID& iid,
                                           void** object);

  GetClassObject get_class_object = reinterpret_cast<GetClassObject>(
      GetProcAddress(decoder_dll, "DllGetClassObject"));
  RETURN_ON_FAILURE(get_class_object,
                    "Failed to get DllGetClassObject pointer", false);

  // Ask the DLL for the decoder's class factory, then create the MFT.
  base::win::ScopedComPtr<IClassFactory> factory;
  HRESULT hr = get_class_object(__uuidof(CMSH264DecoderMFT),
                                __uuidof(IClassFactory),
                                reinterpret_cast<void**>(factory.Receive()));
  RETURN_ON_HR_FAILURE(hr, "DllGetClassObject for decoder failed", false);

  hr = factory->CreateInstance(NULL, __uuidof(IMFTransform),
                               reinterpret_cast<void**>(decoder_.Receive()));
  RETURN_ON_HR_FAILURE(hr, "Failed to create decoder instance", false);

  RETURN_ON_FAILURE(CheckDecoderDxvaSupport(),
                    "Failed to check decoder DXVA support", false);

  // Hand the D3D device manager to the MFT so it can allocate DXVA surfaces.
  hr = decoder_->ProcessMessage(
      MFT_MESSAGE_SET_D3D_MANAGER,
      reinterpret_cast<ULONG_PTR>(device_manager_.get()));
  RETURN_ON_HR_FAILURE(hr, "Failed to pass D3D manager to decoder", false);

  return SetDecoderMediaTypes();
}
454 | |
455 bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() { | |
456 base::win::ScopedComPtr<IMFAttributes> attributes; | |
457 HRESULT hr = decoder_->GetAttributes(attributes.Receive()); | |
458 RETURN_ON_HR_FAILURE(hr, "Failed to get decoder attributes", false); | |
459 | |
460 UINT32 dxva = 0; | |
461 hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva); | |
462 RETURN_ON_HR_FAILURE(hr, "Failed to check if decoder supports DXVA", false); | |
463 | |
464 hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE); | |
465 RETURN_ON_HR_FAILURE(hr, "Failed to enable DXVA H/W decoding", false); | |
466 return true; | |
467 } | |
468 | |
// Negotiates the decoder's media types: H.264 input, NV12 output.
bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() {
  RETURN_ON_FAILURE(SetDecoderInputMediaType(),
                    "Failed to set decoder input media type", false);
  return SetDecoderOutputMediaType(MFVideoFormat_NV12);
}
474 | |
475 bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() { | |
476 base::win::ScopedComPtr<IMFMediaType> media_type; | |
477 HRESULT hr = MFCreateMediaType(media_type.Receive()); | |
478 RETURN_ON_HR_FAILURE(hr, "MFCreateMediaType failed", false); | |
479 | |
480 hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); | |
481 RETURN_ON_HR_FAILURE(hr, "Failed to set major input type", false); | |
482 | |
483 hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264); | |
484 RETURN_ON_HR_FAILURE(hr, "Failed to set subtype", false); | |
485 | |
486 hr = decoder_->SetInputType(0, media_type, 0); // No flags | |
487 RETURN_ON_HR_FAILURE(hr, "Failed to set decoder input type", false); | |
488 return true; | |
489 } | |
490 | |
491 bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType( | |
492 const GUID& subtype) { | |
493 base::win::ScopedComPtr<IMFMediaType> out_media_type; | |
494 | |
495 for (uint32 i = 0; | |
496 SUCCEEDED(decoder_->GetOutputAvailableType(0, i, | |
497 out_media_type.Receive())); | |
498 ++i) { | |
499 GUID out_subtype = {0}; | |
500 HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype); | |
501 RETURN_ON_HR_FAILURE(hr, "Failed to get output major type", false); | |
502 | |
503 if (out_subtype == subtype) { | |
504 hr = decoder_->SetOutputType(0, out_media_type, 0); // No flags | |
505 RETURN_ON_HR_FAILURE(hr, "Failed to set decoder output type", false); | |
506 return true; | |
507 } | |
508 out_media_type.Release(); | |
509 } | |
510 return false; | |
511 } | |
512 | |
513 bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg, | |
514 int32 param) { | |
515 HRESULT hr = decoder_->ProcessMessage(msg, param); | |
516 return SUCCEEDED(hr); | |
517 } | |
518 | |
// Gets the minimum buffer sizes for input and output samples. The MFT will not
// allocate buffer for input nor output, so we have to do it ourselves and make
// sure they're the correct size. We only provide decoding if DXVA is enabled.
bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() {
  HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
  RETURN_ON_HR_FAILURE(hr, "Failed to get input stream info", false);

  hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
  RETURN_ON_HR_FAILURE(hr, "Failed to get decoder output stream info", false);

  DVLOG(1) << "Input stream info: ";
  DVLOG(1) << "Max latency: " << input_stream_info_.hnsMaxLatency;
  // There should be three flags, one for requiring a whole frame be in a
  // single sample, one for requiring there be one buffer only in a single
  // sample, and one that specifies a fixed sample size. (as in cbSize)
  // NOTE(review): these CHECKs hard-code the expectations for the Microsoft
  // H.264 decoder MFT; a different decoder would crash here by design.
  CHECK_EQ(input_stream_info_.dwFlags, 0x7u);

  DVLOG(1) << "Min buffer size: " << input_stream_info_.cbSize;
  DVLOG(1) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
  DVLOG(1) << "Alignment: " << input_stream_info_.cbAlignment;

  DVLOG(1) << "Output stream info: ";
  // The flags here should be the same and mean the same thing, except when
  // DXVA is enabled, there is an extra 0x100 flag meaning decoder will
  // allocate its own sample.
  DVLOG(1) << "Flags: "
          << std::hex << std::showbase << output_stream_info_.dwFlags;
  CHECK_EQ(output_stream_info_.dwFlags, 0x107u);
  DVLOG(1) << "Min buffer size: " << output_stream_info_.cbSize;
  DVLOG(1) << "Alignment: " << output_stream_info_.cbAlignment;
  return true;
}
551 | |
// Attempts to pull one decoded sample out of the MFT and dispatch it.
// Called once per input from Decode() and repeatedly from Flush(); updates
// |state_| to reflect what the decoder reported (kStopped when the MFT needs
// more input, kNormal after a frame was successfully processed).
void DXVAVideoDecodeAccelerator::DoDecode() {
  // This function is also called from Flush in a loop which could result
  // in the state transitioning to kNormal due to decoded output.
  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kEosDrain),
      "DoDecode: not in normal/drain state", ILLEGAL_STATE,);

  MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0};
  DWORD status = 0;

  HRESULT hr = decoder_->ProcessOutput(0,  // No flags
                                       1,  // # of out streams to pull from
                                       &output_data_buffer,
                                       &status);
  // ProcessOutput may return an event collection even on failure; release
  // the reference so it does not leak.
  IMFCollection* events = output_data_buffer.pEvents;
  if (events != NULL) {
    VLOG(1) << "Got events from ProcessOuput, but discarding";
    events->Release();
  }
  if (FAILED(hr)) {
    // A stream change needs further ProcessInput calls to get back decoder
    // output which is why we need to set the state to stopped.
    if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
      if (!SetDecoderOutputMediaType(MFVideoFormat_NV12)) {
        // Failing to renegotiate the output type after a stream change is
        // unexpected; stop decoding.
        NOTREACHED() << "Failed to set decoder output media type";
        state_ = kStopped;
      } else {
        DVLOG(1) << "Received output format change from the decoder."
                    " Recursively invoking DoDecode";
        DoDecode();
      }
      return;
    } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
      // No more output from the decoder. Stop playback.
      state_ = kStopped;
      return;
    } else {
      NOTREACHED() << "Unhandled error in DoDecode()";
      return;
    }
  }
  TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.Decoding", this, "");

  TRACE_COUNTER1("DXVA Decoding", "TotalPacketsBeforeDecode",
                 inputs_before_decode_);

  inputs_before_decode_ = 0;

  RETURN_AND_NOTIFY_ON_FAILURE(ProcessOutputSample(output_data_buffer.pSample),
      "Failed to process output sample.", PLATFORM_FAILURE,);

  state_ = kNormal;
}
605 | |
// Takes ownership of |sample| (the decoder's output), converts its YUV D3D
// surface into an RGB offscreen surface, and queues the result for delivery.
// Requests picture buffers from the client on first use.
bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) {
  RETURN_ON_FAILURE(sample, "Decode succeeded with NULL output sample", false);

  // Adopt the reference so it is released on every return path.
  base::win::ScopedComPtr<IMFSample> output_sample;
  output_sample.Attach(sample);

  base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
  HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from output sample", false);

  base::win::ScopedComPtr<IDirect3DSurface9> surface;
  hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
                    IID_PPV_ARGS(surface.Receive()));
  RETURN_ON_HR_FAILURE(hr, "Failed to get D3D surface from output sample",
                       false);

  D3DSURFACE_DESC surface_desc;
  hr = surface->GetDesc(&surface_desc);
  RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false);

  TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this,
                        "");
  // TODO(ananta)
  // The code below may not be necessary once we have an ANGLE extension which
  // allows us to pass the Direct 3D surface directly for rendering.

  // The decoded bits in the source direct 3d surface are in the YUV
  // format. Angle does not support that. As a workaround we create an
  // offscreen surface in the RGB format and copy the source surface
  // to this surface.
  base::win::ScopedComPtr<IDirect3DSurface9> dest_surface;
  hr = device_->CreateOffscreenPlainSurface(surface_desc.Width,
                                            surface_desc.Height,
                                            D3DFMT_A8R8G8B8,
                                            D3DPOOL_DEFAULT,
                                            dest_surface.Receive(),
                                            NULL);
  RETURN_ON_HR_FAILURE(hr, "Failed to create offscreen surface", false);

  // Performs the YUV -> RGB color conversion as part of the copy.
  hr = D3DXLoadSurfaceFromSurface(dest_surface, NULL, NULL, surface, NULL,
                                  NULL, D3DX_DEFAULT, 0);
  RETURN_ON_HR_FAILURE(hr, "D3DXLoadSurfaceFromSurface failed", false);

  TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this, "");

  pending_output_samples_.push_back(
      PendingSampleInfo(last_input_buffer_id_, dest_surface));

  // If we have available picture buffers to copy the output data then use the
  // first one and then flag it as not being available for use.
  if (output_picture_buffers_.size()) {
    ProcessPendingSamples();
    return true;
  }
  if (pictures_requested_) {
    DVLOG(1) << "Waiting for picture slots from the client.";
    return true;
  }
  // Go ahead and request picture buffers.
  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::RequestPictureBuffers,
      this, surface_desc.Width, surface_desc.Height));

  pictures_requested_ = true;
  return true;
}
672 | |
// Reads the RGB bits out of |dest_surface|, uploads them into the GL texture
// backing |picture_buffer|, and queues a PictureReady notification carrying
// |input_buffer_id|.
bool DXVAVideoDecodeAccelerator::CopyOutputSampleDataToPictureBuffer(
    IDirect3DSurface9* dest_surface, media::PictureBuffer picture_buffer,
    int input_buffer_id) {
  DCHECK(dest_surface);

  D3DSURFACE_DESC surface_desc;
  HRESULT hr = dest_surface->GetDesc(&surface_desc);
  RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false);

  scoped_array<char> bits;
  RETURN_ON_FAILURE(GetBitmapFromSurface(dest_surface, &bits),
                    "Failed to get bitmap from surface for rendering", false);

  // This function currently executes in the context of IPC handlers in the
  // GPU process which ensures that there is always a OpenGL context.
  // Save and restore the current texture binding around the upload.
  GLint current_texture = 0;
  glGetIntegerv(GL_TEXTURE_BINDING_2D, &current_texture);

  glBindTexture(GL_TEXTURE_2D, picture_buffer.texture_id());
  glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT, surface_desc.Width,
               surface_desc.Height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE,
               reinterpret_cast<GLvoid*>(bits.get()));
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

  glBindTexture(GL_TEXTURE_2D, current_texture);

  media::Picture output_picture(picture_buffer.id(), input_buffer_id);
  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::NotifyPictureReady, this, output_picture));
  return true;
}
704 | |
705 void DXVAVideoDecodeAccelerator::ProcessPendingSamples() { | |
706 if (pending_output_samples_.empty()) | |
707 return; | |
708 | |
709 OutputBuffers::iterator index; | |
710 | |
711 for (index = output_picture_buffers_.begin(); | |
712 index != output_picture_buffers_.end() && | |
713 !pending_output_samples_.empty(); | |
714 ++index) { | |
715 if (index->second.available) { | |
716 PendingSampleInfo sample_info = pending_output_samples_.front(); | |
717 | |
718 CopyOutputSampleDataToPictureBuffer(sample_info.dest_surface, | |
719 index->second.picture_buffer, | |
720 sample_info.input_buffer_id); | |
721 index->second.available = false; | |
722 pending_output_samples_.pop_front(); | |
723 } | |
724 } | |
725 } | |
726 | |
// Drops all per-stream bookkeeping: the last input id, the client's picture
// buffers and any decoded samples awaiting delivery.
void DXVAVideoDecodeAccelerator::ClearState() {
  last_input_buffer_id_ = -1;
  output_picture_buffers_.clear();
  pending_output_samples_.clear();
}
732 | |
// Reports |error| to the client once, severs the client link so no further
// notifications are sent, and tears down the decoder if it was initialized.
void DXVAVideoDecodeAccelerator::StopOnError(
    media::VideoDecodeAccelerator::Error error) {
  DCHECK(CalledOnValidThread());

  if (client_)
    client_->NotifyError(error);
  client_ = NULL;

  if (state_ != kUninitialized) {
    Invalidate();
  }
}
745 | |
// Extracts the pixels of |surface| as a device-independent bitmap into
// |bits| via GDI. Returns false on any failure; the surface DC is released
// on every path.
bool DXVAVideoDecodeAccelerator::GetBitmapFromSurface(
    IDirect3DSurface9* surface,
    scoped_array<char>* bits) {
  // Get the currently loaded bitmap from the DC.
  HDC hdc = NULL;
  HRESULT hr = surface->GetDC(&hdc);
  RETURN_ON_HR_FAILURE(hr, "Failed to get HDC from surface", false);

  HBITMAP bitmap =
      reinterpret_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP));
  if (!bitmap) {
    NOTREACHED() << "Failed to get bitmap from DC";
    surface->ReleaseDC(hdc);
    return false;
  }
  // TODO(ananta)
  // The code below may not be necessary once we have an ANGLE extension which
  // allows us to pass the Direct 3D surface directly for rendering.
  // The Device dependent bitmap is upside down for OpenGL. We convert the
  // bitmap to a DIB and render it on the texture instead.
  BITMAP bitmap_basic_info = {0};
  if (!GetObject(bitmap, sizeof(BITMAP), &bitmap_basic_info)) {
    NOTREACHED() << "Failed to read bitmap info";
    surface->ReleaseDC(hdc);
    return false;
  }
  BITMAPINFO bitmap_info = {0};
  bitmap_info.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
  bitmap_info.bmiHeader.biWidth = bitmap_basic_info.bmWidth;
  // Positive height: bottom-up DIB, which matches OpenGL's row order.
  bitmap_info.bmiHeader.biHeight = bitmap_basic_info.bmHeight;
  bitmap_info.bmiHeader.biPlanes = 1;
  bitmap_info.bmiHeader.biBitCount = bitmap_basic_info.bmBitsPixel;
  bitmap_info.bmiHeader.biCompression = BI_RGB;
  bitmap_info.bmiHeader.biSizeImage = 0;
  bitmap_info.bmiHeader.biClrUsed = 0;

  // First call with a NULL buffer fills in biSizeImage.
  int ret = GetDIBits(hdc, bitmap, 0, 0, NULL, &bitmap_info, DIB_RGB_COLORS);
  if (!ret || bitmap_info.bmiHeader.biSizeImage <= 0) {
    NOTREACHED() << "Failed to read bitmap size";
    surface->ReleaseDC(hdc);
    return false;
  }

  bits->reset(new char[bitmap_info.bmiHeader.biSizeImage]);
  // Second call copies the actual pixel rows into the allocated buffer.
  ret = GetDIBits(hdc, bitmap, 0, bitmap_basic_info.bmHeight, bits->get(),
                  &bitmap_info, DIB_RGB_COLORS);
  if (!ret) {
    NOTREACHED() << "Failed to retrieve bitmap bits.";
  }
  surface->ReleaseDC(hdc);
  return !!ret;
}
798 | |
// Tears down all decoder state and shuts Media Foundation down. A no-op
// when the decoder is already uninitialized, so it is safe to call from
// both Destroy() and StopOnError().
void DXVAVideoDecodeAccelerator::Invalidate() {
  if (state_ == kUninitialized)
    return;
  ClearState();
  decoder_.Release();
  device_.Release();
  device_manager_.Release();
  // Balances the MFStartup call in Initialize().
  MFShutdown();
  state_ = kUninitialized;
}
809 | |
// The Notify* helpers below run as posted tasks; each checks |client_|
// because the pointer may have been cleared by StopOnError or the destructor
// before the task executes.
void DXVAVideoDecodeAccelerator::NotifyInitializeDone() {
  if (client_)
    client_->NotifyInitializeDone();
}

void DXVAVideoDecodeAccelerator::NotifyInputBufferRead(int input_buffer_id) {
  if (client_)
    client_->NotifyEndOfBitstreamBuffer(input_buffer_id);
}

void DXVAVideoDecodeAccelerator::NotifyFlushDone() {
  if (client_)
    client_->NotifyFlushDone();
}

void DXVAVideoDecodeAccelerator::NotifyResetDone() {
  if (client_)
    client_->NotifyResetDone();
}
829 | |
// Asks the client for kNumPictureBuffers output buffers of the given size.
// Runs as a posted task, so it may fire after teardown; bail out then.
void DXVAVideoDecodeAccelerator::RequestPictureBuffers(int width, int height) {
  // This task could execute after the decoder has been torn down.
  if (state_ != kUninitialized && client_) {
    client_->ProvidePictureBuffers(kNumPictureBuffers,
                                   gfx::Size(width, height));
  }
}
837 | |
// Delivers a decoded picture to the client. Runs as a posted task, so it
// may fire after teardown; bail out then.
void DXVAVideoDecodeAccelerator::NotifyPictureReady(
    const media::Picture& picture) {
  // This task could execute after the decoder has been torn down.
  if (state_ != kUninitialized && client_)
    client_->PictureReady(picture);
}
OLD | NEW |