OLD | NEW |
---|---|
(Empty) | |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "content/common/gpu/media/dxva_video_decode_accelerator.h" | |
6 | |
7 #include <ks.h> | |
8 #include <codecapi.h> | |
9 #include <d3d9types.h> | |
10 #include <d3dx9tex.h> | |
11 #include <dxva.h> | |
12 #include <dxva2api.h> | |
13 #include <mfapi.h> | |
14 #include <mferror.h> | |
15 #include <wmcodecdsp.h> | |
16 | |
17 #include "base/lazy_instance.h" | |
18 #include "base/logging.h" | |
19 #include "base/memory/scoped_handle.h" | |
20 #include "base/memory/scoped_ptr.h" | |
21 #include "base/shared_memory.h" | |
22 #include "base/win/scoped_com_initializer.h" | |
23 #include "base/time.h" | |
24 #include "media/video/video_decode_accelerator.h" | |
25 #include "third_party/angle/include/EGL/egl.h" | |
26 #include "third_party/angle/include/GLES2/gl2.h" | |
27 #include "third_party/angle/include/GLES2/gl2ext.h" | |
28 | |
29 namespace { | |
30 | |
31 static const int kNumPictureBuffers = 5; | |
32 static const int kNumInputs = 100; | |
33 | |
34 IMFSample* CreateEmptySample() { | |
35 HRESULT hr = E_FAIL; | |
36 base::win::ScopedComPtr<IMFSample> sample; | |
37 hr = MFCreateSample(sample.Receive()); | |
38 if (FAILED(hr)) { | |
39 NOTREACHED() << "Unable to create an empty sample"; | |
40 return NULL; | |
41 } | |
42 return sample.Detach(); | |
43 } | |
44 | |
45 // Creates a Media Foundation sample with one buffer of length |buffer_length| | |
46 // on an |align|-byte boundary. |align| must be a power of 2 or 0. | |
47 // If |align| is 0, then no alignment is specified. | |
48 IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) { | |
49 CHECK_GT(buffer_length, 0); | |
50 base::win::ScopedComPtr<IMFSample> sample; | |
51 sample.Attach(CreateEmptySample()); | |
52 if (!sample.get()) | |
53 return NULL; | |
54 base::win::ScopedComPtr<IMFMediaBuffer> buffer; | |
55 HRESULT hr = E_FAIL; | |
56 if (align == 0) { | |
57 // Note that MFCreateMemoryBuffer is the same as MFCreateAlignedMemoryBuffer | |
58 // with the align argument being 0. | |
59 hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive()); | |
60 } else { | |
61 hr = MFCreateAlignedMemoryBuffer(buffer_length, | |
62 align - 1, | |
63 buffer.Receive()); | |
64 } | |
65 if (FAILED(hr)) { | |
66 NOTREACHED() << "Unable to create an empty buffer"; | |
67 return NULL; | |
68 } | |
69 hr = sample->AddBuffer(buffer.get()); | |
70 if (FAILED(hr)) { | |
71 NOTREACHED() << "Failed to add empty buffer to sample"; | |
72 return NULL; | |
73 } | |
74 return sample.Detach(); | |
75 } | |
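A note on the align - 1 argument above: MFCreateAlignedMemoryBuffer expects the
alignment expressed as (boundary - 1), which is how the MF alignment constants
are defined (e.g. MF_16_BYTE_ALIGNMENT is 0x0000000F), while this helper takes
the actual boundary. A minimal usage sketch, assuming a decoder that reported a
16-byte alignment requirement and a 64 KB minimum buffer size (both values are
illustrative):

  // Hypothetical caller: pass the real boundary (16); the helper converts
  // it to the MF-style (boundary - 1) form before calling
  // MFCreateAlignedMemoryBuffer.
  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateEmptySampleWithBuffer(64 * 1024, 16));
  if (!sample.get())
    return false;  // Buffer allocation failed.
| |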
76 | |
77 // Creates a Media Foundation sample with one buffer containing a copy of the | |
78 // given Annex B stream data. | |
79 // If duration and sample time are not known, provide 0. | |
80 // |min_size| specifies the minimum size of the buffer (might be required by | |
81 // the decoder for input). The times here should be given in 100ns units. | |
82 // |alignment| specifies the required alignment of the buffer in the | |
83 // sample. If no alignment is required, provide 0 or 1. | |
84 static IMFSample* CreateInputSample(const uint8* stream, int size, | |
85 int64 timestamp, int64 duration, | |
86 int min_size, int alignment) { | |
87 CHECK(stream); | |
88 CHECK_GT(size, 0); | |
89 base::win::ScopedComPtr<IMFSample> sample; | |
90 sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size), | |
91 alignment)); | |
92 if (!sample.get()) { | |
93 NOTREACHED() << "Failed to create empty buffer for input"; | |
94 return NULL; | |
95 } | |
96 HRESULT hr = E_FAIL; | |
97 if (duration > 0) { | |
98 hr = sample->SetSampleDuration(duration); | |
99 if (FAILED(hr)) { | |
100 NOTREACHED() << "Failed to set sample duration"; | |
101 return NULL; | |
102 } | |
103 } | |
104 if (timestamp > 0) { | |
105 hr = sample->SetSampleTime(timestamp); | |
106 if (FAILED(hr)) { | |
107 NOTREACHED() << "Failed to set sample time"; | |
108 return NULL; | |
109 } | |
110 } | |
111 base::win::ScopedComPtr<IMFMediaBuffer> buffer; | |
112 hr = sample->GetBufferByIndex(0, buffer.Receive()); | |
113 if (FAILED(hr)) { | |
114 NOTREACHED() << "Failed to get buffer in sample"; | |
115 return NULL; | |
116 } | |
117 DWORD max_length = 0, current_length = 0; | |
118 uint8* destination = NULL; | |
119 hr = buffer->Lock(&destination, &max_length, &current_length); | |
120 if (FAILED(hr)) { | |
121 NOTREACHED() << "Failed to lock buffer"; | |
122 return NULL; | |
123 } | |
124 CHECK_EQ(current_length, 0u); | |
125 CHECK_GE(static_cast<int>(max_length), size); | |
126 memcpy(destination, stream, size); | |
127 CHECK(SUCCEEDED(buffer->Unlock())); | |
128 hr = buffer->SetCurrentLength(size); | |
129 if (FAILED(hr)) { | |
130 NOTREACHED() << "Failed to set current length to " << size; | |
131 return NULL; | |
132 } | |
133 hr = sample->SetUINT32(MFSampleExtension_CleanPoint, TRUE); | |
134 if (FAILED(hr)) { | |
135 NOTREACHED() << "Failed to mark sample as key sample"; | |
136 return NULL; | |
137 } | |
138 return sample.Detach(); | |
139 } | |
140 | |
141 } // namespace | |
142 | |
143 DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator( | |
144 media::VideoDecodeAccelerator::Client* client, | |
145 base::ProcessHandle renderer_process) | |
146 : client_(client), | |
147 message_loop_(MessageLoop::current()), | |
148 surface_width_(0), | |
149 surface_height_(0), | |
150 state_(kUninitialized), | |
151 input_stream_info_(), | |
152 output_stream_info_(), | |
153 pictures_requested_(false), | |
154 renderer_process_(renderer_process) { | |
155 input_buffer_frame_times_.reserve(kNumInputs); | |
156 | |
157 #if !defined(NDEBUG) | |
158 decode_start_time_ = 0; | |
159 inputs_before_decode_ = 0; | |
160 #endif // !defined(NDEBUG) | |
161 } | |
162 | |
163 DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() { | |
164 DCHECK_EQ(message_loop_, MessageLoop::current()); | |
165 client_ = NULL; | |
166 message_loop_ = NULL; | |
167 } | |
168 | |
169 bool DXVAVideoDecodeAccelerator::Initialize(Profile profile) { | |
170 DCHECK_EQ(message_loop_, MessageLoop::current()); | |
171 | |
172 if (state_ != kUninitialized) { | |
173 NOTREACHED() << "Initialize: invalid state: " | |
174 << state_; | |
175 return false; | |
176 } | |
177 | |
178 HRESULT hr = MFStartup(MF_VERSION, MFSTARTUP_FULL); | |
179 if (FAILED(hr)) { | |
180 NOTREACHED() << "MFStartup failed. Error:" | |
181 << std::hex << std::showbase << hr; | |
182 return false; | |
183 } | |
184 if (!CreateD3DDevManager()) | |
185 return false; | |
186 if (!InitDecoder()) | |
187 return false; | |
188 if (!GetStreamsInfoAndBufferReqs()) | |
189 return false; | |
190 if (SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0)) { | |
191 state_ = DXVAVideoDecodeAccelerator::kNormal; | |
192 client_->NotifyInitializeDone(); | |
193 return true; | |
194 } | |
195 return false; | |
196 } | |
197 | |
198 void DXVAVideoDecodeAccelerator::Decode( | |
199 const media::BitstreamBuffer& bitstream_buffer) { | |
200 DCHECK_EQ(message_loop_, MessageLoop::current()); | |
201 if (state_ == DXVAVideoDecodeAccelerator::kUninitialized) { | |
202 NOTREACHED() << "Decode: invalid state"; | |
203 return; | |
204 } | |
205 | |
206 HANDLE shared_memory_handle = NULL; | |
207 if (!::DuplicateHandle(renderer_process_, | |
208 bitstream_buffer.handle(), | |
209 ::GetCurrentProcess(), | |
210 &shared_memory_handle, | |
211 0, | |
212 FALSE, | |
213 DUPLICATE_SAME_ACCESS)) { | |
214 NOTREACHED() << "Failed to open duplicate shared mem handle"; | |
215 return; | |
216 } | |
217 | |
218 base::SharedMemory shm(shared_memory_handle, true); | |
219 if (!shm.Map(bitstream_buffer.size())) { | |
220 NOTREACHED() << "Failed in SharedMemory::Map()"; | |
221 return; | |
222 } | |
223 | |
224 base::Time::Exploded exploded; | |
225 base::Time::Now().LocalExplode(&exploded); | |
226 base::win::ScopedComPtr<IMFSample> sample; | |
227 sample.Attach(CreateInputSample(reinterpret_cast<const uint8*>(shm.memory()), | |
228 bitstream_buffer.size(), | |
229 exploded.second * 10000000, | |
230 400000, | |
231 input_stream_info_.cbSize, | |
232 input_stream_info_.cbAlignment)); | |
233 if (!sample.get()) { | |
234 NOTREACHED() << "Failed to create an input sample"; | |
235 return; | |
236 } else { | |
237 #if !defined(NDEBUG) | |
238 inputs_before_decode_++; | |
239 if (!decode_start_time_) | |
240 decode_start_time_ = ::GetTickCount(); | |
241 #endif // !defined(NDEBUG) | |
242 SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0); | |
243 | |
244 if (FAILED(decoder_->ProcessInput(0, sample.get(), 0))) { | |
245 NOTREACHED() << "Failed to process input"; | |
246 return; | |
247 } | |
248 } | |
249 if (state_ != DXVAVideoDecodeAccelerator::kEosDrain) { | |
250 if (!SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0)) { | |
251 NOTREACHED() << "Failed to send eos message to MFT"; | |
252 } else { | |
253 state_ = DXVAVideoDecodeAccelerator::kEosDrain; | |
254 } | |
255 } | |
256 input_buffer_frame_times_.push_back(bitstream_buffer.id()); | |
257 DoDecode(); | |
258 client_->NotifyEndOfBitstreamBuffer(bitstream_buffer.id()); | |
259 } | |
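For reference on the constants passed to CreateInputSample() above: Media
Foundation sample times and durations are expressed in 100 ns units, so the
hard-coded duration of 400000 works out to 400000 * 100 ns = 40 ms, i.e. a
nominal 25 fps, and exploded.second * 10000000 converts whole wall-clock
seconds into the same unit. The wall-clock-derived value appears to be a
stand-in rather than a true presentation timestamp.
| |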
260 | |
261 void DXVAVideoDecodeAccelerator::AssignPictureBuffers( | |
262 const std::vector<media::PictureBuffer>& buffers) { | |
263 DCHECK_EQ(message_loop_, MessageLoop::current()); | |
264 // Copy the picture buffers provided by the client to the available list, | |
265 // and mark these buffers as available for use. | |
266 for (size_t buffer_index = 0; buffer_index < buffers.size(); ++buffer_index) { | |
267 DXVAPictureBuffer picture_buffer; | |
268 picture_buffer.available = true; | |
269 picture_buffer.picture_buffer = buffers[buffer_index]; | |
270 | |
271 DCHECK(available_pictures_.find(buffers[buffer_index].id()) == | |
272 available_pictures_.end()); | |
273 available_pictures_[buffers[buffer_index].id()] = picture_buffer; | |
274 } | |
279 | |
280 ProcessPendingSamples(); | |
281 } | |
282 | |
283 void DXVAVideoDecodeAccelerator::ReusePictureBuffer( | |
284 int32 picture_buffer_id) { | |
285 DCHECK_EQ(message_loop_, MessageLoop::current()); | |
286 DCHECK(available_pictures_.find(picture_buffer_id) != | |
287 available_pictures_.end()); | |
288 available_pictures_[picture_buffer_id].available = true; | |
289 | |
290 PendingOutputSamples::iterator sample_index = | |
291 pending_output_samples_.begin(); | |
292 | |
293 if (sample_index != pending_output_samples_.end()) { | |
294 const PendingSampleInfo& sample_info = *sample_index; | |
295 CopyOutputSampleDataToPictureBuffer( | |
296 sample_info.surface, | |
297 available_pictures_[picture_buffer_id].picture_buffer, | |
298 sample_info.input_buffer_id); | |
299 available_pictures_[picture_buffer_id].available = false; | |
300 pending_output_samples_.erase(sample_index); | |
301 } | |
302 } | |
303 | |
304 void DXVAVideoDecodeAccelerator::Flush() { | |
305 DCHECK_EQ(message_loop_, MessageLoop::current()); | |
306 | |
307 LOG(INFO) << "DXVAVideoDecodeAccelerator::Flush"; | |
308 if (state_ == DXVAVideoDecodeAccelerator::kUninitialized) { | |
309 NOTREACHED() << "Flush: invalid state"; | |
310 client_->NotifyFlushDone(); | |
311 return; | |
312 } | |
313 | |
314 if (state_ == DXVAVideoDecodeAccelerator::kStopped) { | |
315 LOG(INFO) << "No data available from the decoder"; | |
316 client_->NotifyFlushDone(); | |
317 return; | |
318 } | |
319 | |
320 state_ = DXVAVideoDecodeAccelerator::kEosDrain; | |
321 if (!SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0)) { | |
322 LOG(WARNING) << "Failed to send drain message"; | |
323 state_ = DXVAVideoDecodeAccelerator::kStopped; | |
324 client_->NotifyFlushDone(); | |
325 return; | |
326 } | |
327 | |
328 // As per MSDN docs after the client sends this message, it calls | |
329 // IMFTransform::ProcessOutput in a loop, until ProcessOutput returns the | |
330 // error code MF_E_TRANSFORM_NEED_MORE_INPUT. The DoDecode function sets the | |
331 // state to DXVAVideoDecodeAccelerator::kStopped when the decoder returns | |
332 // MF_E_TRANSFORM_NEED_MORE_INPUT. | |
333 while (state_ != DXVAVideoDecodeAccelerator::kStopped) { | |
334 DoDecode(); | |
335 } | |
336 client_->NotifyFlushDone(); | |
337 } | |
338 | |
339 void DXVAVideoDecodeAccelerator::Reset() { | |
340 DCHECK_EQ(message_loop_, MessageLoop::current()); | |
341 LOG(INFO) << "DXVAVideoDecodeAccelerator::Reset"; | |
342 if (state_ != kNormal && state_ != kStopped) { | |
343 NOTREACHED() << "Reset: invalid state"; | |
344 client_->NotifyResetDone(); | |
345 return; | |
346 } | |
347 | |
348 state_ = DXVAVideoDecodeAccelerator::kResetting; | |
349 if (!SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0)) { | |
350 LOG(WARNING) << "DXVAVideoDecodeAccelerator::Reset failed to send flush message"; | |
351 client_->NotifyResetDone(); | |
352 return; | |
353 } | |
354 state_ = DXVAVideoDecodeAccelerator::kNormal; | |
355 client_->NotifyResetDone(); | |
356 input_buffer_frame_times_.clear(); | |
357 } | |
358 | |
359 void DXVAVideoDecodeAccelerator::Destroy() { | |
360 DCHECK_EQ(message_loop_, MessageLoop::current()); | |
361 OutputBuffers::iterator index; | |
362 for (index = available_pictures_.begin(); index != available_pictures_.end(); | |
363 ++index) { | |
364 client_->DismissPictureBuffer(index->second.picture_buffer.id()); | |
365 } | |
366 available_pictures_.clear(); | |
367 pending_output_samples_.clear(); | |
368 input_buffer_frame_times_.clear(); | |
369 } | |
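Initialize() above calls MFStartup(), but nothing in this file calls the
matching MFShutdown(); Media Foundation requires every successful MFStartup()
to be balanced by MFShutdown(). A minimal sketch of the missing teardown,
assuming Destroy() is the right place for it:

  void DXVAVideoDecodeAccelerator::Destroy() {
    // ... dismiss picture buffers and clear pending state as above ...
    // Balance the MFStartup() call made in Initialize().
    MFShutdown();
  }
| |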
370 | |
371 bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() { | |
372 d3d9_.Attach(Direct3DCreate9(D3D_SDK_VERSION)); | |
apatrick_chromium
2011/12/13 02:00:13
I don't think the IDirect3D9 needs to outlive the
ananta
2011/12/13 02:51:18
Done.
| |
373 if (d3d9_.get() == NULL) { | |
374 NOTREACHED() << "Failed to create D3D9"; | |
375 return false; | |
376 } | |
377 | |
378 D3DPRESENT_PARAMETERS present_params = {0}; | |
379 present_params.BackBufferWidth = 0; | |
apatrick_chromium
2011/12/13 02:00:13
0 by 0 might not work reliably. maybe 1 by 1 would
ananta
2011/12/13 02:51:18
Done.
| |
380 present_params.BackBufferHeight = 0; | |
381 present_params.BackBufferFormat = D3DFMT_UNKNOWN; | |
apatrick_chromium
2011/12/13 02:00:13
and i'd use a valid format as well.
ananta
2011/12/13 02:51:18
As per msdn D3DFMT_UNKNOWN can be specified for t
| |
382 present_params.BackBufferCount = 1; | |
383 present_params.SwapEffect = D3DSWAPEFFECT_DISCARD; | |
384 present_params.hDeviceWindow = GetShellWindow(); | |
385 present_params.Windowed = TRUE; | |
386 present_params.Flags = D3DPRESENTFLAG_VIDEO; | |
387 present_params.FullScreen_RefreshRateInHz = 0; | |
388 present_params.PresentationInterval = 0; | |
389 | |
390 HRESULT hr = d3d9_->CreateDevice(D3DADAPTER_DEFAULT, | |
apatrick_chromium
2011/12/13 02:00:13
This will need to be a D3D9Ex device if we are goi
ananta
2011/12/13 02:51:18
Done.
| |
391 D3DDEVTYPE_HAL, | |
392 GetShellWindow(), | |
393 (D3DCREATE_HARDWARE_VERTEXPROCESSING | | |
apatrick_chromium
2011/12/13 02:00:13
Older Intel cards do not support hardware vertex processing.
ananta
2011/12/13 02:51:18
Changed to mixed.
| |
394 D3DCREATE_MULTITHREADED), | |
395 &present_params, | |
396 device_.Receive()); | |
397 if (FAILED(hr)) { | |
398 NOTREACHED() << "Failed to create D3D Device"; | |
399 return false; | |
400 } | |
401 | |
402 UINT dev_manager_reset_token = 0; | |
403 hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token, | |
apatrick_chromium
2011/12/13 02:00:13
I think D3D retains a pointer to this and modifies
ananta
2011/12/13 02:51:18
Done.
| |
404 device_manager_.Receive()); | |
405 if (FAILED(hr)) { | |
406 NOTREACHED() << "Couldn't create D3D Device manager"; | |
407 return false; | |
408 } | |
409 | |
410 hr = device_manager_->ResetDevice(device_.get(), | |
411 dev_manager_reset_token); | |
412 if (FAILED(hr)) { | |
413 NOTREACHED() << "Failed to set device to device manager"; | |
414 return false; | |
415 } | |
416 return true; | |
417 } | |
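The review threads above ask for a D3D9Ex device, a 1x1 back buffer, and mixed
vertex processing. A sketch of what the adjusted creation path might look like;
Direct3DCreate9Ex and IDirect3D9Ex::CreateDeviceEx are the documented Ex-style
entry points, and the local names here are illustrative, not the patch's final
code:

  base::win::ScopedComPtr<IDirect3D9Ex> d3d9_ex;
  HRESULT hr = Direct3DCreate9Ex(D3D_SDK_VERSION, d3d9_ex.Receive());
  if (FAILED(hr))
    return false;

  D3DPRESENT_PARAMETERS present_params = {0};
  present_params.BackBufferWidth = 1;   // 1x1, per the review comment.
  present_params.BackBufferHeight = 1;
  present_params.BackBufferFormat = D3DFMT_UNKNOWN;
  present_params.BackBufferCount = 1;
  present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
  present_params.hDeviceWindow = GetShellWindow();
  present_params.Windowed = TRUE;
  present_params.Flags = D3DPRESENTFLAG_VIDEO;

  base::win::ScopedComPtr<IDirect3DDevice9Ex> device_ex;
  hr = d3d9_ex->CreateDeviceEx(D3DADAPTER_DEFAULT,
                               D3DDEVTYPE_HAL,
                               GetShellWindow(),
                               D3DCREATE_MIXED_VERTEXPROCESSING |  // Per review.
                                   D3DCREATE_MULTITHREADED,
                               &present_params,
                               NULL,  // No fullscreen display mode.
                               device_ex.Receive());
  return SUCCEEDED(hr);
| |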
418 | |
419 | |
420 bool DXVAVideoDecodeAccelerator::InitDecoder() { | |
421 HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT), | |
422 NULL, | |
423 CLSCTX_INPROC_SERVER, | |
424 __uuidof(IMFTransform), | |
425 reinterpret_cast<void**>(decoder_.Receive())); | |
426 if (FAILED(hr) || !decoder_.get()) { | |
427 NOTREACHED() << "CoCreateInstance failed " | |
428 << std::hex << std::showbase << hr; | |
429 return false; | |
430 } | |
431 | |
432 if (!CheckDecoderDxvaSupport()) | |
433 return false; | |
434 hr = decoder_->ProcessMessage( | |
435 MFT_MESSAGE_SET_D3D_MANAGER, | |
436 reinterpret_cast<ULONG_PTR>(device_manager_.get())); | |
437 if (FAILED(hr)) { | |
438 NOTREACHED() << "Failed to pass D3D9 device to decoder " | |
439 << std::hex << hr; | |
440 return false; | |
441 } | |
442 return SetDecoderMediaTypes(); | |
443 } | |
444 | |
445 bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() { | |
446 base::win::ScopedComPtr<IMFAttributes> attributes; | |
447 HRESULT hr = decoder_->GetAttributes(attributes.Receive()); | |
448 if (FAILED(hr)) { | |
449 NOTREACHED() << "Failed to get decoder attributes, hr = " | |
450 << std::hex << std::showbase << hr; | |
451 return false; | |
452 } | |
453 UINT32 dxva = 0; | |
454 hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva); | |
455 if (FAILED(hr) || !dxva) { | |
456 NOTREACHED() << "Failed to get DXVA attr. Error:" | |
457 << std::hex << std::showbase << hr | |
458 << " .This might not be the right decoder."; | |
459 return false; | |
460 } | |
461 | |
462 hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE); | |
463 DCHECK(SUCCEEDED(hr)); | |
464 return true; | |
465 } | |
466 | |
467 bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() { | |
474 if (!SetDecoderInputMediaType()) | |
475 return false; | |
476 return SetDecoderOutputMediaType(MFVideoFormat_NV12); | |
477 } | |
478 | |
479 bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() { | |
480 base::win::ScopedComPtr<IMFMediaType> media_type; | |
481 HRESULT hr = MFCreateMediaType(media_type.Receive()); | |
482 if (FAILED(hr)) { | |
483 NOTREACHED() << "Failed to create empty media type object"; | |
484 return false; | |
485 } | |
486 | |
487 hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); | |
488 if (FAILED(hr)) { | |
489 NOTREACHED() << "SetGUID for major type failed"; | |
490 return false; | |
491 } | |
492 | |
493 hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264); | |
494 if (FAILED(hr)) { | |
495 NOTREACHED() << "SetGUID for subtype failed"; | |
496 return false; | |
497 } | |
498 | |
499 hr = decoder_->SetInputType(0, media_type.get(), 0); // No flags | |
500 if (FAILED(hr)) { | |
501 NOTREACHED() << "Failed to set decoder's input type"; | |
502 return false; | |
503 } | |
504 return true; | |
505 } | |
506 | |
507 bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType( | |
508 const GUID& subtype) { | |
509 DWORD i = 0; | |
510 IMFMediaType* out_media_type = NULL; | |
apatrick_chromium
2011/12/13 02:00:13
nit: Why can't this be ScopedComPtr?
ananta
2011/12/13 02:51:18
Done.
| |
511 bool found = false; | |
512 while (SUCCEEDED(decoder_->GetOutputAvailableType(0, i, &out_media_type))) { | |
513 GUID out_subtype = {0}; | |
514 HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype); | |
515 if (FAILED(hr)) { | |
516 NOTREACHED() << "Failed to GetGUID() on GetOutputAvailableType() " | |
517 << i; | |
518 out_media_type->Release(); | |
519 continue; | |
520 } | |
521 if (out_subtype == subtype) { | |
522 hr = decoder_->SetOutputType(0, out_media_type, 0); // No flags | |
523 hr = MFGetAttributeSize(out_media_type, MF_MT_FRAME_SIZE, | |
524 reinterpret_cast<UINT32*>(&surface_width_), | |
525 reinterpret_cast<UINT32*>(&surface_height_)); | |
526 if (FAILED(hr)) { | |
527 NOTREACHED() << "Failed to SetOutputType to |subtype| or obtain " | |
528 << "width/height " << std::hex << hr; | |
529 } | |
530 out_media_type->Release(); | |
531 return true; | |
532 } | |
533 i++; | |
534 out_media_type->Release(); | |
535 } | |
536 return false; | |
537 } | |
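Per the ScopedComPtr nit above, a sketch of the same enumeration with scoped
ownership. Note that it also keeps |i| advancing on the GetGUID() failure path;
in the raw-pointer loop above, continue skips the i++ at the bottom, so a
persistently failing type would be retried at the same index forever:

  for (DWORD i = 0; ; ++i) {
    base::win::ScopedComPtr<IMFMediaType> out_media_type;
    if (FAILED(decoder_->GetOutputAvailableType(0, i,
                                                out_media_type.Receive())))
      return false;  // Enumeration exhausted without finding |subtype|.
    GUID out_subtype = {0};
    if (FAILED(out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype)))
      continue;  // Skip this type; |i| still advances.
    if (out_subtype != subtype)
      continue;
    HRESULT hr = decoder_->SetOutputType(0, out_media_type.get(), 0);
    if (FAILED(hr))
      return false;
    hr = MFGetAttributeSize(out_media_type.get(), MF_MT_FRAME_SIZE,
                            reinterpret_cast<UINT32*>(&surface_width_),
                            reinterpret_cast<UINT32*>(&surface_height_));
    return SUCCEEDED(hr);
  }
| |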
538 | |
539 bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg, | |
540 int32 param) { | |
541 HRESULT hr = decoder_->ProcessMessage(msg, param); | |
542 return SUCCEEDED(hr); | |
543 } | |
544 | |
545 // Gets the minimum buffer sizes for input and output samples. | |
546 // The MFT will not allocate buffers for either input or output, so we have | |
apatrick_chromium
2011/12/13 02:00:13
nit: double negative
ananta
2011/12/13 02:51:18
Done.
| |
547 // to do it ourselves and make sure they're the correct size. | |
548 // The exception is when DXVA is enabled: the decoder then allocates the output. | |
549 bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() { | |
550 HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_); | |
551 if (FAILED(hr)) { | |
552 LOG(ERROR) << "Failed to get input stream info"; | |
553 return false; | |
554 } | |
555 LOG(INFO) << "Input stream info: "; | |
apatrick_chromium
2011/12/13 02:00:13
would VLOG be more appropriate?
ananta
2011/12/13 02:51:18
Done.
| |
556 LOG(INFO) << "Max latency: " << input_stream_info_.hnsMaxLatency; | |
557 // There should be three flags: one requiring that a whole frame be in a | |
558 // single sample, one requiring that there be only one buffer in a single | |
559 // sample, and one specifying a fixed sample size (as given by cbSize). | |
560 LOG(INFO) << "Flags: " | |
561 << std::hex << std::showbase << input_stream_info_.dwFlags; | |
562 CHECK_EQ(input_stream_info_.dwFlags, 0x7u); | |
563 LOG(INFO) << "Min buffer size: " << input_stream_info_.cbSize; | |
564 LOG(INFO) << "Max lookahead: " << input_stream_info_.cbMaxLookahead; | |
565 LOG(INFO) << "Alignment: " << input_stream_info_.cbAlignment; | |
566 | |
567 hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_); | |
568 if (FAILED(hr)) { | |
569 LOG(ERROR) << "Failed to get output stream info"; | |
570 return false; | |
571 } | |
572 LOG(INFO) << "Output stream info: "; | |
573 // The flags here should be the same and mean the same thing, except that | |
574 // when DXVA is enabled there is an extra 0x100 flag meaning the decoder | |
575 // will allocate its own samples. | |
576 LOG(INFO) << "Flags: " | |
577 << std::hex << std::showbase << output_stream_info_.dwFlags; | |
578 CHECK_EQ(output_stream_info_.dwFlags, 0x107u); | |
579 LOG(INFO) << "Min buffer size: " << output_stream_info_.cbSize; | |
580 LOG(INFO) << "Alignment: " << output_stream_info_.cbAlignment; | |
581 return true; | |
582 } | |
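The magic 0x7u and 0x107u in the CHECK_EQs above are combinations of named MFT
stream-info flags from mftransform.h; a sketch spelling out the same
invariants:

  // Input: whole frame per sample, one buffer per sample, fixed size.
  const DWORD kExpectedInputFlags =
      MFT_INPUT_STREAM_WHOLE_SAMPLES |             // 0x1
      MFT_INPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER |  // 0x2
      MFT_INPUT_STREAM_FIXED_SAMPLE_SIZE;          // 0x4
  CHECK_EQ(input_stream_info_.dwFlags, kExpectedInputFlags);

  // Output: same three flags, plus PROVIDES_SAMPLES (0x100) because with
  // DXVA enabled the decoder allocates its own output samples.
  const DWORD kExpectedOutputFlags =
      MFT_OUTPUT_STREAM_WHOLE_SAMPLES |
      MFT_OUTPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER |
      MFT_OUTPUT_STREAM_FIXED_SAMPLE_SIZE |
      MFT_OUTPUT_STREAM_PROVIDES_SAMPLES;
  CHECK_EQ(output_stream_info_.dwFlags, kExpectedOutputFlags);
| |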
583 | |
584 bool DXVAVideoDecodeAccelerator::DoDecode() { | |
585 if (state_ != kNormal && state_ != kEosDrain) { | |
586 NOTREACHED() << "DoDecode: not in normal or drain state"; | |
587 return false; | |
588 } | |
apatrick_chromium
2011/12/13 02:00:13
nit: is this old code that can be deleted?
ananta
2011/12/13 02:51:18
Done.
| |
590 base::win::ScopedComPtr<IMFSample> output_sample; | |
591 MFT_OUTPUT_DATA_BUFFER output_data_buffer; | |
592 DWORD status = 0; | |
593 | |
594 memset(&output_data_buffer, 0, sizeof(output_data_buffer)); | |
595 output_data_buffer.pSample = output_sample; | |
596 status = 0; | |
597 | |
598 HRESULT hr = decoder_->ProcessOutput(0, // No flags | |
599 1, // # of out streams to pull from | |
600 &output_data_buffer, | |
601 &status); | |
602 IMFCollection* events = output_data_buffer.pEvents; | |
603 if (events != NULL) { | |
604 LOG(INFO) << "Got events from ProcessOuput, but discarding"; | |
605 events->Release(); | |
606 } | |
607 if (FAILED(hr)) { | |
608 if (hr == MF_E_TRANSFORM_STREAM_CHANGE) { | |
609 bool success = SetDecoderOutputMediaType(MFVideoFormat_NV12); | |
610 if (success) { | |
611 return true; | |
612 } else { | |
613 NOTREACHED() << "Failed to set decoder output media type"; | |
614 return false; | |
615 } | |
616 } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) { | |
617 // No more output from the decoder. Notify EOS and stop playback. | |
618 state_ = DXVAVideoDecodeAccelerator::kStopped; | |
619 return false; | |
620 } else { | |
621 NOTREACHED() << "Unhandled error in DoDecode()"; | |
622 state_ = DXVAVideoDecodeAccelerator::kStopped; | |
623 return false; | |
624 } | |
625 } | |
626 | |
627 #if !defined(NDEBUG) | |
628 DLOG(INFO) << "Number of input packets before successful decode: " | |
629 << inputs_before_decode_; | |
630 inputs_before_decode_ = 0; | |
631 uint32 end_decode = GetTickCount(); | |
632 DLOG(INFO) << "Total time for decode: " | |
633 << end_decode - decode_start_time_; | |
634 decode_start_time_ = 0; | |
635 #endif // !defined(NDEBUG) | |
636 if (!ProcessOutputSample(output_data_buffer.pSample)) { | |
637 state_ = DXVAVideoDecodeAccelerator::kStopped; | |
638 return false; | |
639 } | |
640 | |
641 input_buffer_frame_times_.clear(); | |
642 state_ = DXVAVideoDecodeAccelerator::kNormal; | |
643 return true; | |
644 } | |
645 | |
646 bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) { | |
647 if (!sample) { | |
648 NOTREACHED() << "ProcessOutput succeeded, but did not get a sample back"; | |
649 return false; | |
650 } | |
651 base::win::ScopedComPtr<IMFSample> output_sample; | |
652 output_sample.Attach(sample); | |
653 | |
654 if (!input_buffer_frame_times_.size()) { | |
655 DLOG(INFO) << "No input buffers left to process output"; | |
656 return false; | |
657 } | |
658 int32 input_buffer_id = | |
659 input_buffer_frame_times_[input_buffer_frame_times_.size() - 1]; | |
660 input_buffer_frame_times_.clear(); | |
661 | |
662 base::win::ScopedComPtr<IMFMediaBuffer> output_buffer; | |
663 HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive()); | |
664 if (FAILED(hr)) { | |
665 NOTREACHED() << "Failed to get buffer from sample"; | |
666 return false; | |
667 } | |
668 base::win::ScopedComPtr<IDirect3DSurface9> surface; | |
669 hr = MFGetService(output_buffer, MR_BUFFER_SERVICE, | |
670 IID_PPV_ARGS(surface.Receive())); | |
671 if (FAILED(hr)) { | |
672 NOTREACHED() << "Failed to get surface from buffer"; | |
673 return false; | |
674 } | |
675 | |
676 D3DSURFACE_DESC surface_desc; | |
677 hr = surface->GetDesc(&surface_desc); | |
678 if (FAILED(hr)) { | |
679 NOTREACHED() << "Failed to get surface description"; | |
680 return false; | |
681 } | |
682 | |
683 #if !defined(NDEBUG) | |
684 DWORD start_surface_time = GetTickCount(); | |
685 #endif // !defined(NDEBUG) | |
686 | |
687 // TODO(ananta) | |
688 // The code below may not be necessary once we have an ANGLE extension which | |
689 // allows us to pass the Direct 3D surface directly for rendering. | |
690 | |
691 // The decoded bits in the source Direct3D surface are in the YUV | |
692 // format. ANGLE does not support that. As a workaround we create an | |
693 // offscreen surface in the RGB format and copy the source surface | |
694 // to this surface. | |
695 base::win::ScopedComPtr<IDirect3DSurface9> dest_surface; | |
696 hr = device_->CreateOffscreenPlainSurface(surface_desc.Width, | |
697 surface_desc.Height, | |
698 D3DFMT_A8R8G8B8, | |
699 D3DPOOL_DEFAULT, | |
700 dest_surface.Receive(), | |
701 NULL); | |
702 if (FAILED(hr)) { | |
703 NOTREACHED() << "Failed to create offscreen surface"; | |
704 return false; | |
705 } | |
706 | |
707 hr = D3DXLoadSurfaceFromSurface(dest_surface, NULL, NULL, surface, NULL, | |
708 NULL, 0, 0); | |
709 if (FAILED(hr)) { | |
710 NOTREACHED() << "Failed to copy source surface to dest."; | |
711 return false; | |
712 } | |
713 | |
714 #if !defined(NDEBUG) | |
715 DWORD end_surface_time = GetTickCount(); | |
716 DLOG(INFO) << "Time to create and copy new surface is " | |
717 << end_surface_time - start_surface_time; | |
718 #endif // !defined(NDEBUG) | |
719 | |
720 PendingSampleInfo sample_info; | |
721 | |
722 sample_info.input_buffer_id = input_buffer_id; | |
723 sample_info.surface = dest_surface; | |
724 pending_output_samples_.push_back(sample_info); | |
725 | |
726 // If we have available picture buffers to copy the output data then use the | |
727 // first one and then flag it as not being available for use. | |
728 if (available_pictures_.size()) { | |
729 ProcessPendingSamples(); | |
730 return true; | |
731 } | |
732 | |
733 if (pictures_requested_) { | |
734 DLOG(INFO) << "Waiting for picture slots from the client."; | |
735 return true; | |
736 } | |
737 // Go ahead and request picture buffers. | |
738 client_->ProvidePictureBuffers( | |
739 kNumPictureBuffers, gfx::Size(surface_desc.Width, surface_desc.Height)); | |
740 pictures_requested_ = true; | |
741 return true; | |
742 } | |
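A possible alternative to the D3DXLoadSurfaceFromSurface copy above, also only
a sketch: on drivers that support it, IDirect3DDevice9::StretchRect can perform
the YUV-to-RGB conversion on the GPU, with the destination created as a
lockable render target. This assumes the |surface| and |surface_desc| locals
from ProcessOutputSample():

  base::win::ScopedComPtr<IDirect3DSurface9> rgb_surface;
  HRESULT hr = device_->CreateRenderTarget(surface_desc.Width,
                                           surface_desc.Height,
                                           D3DFMT_X8R8G8B8,
                                           D3DMULTISAMPLE_NONE,
                                           0,     // Multisample quality.
                                           TRUE,  // Lockable, for readback.
                                           rgb_surface.Receive(),
                                           NULL);
  if (SUCCEEDED(hr)) {
    hr = device_->StretchRect(surface.get(), NULL, rgb_surface.get(), NULL,
                              D3DTEXF_NONE);
  }
| |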
743 | |
744 bool DXVAVideoDecodeAccelerator::CopyOutputSampleDataToPictureBuffer( | |
745 IDirect3DSurface9* dest_surface, media::PictureBuffer picture_buffer, | |
746 int32 input_buffer_id) { | |
747 DCHECK(dest_surface); | |
748 | |
749 // Get the currently loaded bitmap from the DC. | |
750 HDC hdc = NULL; | |
751 | |
752 HRESULT hr = dest_surface->GetDC(&hdc); | |
753 if (FAILED(hr)) { | |
754 NOTREACHED() << "Failed to get HDC for dest offscreen surface"; | |
755 return false; | |
756 } | |
757 | |
758 HBITMAP bitmap = | |
759 reinterpret_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP)); | |
760 if (!bitmap) { | |
761 NOTREACHED() << "Failed to get bitmap from DC"; | |
762 return false; | |
763 } | |
764 // TODO(ananta) | |
765 // The code below may not be necessary once we have an ANGLE extension which | |
766 // allows us to pass the Direct 3D surface directly for rendering. | |
767 | |
768 // The Device dependent bitmap is upside down for OpenGL. We convert the | |
769 // bitmap to a DIB and render it on the texture instead. | |
770 BITMAP bitmap_basic_info = {0}; | |
771 GetObject(bitmap, sizeof(BITMAP), &bitmap_basic_info); | |
772 | |
773 BITMAPINFO bitmap_info = {0}; | |
774 bitmap_info.bmiHeader.biSize = sizeof(BITMAPINFOHEADER); | |
775 bitmap_info.bmiHeader.biWidth = bitmap_basic_info.bmWidth; | |
776 bitmap_info.bmiHeader.biHeight = bitmap_basic_info.bmHeight; | |
777 bitmap_info.bmiHeader.biPlanes = 1; | |
778 bitmap_info.bmiHeader.biBitCount = bitmap_basic_info.bmBitsPixel; | |
779 bitmap_info.bmiHeader.biCompression = BI_RGB; | |
780 bitmap_info.bmiHeader.biSizeImage = 0; | |
781 bitmap_info.bmiHeader.biClrUsed = 0; | |
782 | |
783 int ret = GetDIBits(hdc, bitmap, 0, 0, NULL, &bitmap_info, DIB_RGB_COLORS); | |
784 if (bitmap_info.bmiHeader.biSizeImage <= 0) { | |
785 NOTREACHED() << "Failed to read bitmap size"; | |
786 return false; | |
787 } | |
788 scoped_array<char> bits(new char[bitmap_info.bmiHeader.biSizeImage]); | |
789 ret = GetDIBits(hdc, bitmap, 0, bitmap_basic_info.bmHeight, bits.get(), | |
790 &bitmap_info, DIB_RGB_COLORS); | |
791 DCHECK_GT(ret, 0); | |
792 | |
793 D3DSURFACE_DESC surface_desc; | |
794 hr = dest_surface->GetDesc(&surface_desc); | |
795 DCHECK(SUCCEEDED(hr)); | |
796 | |
797 glBindTexture(GL_TEXTURE_2D, picture_buffer.texture_id()); | |
798 glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT, surface_desc.Width, | |
799 surface_desc.Height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, | |
800 (GLvoid*) bits.get()); | |
801 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); | |
802 glBindTexture(GL_TEXTURE_2D, 0); | |
803 dest_surface->ReleaseDC(hdc); | |
804 | |
805 media::Picture output_picture(picture_buffer.id(), input_buffer_id); | |
806 client_->PictureReady(output_picture); | |
807 return true; | |
808 } | |
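Why the DIB round-trip above fixes the orientation: GetDIBits called with a
positive biHeight returns a bottom-up DIB, i.e. the first row in the returned
buffer is the bottom scanline, and glTexImage2D likewise treats the first row
of the supplied pixels as the bottom of the texture, so the two conventions
cancel and the image comes out right side up.
| |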
809 | |
810 void DXVAVideoDecodeAccelerator::ProcessPendingSamples() { | |
811 PendingOutputSamples::iterator sample_index = | |
812 pending_output_samples_.begin(); | |
813 HRESULT hr = E_FAIL; | |
814 | |
815 while (sample_index != pending_output_samples_.end()) { | |
816 const PendingSampleInfo& sample_info = *sample_index; | |
817 OutputBuffers::iterator index; | |
818 for (index = available_pictures_.begin(); | |
819 index != available_pictures_.end(); | |
820 ++index) { | |
821 if (index->second.available) { | |
822 CopyOutputSampleDataToPictureBuffer(sample_info.surface, | |
823 index->second.picture_buffer, | |
824 sample_info.input_buffer_id); | |
825 index->second.available = false; | |
826 sample_index = pending_output_samples_.erase(sample_index); | |
827 break; | |
828 } | |
829 } | |
830 if (index == available_pictures_.end()) { | |
831 DLOG(INFO) << "No available picture slots for output"; | |
832 break; | |
833 } | |
834 } | |
835 } | |