// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/media/dxva_video_decode_accelerator.h"

#include <mfapi.h>
#include <mferror.h>
#include <wmcodecdsp.h>

#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_handle.h"
#include "base/memory/scoped_ptr.h"
#include "base/shared_memory.h"
#include "base/win/scoped_com_initializer.h"
#include "content/common/gpu/media/gles2_texture_to_egl_image_translator.h"
#include "media/base/video_frame.h"
#include "media/video/video_decode_accelerator.h"

base::LazyInstance<base::win::ScopedCOMInitializer>
apatrick_chromium 2011/11/14 20:55:07:
  This happens in gpu_main.cc so it might not be necessary here.
ananta 2011/12/13 01:39:15:
  Removed.
    g_init_com(base::LINKER_INITIALIZED);

namespace {

GUID ConvertVideoFrameFormatToGuid(media::VideoFrame::Format format) {
  switch (format) {
    case media::VideoFrame::NV12:
      return MFVideoFormat_NV12;
    case media::VideoFrame::YV12:
      return MFVideoFormat_YV12;
    default:
      break;
  }
  NOTREACHED() << "Unsupported VideoFrame format";
  return GUID_NULL;
}

IMFSample* CreateEmptySample() {
  base::win::ScopedComPtr<IMFSample> sample;
  HRESULT hr = MFCreateSample(sample.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "Unable to create an empty sample";
    return NULL;
  }
  return sample.Detach();
}

// Creates a Media Foundation sample with one buffer of length |buffer_length|
// on an |align|-byte boundary. |align| must be a power of two or 0. If
// |align| is 0, then no alignment is specified.
IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
  CHECK_GT(buffer_length, 0);
  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateEmptySample());
  if (!sample.get())
    return NULL;
  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
  HRESULT hr = E_FAIL;
  if (align == 0) {
    // Note that MFCreateMemoryBuffer is equivalent to
    // MFCreateAlignedMemoryBuffer with the alignment argument set to 0.
    hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
  } else {
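    // MFCreateAlignedMemoryBuffer expects the MF alignment constants
    // (e.g. MF_16_BYTE_ALIGNMENT == 15), which are the desired power-of-two
    // alignment minus one, hence |align - 1| below.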
    hr = MFCreateAlignedMemoryBuffer(buffer_length,
                                     align - 1,
                                     buffer.Receive());
  }
  if (FAILED(hr)) {
    NOTREACHED() << "Unable to create an empty buffer";
    return NULL;
  }
  hr = sample->AddBuffer(buffer.get());
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to add empty buffer to sample";
    return NULL;
  }
  return sample.Detach();
}

// Creates a Media Foundation sample with one buffer containing a copy of the
// given Annex B stream data.
// If the duration and sample time are not known, provide 0.
// |min_size| specifies the minimum size of the buffer (might be required by
// the decoder for input). The times here should be given in 100ns units.
// |alignment| specifies the alignment of the buffer in the sample. If no
// alignment is required, provide 0 or 1.
static IMFSample* CreateInputSample(const uint8* stream, int size,
                                    int64 timestamp, int64 duration,
                                    int min_size, int alignment) {
  CHECK(stream);
  CHECK_GT(size, 0);
  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
                                            alignment));
  if (!sample.get()) {
    NOTREACHED() << "Failed to create empty buffer for input";
    return NULL;
  }
  HRESULT hr = E_FAIL;
  if (duration > 0) {
    hr = sample->SetSampleDuration(duration);
    if (FAILED(hr)) {
      NOTREACHED() << "Failed to set sample duration";
      return NULL;
    }
  }
  if (timestamp > 0) {
    hr = sample->SetSampleTime(timestamp);
    if (FAILED(hr)) {
      NOTREACHED() << "Failed to set sample time";
      return NULL;
    }
  }
  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
  hr = sample->GetBufferByIndex(0, buffer.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to get buffer in sample";
    return NULL;
  }
  DWORD max_length = 0, current_length = 0;
  uint8* destination = NULL;
  hr = buffer->Lock(&destination, &max_length, &current_length);
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to lock buffer";
    return NULL;
  }
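  // Per IMFMediaBuffer::Lock, |max_length| receives the buffer's total
  // capacity and |current_length| the length of valid data, which should be
  // zero for a freshly created buffer.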
  CHECK_EQ(current_length, 0u);
  CHECK_GE(static_cast<int>(max_length), size);
  memcpy(destination, stream, size);
  CHECK(SUCCEEDED(buffer->Unlock()));
  hr = buffer->SetCurrentLength(size);
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to set current length to " << size;
    return NULL;
  }
  LOG(INFO) << __FUNCTION__ << " wrote " << size << " bytes into input sample";
  return sample.Detach();
}

}  // namespace

DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
    media::VideoDecodeAccelerator::Client* client)
    : client_(client),
      message_loop_(MessageLoop::current()),
      surface_width_(0),
      surface_height_(0),
      state_(kUninitialized),
      input_buffers_at_component_(0),
      input_stream_info_(),
      output_stream_info_() {
  // Initialize COM on this thread.
  g_init_com.Get();
}

DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() {
  DCHECK_EQ(message_loop_, MessageLoop::current());
  client_ = NULL;
  message_loop_ = NULL;
}

bool DXVAVideoDecodeAccelerator::Initialize(Profile profile) {
  DCHECK_EQ(message_loop_, MessageLoop::current());

  if (state_ != kUninitialized) {
    NOTREACHED() << "Initialize: invalid state: " << state_;
    return false;
  }

  HRESULT hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
  if (FAILED(hr)) {
    NOTREACHED() << "MFStartup failed. Error:"
                 << std::hex << std::showbase << hr;
    return false;
  }
  if (!CreateD3DDevManager())
    return false;
  if (!InitDecoder())
    return false;
  if (!GetStreamsInfoAndBufferReqs())
    return false;
  if (SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING)) {
    state_ = DXVAVideoDecodeAccelerator::kNormal;
    client_->NotifyInitializeDone();
    return true;
  }
  return false;
}

void DXVAVideoDecodeAccelerator::Decode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DCHECK_EQ(message_loop_, MessageLoop::current());
  if (state_ == DXVAVideoDecodeAccelerator::kUninitialized) {
    NOTREACHED() << "Decode: invalid state";
    return;
  }
  base::win::ScopedHandle source_process_handle(
      ::OpenProcess(PROCESS_DUP_HANDLE,
apatrick_chromium 2011/11/14 20:55:07:
  I think the sandbox might prevent OpenProcess from working here.
ananta 2011/12/13 01:39:15:
  Done.
                    FALSE,
                    bitstream_buffer.source_process_id()));
  if (!source_process_handle.IsValid()) {
    NOTREACHED() << "Failed to open source process handle";
    return;
  }

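  // |bitstream_buffer.handle()| is only valid in the source (renderer)
  // process, so it has to be duplicated into this process before it can be
  // mapped.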
  HANDLE shared_memory_handle = NULL;
  if (!::DuplicateHandle(source_process_handle,
                         bitstream_buffer.handle(),
                         ::GetCurrentProcess(),
                         &shared_memory_handle,
                         0,
                         FALSE,
                         DUPLICATE_SAME_ACCESS)) {
    NOTREACHED() << "Failed to open duplicate shared mem handle";
    return;
  }

  base::SharedMemory shm(shared_memory_handle, true);
  if (!shm.Map(bitstream_buffer.size())) {
    NOTREACHED() << "Failed in SharedMemory::Map()";
    return;
  }

  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateInputSample(reinterpret_cast<const uint8*>(shm.memory()),
                                  bitstream_buffer.size(),
                                  0,
                                  0,
                                  input_stream_info_.cbSize,
                                  input_stream_info_.cbAlignment));
  if (!sample.get()) {
    NOTREACHED() << "Failed to create an input sample";
    return;
  }
  if (FAILED(decoder_->ProcessInput(0, sample.get(), 0))) {
    NOTREACHED() << "Failed to process input";
    return;
  }
  if (state_ != DXVAVideoDecodeAccelerator::kEosDrain) {
    // Send the EOS / drain messages so the decoder emits decoded output.
    if (!SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM) ||
        !SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN)) {
      NOTREACHED() << "Failed to send EOS / drain messages to MFT";
    } else {
      state_ = DXVAVideoDecodeAccelerator::kEosDrain;
    }
  }
  client_->NotifyEndOfBitstreamBuffer(bitstream_buffer.id());
  DoDecode();
}

void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
    const std::vector<media::PictureBuffer>& buffers) {
  DCHECK_EQ(message_loop_, MessageLoop::current());
  if (pending_output_samples_.size() != buffers.size()) {
    NOTREACHED() << "Mismatched picture buffers and pending samples.";
    return;
  }
  // Copy the picture buffers provided by the client to the available list,
  // and mark these buffers as available for use.
  for (size_t buffer_index = 0; buffer_index < buffers.size();
       ++buffer_index) {
    DXVAPictureBuffer picture_buffer;
    picture_buffer.available = true;
    picture_buffer.picture_buffer = buffers[buffer_index];

    DCHECK(available_pictures_.find(buffers[buffer_index].id()) ==
           available_pictures_.end());
    available_pictures_[buffers[buffer_index].id()] = picture_buffer;
  }
  size_t buffer_index = 0;
  PendingOutputSamples::iterator sample_index =
      pending_output_samples_.begin();
  HRESULT hr = E_FAIL;

  while (sample_index != pending_output_samples_.end()) {
    const base::win::ScopedComPtr<IMFSample>& sample = *sample_index;
    base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
    hr = sample->GetBufferByIndex(0, output_buffer.Receive());
    if (FAILED(hr)) {
      NOTREACHED() << "Failed to get buffer from sample";
      // Skip the bad sample; continuing without advancing the iterator
      // would loop forever.
      ++sample_index;
      continue;
    }
    base::win::ScopedComPtr<IDirect3DSurface9> surface;
    hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
                      IID_PPV_ARGS(surface.Receive()));
    if (FAILED(hr)) {
      NOTREACHED() << "Failed to get surface from buffer";
      ++sample_index;
      continue;
    }
    // Pair each pending sample with the next picture buffer in turn.
    CopyOutputSampleDataToPictureBuffer(sample,
                                        surface,
                                        buffers[buffer_index++]);
    sample_index = pending_output_samples_.erase(sample_index);
  }
}

void DXVAVideoDecodeAccelerator::ReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK_EQ(message_loop_, MessageLoop::current());
  DCHECK(available_pictures_.find(picture_buffer_id) !=
         available_pictures_.end());
}

void DXVAVideoDecodeAccelerator::Flush() {
#if 0
  DCHECK_EQ(message_loop_, MessageLoop::current());
  LOG(INFO) << "DXVAVideoDecodeAccelerator::Flush";
  if (state_ != kNormal) {
    NOTREACHED() << "Flush: invalid state";
    return;
  }
  state_ = kFlushing;
  if (!SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH)) {
    LOG(WARNING) << "DXVAVideoDecodeAccelerator::Flush failed to send message";
  }
  state_ = kNormal;
  client_->NotifyFlushDone();
#endif
}

void DXVAVideoDecodeAccelerator::Reset() {
  DCHECK_EQ(message_loop_, MessageLoop::current());
  LOG(INFO) << "DXVAVideoDecodeAccelerator::Reset";
  if (state_ != kNormal) {
    NOTREACHED() << "Reset: invalid state";
    return;
  }

  state_ = DXVAVideoDecodeAccelerator::kResetting;
  if (!SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH)) {
    LOG(WARNING) << "DXVAVideoDecodeAccelerator::Reset failed to send message";
  }
  state_ = DXVAVideoDecodeAccelerator::kNormal;
  client_->NotifyResetDone();
}

void DXVAVideoDecodeAccelerator::Destroy() {
  DCHECK_EQ(message_loop_, MessageLoop::current());
  OutputBuffers::iterator index;
  for (index = available_pictures_.begin(); index != available_pictures_.end();
       ++index) {
    client_->DismissPictureBuffer(index->second.picture_buffer.id());
  }
  available_pictures_.clear();
  pending_output_samples_.clear();
}

void DXVAVideoDecodeAccelerator::SetEglState(EGLDisplay egl_display,
                                             EGLContext egl_context) {
  DCHECK_EQ(message_loop_, MessageLoop::current());
  egl_display_ = egl_display;
  egl_context_ = egl_context;
}

bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() {
  d3d9_.Attach(Direct3DCreate9(D3D_SDK_VERSION));
apatrick_chromium 2011/11/14 20:55:07:
  I think it will be easier to synchronize this code…
  if (d3d9_.get() == NULL) {
    NOTREACHED() << "Failed to create D3D9";
    return false;
  }

  D3DPRESENT_PARAMETERS present_params = {0};
  present_params.BackBufferWidth = 0;
  present_params.BackBufferHeight = 0;
  present_params.BackBufferFormat = D3DFMT_UNKNOWN;
  present_params.BackBufferCount = 1;
  present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
  present_params.hDeviceWindow = GetShellWindow();
  present_params.Windowed = TRUE;
  present_params.Flags = D3DPRESENTFLAG_VIDEO;
  present_params.FullScreen_RefreshRateInHz = 0;
  present_params.PresentationInterval = 0;

  HRESULT hr = d3d9_->CreateDevice(D3DADAPTER_DEFAULT,
apatrick_chromium 2011/11/16 22:57:17:
  Daniel, the DXVA video decoder needs a D3D device.
                                   D3DDEVTYPE_HAL,
                                   GetShellWindow(),
                                   (D3DCREATE_HARDWARE_VERTEXPROCESSING |
                                    D3DCREATE_MULTITHREADED),
                                   &present_params,
                                   device_.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to create D3D Device";
    return false;
  }

  UINT dev_manager_reset_token = 0;
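  // The reset token pairs |device_| with the device manager; the same token
  // must be passed to ResetDevice() below.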
  hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token,
                                         device_manager_.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "Couldn't create D3D Device manager";
    return false;
  }

  hr = device_manager_->ResetDevice(device_.get(),
                                    dev_manager_reset_token);
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to set device to device manager";
    return false;
  }
  return true;
}

bool DXVAVideoDecodeAccelerator::InitDecoder() {
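  // CMSH264DecoderMFT identifies the Microsoft H.264 decoder MFT declared in
  // wmcodecdsp.h.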
  HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT),
apatrick_chromium 2011/11/14 20:55:07:
  Does this work when the sandbox is enabled?
ananta 2011/12/13 01:39:15:
  Yes.
                                NULL,
                                CLSCTX_INPROC_SERVER,
                                __uuidof(IMFTransform),
                                reinterpret_cast<void**>(decoder_.Receive()));
  if (FAILED(hr) || !decoder_.get()) {
    NOTREACHED() << "CoCreateInstance failed "
                 << std::hex << std::showbase << hr;
    return false;
  }
  if (!CheckDecoderDxvaSupport())
    return false;
  hr = decoder_->ProcessMessage(
      MFT_MESSAGE_SET_D3D_MANAGER,
      reinterpret_cast<ULONG_PTR>(device_manager_.get()));
apatrick_chromium 2011/11/16 22:57:17:
  DXVA also needs a pointer to the Direct3DDeviceManager9.
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to pass D3D9 device to decoder "
                 << std::hex << hr;
    return false;
  }
  return SetDecoderMediaTypes();
}

bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
  base::win::ScopedComPtr<IMFAttributes> attributes;
  HRESULT hr = decoder_->GetAttributes(attributes.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to get decoder attributes, hr = "
                 << std::hex << std::showbase << hr;
    return false;
  }
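  // MF_SA_D3D_AWARE is nonzero if the MFT accepts a Direct3D device manager
  // via MFT_MESSAGE_SET_D3D_MANAGER, i.e. it supports DXVA.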
  UINT32 dxva = 0;
  hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
  if (FAILED(hr) || !dxva) {
    NOTREACHED() << "Failed to get DXVA attr. Error: "
                 << std::hex << std::showbase << hr
                 << ". This might not be the right decoder.";
    return false;
  }
  return true;
}

bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() {
  if (!SetDecoderInputMediaType())
    return false;
  return SetDecoderOutputMediaType(ConvertVideoFrameFormatToGuid(
      media::VideoFrame::NV12));
}

bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() {
  base::win::ScopedComPtr<IMFMediaType> media_type;
  HRESULT hr = MFCreateMediaType(media_type.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to create empty media type object";
    return false;
  }

  hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
  if (FAILED(hr)) {
    NOTREACHED() << "SetGUID for major type failed";
    return false;
  }

  hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
  if (FAILED(hr)) {
    NOTREACHED() << "SetGUID for subtype failed";
    return false;
  }

  hr = decoder_->SetInputType(0, media_type.get(), 0);  // No flags.
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to set decoder's input type";
    return false;
  }
  return true;
}

bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType(
    const GUID& subtype) {
  DWORD i = 0;
  IMFMediaType* out_media_type = NULL;
  while (SUCCEEDED(decoder_->GetOutputAvailableType(0, i, &out_media_type))) {
    GUID out_subtype = {0};
    HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
    if (FAILED(hr)) {
      NOTREACHED() << "Failed to GetGUID() on GetOutputAvailableType() " << i;
      out_media_type->Release();
      // Advance to the next available type; continuing without incrementing
      // |i| would retry the same type forever.
      i++;
      continue;
    }
    if (out_subtype == subtype) {
      hr = decoder_->SetOutputType(0, out_media_type, 0);  // No flags.
      if (SUCCEEDED(hr)) {
        hr = MFGetAttributeSize(out_media_type, MF_MT_FRAME_SIZE,
                                reinterpret_cast<UINT32*>(&surface_width_),
                                reinterpret_cast<UINT32*>(&surface_height_));
      }
      if (FAILED(hr)) {
        NOTREACHED() << "Failed to SetOutputType to |subtype| or obtain "
                     << "width/height " << std::hex << hr;
      } else {
        out_media_type->Release();
        return true;
      }
    }
    i++;
    out_media_type->Release();
  }
  return false;
}

bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg) {
  HRESULT hr = decoder_->ProcessMessage(msg, NULL);
  return SUCCEEDED(hr);
}

// Gets the minimum buffer sizes for input and output samples.
// The MFT will not allocate buffers for either input or output, so we have
// to do it ourselves and make sure they're the correct size. The exception
// is when DXVA is enabled: in that case the decoder allocates the output.
bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() {
  HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to get input stream info";
    return false;
  }
  LOG(INFO) << "Input stream info: ";
  LOG(INFO) << "Max latency: " << input_stream_info_.hnsMaxLatency;
  // There should be three flags, one for requiring a whole frame be in a
  // single sample, one for requiring there be one buffer only in a single
  // sample, and one that specifies a fixed sample size (as in cbSize).
  LOG(INFO) << "Flags: "
            << std::hex << std::showbase << input_stream_info_.dwFlags;
  CHECK_EQ(input_stream_info_.dwFlags, 0x7u);
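  // The 0x7 above is MFT_INPUT_STREAM_WHOLE_SAMPLES |
  // MFT_INPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER |
  // MFT_INPUT_STREAM_FIXED_SAMPLE_SIZE.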
  LOG(INFO) << "Min buffer size: " << input_stream_info_.cbSize;
  LOG(INFO) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
  LOG(INFO) << "Alignment: " << input_stream_info_.cbAlignment;

  hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to get output stream info";
    return false;
  }
  LOG(INFO) << "Output stream info: ";
  // The flags here should be the same and mean the same thing, except when
  // DXVA is enabled, where there is an extra 0x100 flag meaning the decoder
  // will allocate its own samples.
  LOG(INFO) << "Flags: "
            << std::hex << std::showbase << output_stream_info_.dwFlags;
  CHECK_EQ(output_stream_info_.dwFlags, 0x107u);
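  // The extra bit in 0x107 is MFT_OUTPUT_STREAM_PROVIDES_SAMPLES (0x100):
  // with DXVA the decoder allocates its own output samples.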
  LOG(INFO) << "Min buffer size: " << output_stream_info_.cbSize;
  LOG(INFO) << "Alignment: " << output_stream_info_.cbAlignment;
  return true;
}

bool DXVAVideoDecodeAccelerator::DoDecode() {
  if (state_ != kNormal && state_ != kEosDrain) {
    NOTREACHED() << "DoDecode: not in normal or drain state";
    return false;
  }
  // scoped_refptr<VideoFrame> frame;
  base::win::ScopedComPtr<IMFSample> output_sample;

  MFT_OUTPUT_DATA_BUFFER output_data_buffer;
  DWORD status = 0;

  memset(&output_data_buffer, 0, sizeof(output_data_buffer));
  output_data_buffer.pSample = output_sample;
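  // |output_sample| is still NULL at this point; with DXVA enabled the
  // decoder provides its own output samples (the
  // MFT_OUTPUT_STREAM_PROVIDES_SAMPLES flag checked above), so none is
  // allocated here.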
  HRESULT hr = decoder_->ProcessOutput(0,  // No flags.
                                       1,  // # of out streams to pull from.
                                       &output_data_buffer,
                                       &status);
  IMFCollection* events = output_data_buffer.pEvents;
  if (events != NULL) {
    LOG(INFO) << "Got events from ProcessOutput, but discarding";
    events->Release();
  }

  if (FAILED(hr)) {
    if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
      if (!SetDecoderOutputMediaType(ConvertVideoFrameFormatToGuid(
              media::VideoFrame::NV12))) {
        NOTREACHED() << "Failed to set decoder output media type";
        return false;
      }
      return true;
    } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
      if (state_ == DXVAVideoDecodeAccelerator::kEosDrain) {
        // No more output from the decoder. Notify EOS and stop playback.
        // scoped_refptr<VideoFrame> frame;
        // VideoFrame::CreateEmptyFrame(&frame);
        state_ = DXVAVideoDecodeAccelerator::kStopped;
        return false;
      }
      return true;
    } else {
      NOTREACHED() << "Unhandled error in DoDecode()";
      state_ = DXVAVideoDecodeAccelerator::kStopped;
      return false;
    }
  }
  if (!ProcessOutputSample(output_data_buffer.pSample)) {
    NOTREACHED() << "Failed to process output sample";
    return false;
  }

#if 0
  // No distinction between the 3 planes - all 3 point to the handle of
  // the texture. (There are actually only 2 planes since the output
  // D3D surface is in NV12 format.)
  VideoFrame::D3dTexture textures[VideoFrame::kMaxPlanes] = { surface.get(),
                                                              surface.get(),
                                                              surface.get() };
  VideoFrame::CreateFrameD3dTexture(info_.stream_info.surface_format,
                                    info_.stream_info.surface_width,
                                    info_.stream_info.surface_height,
                                    textures,
                                    TimeDelta::FromMicroseconds(timestamp),
                                    TimeDelta::FromMicroseconds(duration),
                                    &frame);
  if (!frame.get()) {
    NOTREACHED() << "Failed to allocate video frame for d3d texture";
    return true;
  }
#endif
  return true;
}

bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) {
  if (!sample) {
    NOTREACHED() << "ProcessOutput succeeded, but did not get a sample back";
    return false;
  }
  base::win::ScopedComPtr<IMFSample> output_sample(sample);

  int64 timestamp = 0, duration = 0;
  if (FAILED(output_sample->GetSampleTime(&timestamp)) ||
      FAILED(output_sample->GetSampleDuration(&duration))) {
    NOTREACHED() << "Failed to get timestamp/duration from output";
  }

  // Sanity checks for verifying that there is really something in the sample.
  DWORD buf_count = 0;
  HRESULT hr = output_sample->GetBufferCount(&buf_count);
  if (FAILED(hr) || buf_count != 1) {
    NOTREACHED() << "Failed to get buffer count, or buffer count mismatch";
    return false;
  }

  base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
  hr = output_sample->GetBufferByIndex(0, output_buffer.Receive());
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to get buffer from sample";
    return false;
  }

  base::win::ScopedComPtr<IDirect3DSurface9> surface;
  hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
                    IID_PPV_ARGS(surface.Receive()));
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to get surface from buffer";
    return false;
  }

  // If we have an available picture buffer to copy the output data into, use
  // the first one and then flag it as no longer available for use.
  OutputBuffers::iterator index;
  for (index = available_pictures_.begin();
       index != available_pictures_.end();
       ++index) {
    if (index->second.available) {
      CopyOutputSampleDataToPictureBuffer(output_sample, surface.get(),
                                          index->second.picture_buffer);
      index->second.available = false;
      return true;
    }
  }
  // No picture buffer available; ask the client for one and queue the sample
  // until AssignPictureBuffers() is called.
  D3DSURFACE_DESC surface_desc;
  hr = surface->GetDesc(&surface_desc);
  if (FAILED(hr)) {
    NOTREACHED() << "Failed to get surface description";
    return false;
  }
  client_->ProvidePictureBuffers(
      1, gfx::Size(surface_desc.Width, surface_desc.Height));
  pending_output_samples_.push_back(output_sample);
  return true;
}

bool DXVAVideoDecodeAccelerator::CopyOutputSampleDataToPictureBuffer(
    IMFSample* sample, IDirect3DSurface9* surface,
    media::PictureBuffer picture_buffer) {
  DCHECK(sample);
  DCHECK(surface);

  static Gles2TextureToEglImageTranslator texture2eglImage_translator;
  EGLImageKHR egl_image = texture2eglImage_translator.TranslateToEglImage(
apatrick_chromium 2011/11/16 22:57:17:
  ... and here we have a D3D surface (not a texture).
ananta 2011/12/13 01:39:15:
  Removed this. Was an incorrect cut paste.
      egl_display_, egl_context_, picture_buffer.texture_id());
  media::Picture output_picture(picture_buffer.id(), 0);
  client_->PictureReady(output_picture);
  return true;
}