OLD | NEW |
---|---|
(Empty) | |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "content/common/gpu/media/dxva_video_decode_accelerator.h" | |
6 | |
7 #if !defined(OS_WIN) | |
8 #error This file should only be built on Windows. | |
9 #endif // !defined(OS_WIN) | |
10 | |
11 #include <ks.h> | |
12 #include <codecapi.h> | |
13 #include <d3dx9tex.h> | |
14 #include <mfapi.h> | |
15 #include <mferror.h> | |
16 #include <wmcodecdsp.h> | |
17 | |
18 #include "base/bind.h" | |
19 #include "base/callback.h" | |
20 #include "base/debug/trace_event.h" | |
21 #include "base/logging.h" | |
22 #include "base/memory/scoped_handle.h" | |
23 #include "base/memory/scoped_ptr.h" | |
24 #include "base/message_loop.h" | |
25 #include "base/process_util.h" | |
26 #include "base/shared_memory.h" | |
27 #include "media/video/video_decode_accelerator.h" | |
28 #include "third_party/angle/include/GLES2/gl2.h" | |
29 #include "third_party/angle/include/GLES2/gl2ext.h" | |
30 | |
31 // We only request 5 picture buffers from the client which are used to hold the | |
32 // decoded samples. These buffers are then reused when the client tells us that | |
33 // it is done with the buffer. | |
34 static const int kNumPictureBuffers = 5; | |
35 | |
36 #define RETURN_ON_FAILURE(result, log, ret) \ | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
A single failure in CreateEmptySample will result
ananta (2011/12/20 02:27:22):
Replaced the logging with a CHECK condition instead.
| |
37 do { \ | |
38 if (!(result)) { \ | |
39 DVLOG(1) << log; \ | |
40 return ret; \ | |
41 } \ | |
42 } while (0) | |
43 | |
44 #define RETURN_ON_HR_FAILURE(result, log, ret) \ | |
45 RETURN_ON_FAILURE(SUCCEEDED(result), log, ret); | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
If you replaced log in this call with:
log << ", H
ananta (2011/12/20 02:27:22):
Done.
| |
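The suggestion above is to fold the failing HRESULT into the macro's log output so individual call sites don't have to repeat it. A minimal sketch of what such a variant could look like (an assumption about the eventual change, not the actual patch):

// Sketch only: append the HRESULT to the log message inside the macro, so
// callers pass just the HRESULT and a plain description.
#define RETURN_ON_HR_FAILURE(result, log, ret)                      \
  RETURN_ON_FAILURE(SUCCEEDED(result),                              \
                    log << ", HRESULT: 0x" << std::hex << result,   \
                    ret)

With this shape, the MFStartup call later in the file could log simply "MFStartup failed" and still record the error code.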
46 | |
47 #define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \ | |
48 do { \ | |
49 if (!(result)) { \ | |
50 DVLOG(1) << log; \ | |
51 StopOnError(error_code); \ | |
52 return ret; \ | |
53 } \ | |
54 } while (0) | |
55 | |
56 #define RETURN_AND_NOTIFY_ON_HR_FAILURE(result, log, error_code, ret) \ | |
57 RETURN_AND_NOTIFY_ON_FAILURE(SUCCEEDED(result), log, error_code, ret); | |
58 | |
59 static IMFSample* CreateEmptySample() { | |
60 base::win::ScopedComPtr<IMFSample> sample; | |
61 HRESULT hr = MFCreateSample(sample.Receive()); | |
62 RETURN_ON_HR_FAILURE(hr, "Unable to create an empty sample", NULL); | |
63 return sample.Detach(); | |
64 } | |
65 | |
66 // Creates a Media Foundation sample with one buffer of length |buffer_length| | |
67 // on an |align|-byte boundary. Alignment must be a power of 2, or 0. | |
68 static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) { | |
69 CHECK_GT(buffer_length, 0); | |
70 | |
71 base::win::ScopedComPtr<IMFSample> sample; | |
72 sample.Attach(CreateEmptySample()); | |
73 RETURN_ON_FAILURE(sample.get(), "Failed to create sample", NULL); | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
Here and elsewhere, I don't think you need the .get()
ananta (2011/12/20 02:27:22):
Done.
| |
74 | |
75 base::win::ScopedComPtr<IMFMediaBuffer> buffer; | |
76 HRESULT hr = E_FAIL; | |
77 if (align == 0) { | |
78 // Note that MFCreateMemoryBuffer is the same as MFCreateAlignedMemoryBuffer | |
79 // with the align argument being 0. | |
80 hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive()); | |
81 } else { | |
82 hr = MFCreateAlignedMemoryBuffer(buffer_length, | |
83 align - 1, | |
84 buffer.Receive()); | |
85 } | |
86 RETURN_ON_HR_FAILURE(hr, "Unable to create an empty buffer", NULL); | |
87 | |
88 hr = sample->AddBuffer(buffer.get()); | |
89 RETURN_ON_HR_FAILURE(hr, "Failed to add empty buffer to sample", NULL); | |
90 | |
91 return sample.Detach(); | |
92 } | |
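A detail worth noting here: MFCreateAlignedMemoryBuffer takes its alignment argument as (alignment - 1), which is how the MF_*_ALIGNMENT constants are defined, and that is why the helper passes align - 1. A hypothetical usage sketch:

// Hypothetical usage: a 64 KB sample aligned to 16 bytes. The helper turns
// this into MFCreateAlignedMemoryBuffer(64 * 1024, 15, ...), i.e.
// MF_16_BYTE_ALIGNMENT, because the API expects (alignment - 1).
base::win::ScopedComPtr<IMFSample> sample;
sample.Attach(CreateEmptySampleWithBuffer(64 * 1024, 16));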
93 | |
94 // Creates a Media Foundation sample with one buffer containing a copy of the | |
95 // given Annex B stream data. | |
96 // |min_size| specifies the minimum size of the buffer (might be required | |
97 // by the decoder for input). If no alignment is required, provide 0 for | |
98 // |alignment|. | |
99 static IMFSample* CreateInputSample(const uint8* stream, int size, | |
100 int min_size, int alignment) { | |
101 CHECK(stream); | |
102 CHECK_GT(size, 0); | |
103 base::win::ScopedComPtr<IMFSample> sample; | |
104 sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size), | |
105 alignment)); | |
106 RETURN_ON_FAILURE(sample.get(), "Failed to create empty buffer for input", | |
107 NULL); | |
108 | |
109 base::win::ScopedComPtr<IMFMediaBuffer> buffer; | |
110 HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive()); | |
111 RETURN_ON_HR_FAILURE(hr, "Failed to get buffer in sample", NULL); | |
112 | |
113 DWORD max_length = 0; | |
114 DWORD current_length = 0; | |
115 uint8* destination = NULL; | |
116 hr = buffer->Lock(&destination, &max_length, &current_length); | |
117 RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", NULL); | |
118 | |
119 CHECK_EQ(current_length, 0u); | |
120 CHECK_GE(static_cast<int>(max_length), size); | |
121 memcpy(destination, stream, size); | |
122 CHECK(SUCCEEDED(buffer->Unlock())); | |
123 | |
124 hr = buffer->SetCurrentLength(size); | |
125 RETURN_ON_HR_FAILURE(hr, "Failed to set current length", NULL); | |
126 | |
127 hr = sample->SetUINT32(MFSampleExtension_CleanPoint, TRUE); | |
128 RETURN_ON_HR_FAILURE(hr, "Failed to mark sample as key sample", NULL); | |
129 | |
130 return sample.Detach(); | |
131 } | |
132 | |
133 static IMFSample* CreateSampleFromInputBuffer( | |
134 const media::BitstreamBuffer& bitstream_buffer, | |
135 base::ProcessHandle renderer_process, | |
136 DWORD stream_size, | |
137 DWORD alignment) { | |
138 HANDLE shared_memory_handle = NULL; | |
139 RETURN_ON_FAILURE(::DuplicateHandle(renderer_process, | |
140 bitstream_buffer.handle(), | |
141 base::GetCurrentProcessHandle(), | |
142 &shared_memory_handle, | |
143 0, | |
144 FALSE, | |
145 DUPLICATE_SAME_ACCESS), | |
146 "Failed to open duplicate shared mem handle", NULL); | |
147 | |
148 base::SharedMemory shm(shared_memory_handle, true); | |
149 RETURN_ON_FAILURE(shm.Map(bitstream_buffer.size()), | |
150 "Failed in SharedMemory::Map", NULL); | |
151 | |
152 return CreateInputSample(reinterpret_cast<const uint8*>(shm.memory()), | |
153 bitstream_buffer.size(), | |
154 stream_size, | |
155 alignment); | |
156 } | |
157 | |
158 DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer( | |
159 bool picture_available, const media::PictureBuffer& buffer) | |
160 : available(picture_available), | |
161 picture_buffer(buffer) { | |
162 } | |
163 | |
164 DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo( | |
165 int32 buffer_id, IDirect3DSurface9* surface) | |
166 : input_buffer_id(buffer_id), | |
167 dest_surface(surface) { | |
168 } | |
169 | |
170 DXVAVideoDecodeAccelerator::PendingSampleInfo::~PendingSampleInfo() {} | |
171 | |
172 DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator( | |
173 media::VideoDecodeAccelerator::Client* client, | |
174 base::ProcessHandle renderer_process) | |
175 : client_(client), | |
176 state_(kUninitialized), | |
177 pictures_requested_(false), | |
178 renderer_process_(renderer_process), | |
179 dev_manager_reset_token_(0), | |
180 last_input_buffer_id_(-1), | |
181 inputs_before_decode_(0) { | |
182 } | |
183 | |
184 DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() { | |
185 client_ = NULL; | |
186 } | |
187 | |
188 bool DXVAVideoDecodeAccelerator::Initialize(Profile) { | |
189 DCHECK(CalledOnValidThread()); | |
190 | |
191 RETURN_AND_NOTIFY_ON_FAILURE((state_ == kUninitialized), | |
192 "Initialize: invalid state: " << state_, ILLEGAL_STATE, false); | |
193 | |
194 HRESULT hr = MFStartup(MF_VERSION, MFSTARTUP_FULL); | |
195 RETURN_AND_NOTIFY_ON_HR_FAILURE( | |
196 hr, "MFStartup failed. Error:" << std::hex << std::showbase, | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
I think you forgot the final hr on this line (but
ananta (2011/12/20 02:27:22):
Done.
| |
197 PLATFORM_FAILURE, false); | |
198 | |
199 RETURN_AND_NOTIFY_ON_FAILURE(CreateD3DDevManager(), | |
200 "Failed to create device manager", PLATFORM_FAILURE, false); | |
201 | |
202 RETURN_AND_NOTIFY_ON_FAILURE(InitDecoder(), | |
203 "Failed to initialize decoder", PLATFORM_FAILURE, false); | |
204 | |
205 RETURN_AND_NOTIFY_ON_FAILURE(GetStreamsInfoAndBufferReqs(), | |
206 "Failed to get input/output stream info.", PLATFORM_FAILURE, false); | |
207 | |
208 RETURN_AND_NOTIFY_ON_FAILURE( | |
209 SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0), | |
210 "Failed to start decoder", PLATFORM_FAILURE, false); | |
211 | |
212 state_ = kNormal; | |
213 MessageLoop::current()->PostTask(FROM_HERE, | |
214 base::Bind(&DXVAVideoDecodeAccelerator::NotifyInitializeDone, this)); | |
215 return true; | |
216 } | |
217 | |
218 void DXVAVideoDecodeAccelerator::Decode( | |
219 const media::BitstreamBuffer& bitstream_buffer) { | |
220 DCHECK(CalledOnValidThread()); | |
221 | |
222 RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped), | |
223 "Invalid state: " << state_, ILLEGAL_STATE,); | |
224 | |
225 base::win::ScopedComPtr<IMFSample> sample; | |
226 sample.Attach(CreateSampleFromInputBuffer(bitstream_buffer, | |
227 renderer_process_, | |
228 input_stream_info_.cbSize, | |
229 input_stream_info_.cbAlignment)); | |
230 RETURN_AND_NOTIFY_ON_FAILURE(sample.get(), "Failed to create input sample", | |
231 PLATFORM_FAILURE,); | |
232 if (!inputs_before_decode_) { | |
233 TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.Decoding", this, ""); | |
234 } | |
235 inputs_before_decode_++; | |
236 | |
237 RETURN_AND_NOTIFY_ON_FAILURE( | |
238 SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0), | |
239 "Failed to create input sample", PLATFORM_FAILURE,); | |
240 | |
241 RETURN_AND_NOTIFY_ON_HR_FAILURE(decoder_->ProcessInput(0, sample.get(), 0), | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
Macros use text-substitution at compile time, so p
ananta (2011/12/20 02:27:22):
Done.
| |
242 "Failed to process input sample", PLATFORM_FAILURE,); | |
243 | |
244 if (state_ != DXVAVideoDecodeAccelerator::kEosDrain) { | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
You have a check at the top of this function that
ananta (2011/12/20 02:27:22):
Done.
| |
245 RETURN_AND_NOTIFY_ON_FAILURE( | |
246 SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0), | |
247 "Failed to send eos message to MFT", PLATFORM_FAILURE,); | |
248 state_ = kEosDrain; | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
IIUC DoDecode() always sets state_ to normal or stopped
ananta (2011/12/20 02:27:22):
DoDecode also gets called from Flush which sets the
| |
249 } | |
250 | |
251 last_input_buffer_id_ = bitstream_buffer.id(); | |
252 | |
253 DoDecode(); | |
254 | |
255 RETURN_AND_NOTIFY_ON_FAILURE((state_ == kStopped || state_ == kNormal), | |
256 "Failed to process output. Unexpected decoder state: " << state_, | |
257 ILLEGAL_STATE,); | |
258 | |
259 // The Microsoft Media Foundation decoder internally buffers up to 30 frames | |
260 // before returning a decoded frame. We need to inform the client that this | |
261 // input buffer is processed as it may stop sending us further input. | |
262 // Note: This may break clients which expect every input buffer to be | |
263 // associated with a decoded output buffer. | |
264 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( | |
265 &DXVAVideoDecodeAccelerator::NotifyInputBufferRead, this, | |
266 bitstream_buffer.id())); | |
267 } | |
268 | |
269 void DXVAVideoDecodeAccelerator::AssignPictureBuffers( | |
270 const std::vector<media::PictureBuffer>& buffers) { | |
271 DCHECK(CalledOnValidThread()); | |
272 // Copy the picture buffers provided by the client to the available list, | |
273 // and mark these buffers as available for use. | |
274 for (size_t buffer_index = 0; buffer_index < buffers.size(); | |
275 ++buffer_index) { | |
276 bool inserted = output_picture_buffers_.insert(std::make_pair( | |
277 buffers[buffer_index].id(), | |
278 DXVAPictureBuffer(true, buffers[buffer_index]))).second; | |
279 DCHECK(inserted); | |
280 } | |
281 ProcessPendingSamples(); | |
282 } | |
283 | |
284 void DXVAVideoDecodeAccelerator::ReusePictureBuffer( | |
285 int32 picture_buffer_id) { | |
286 DCHECK(CalledOnValidThread()); | |
287 | |
288 OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id); | |
289 RETURN_AND_NOTIFY_ON_FAILURE(it != output_picture_buffers_.end(), | |
290 "Invalid picture id: " << picture_buffer_id, PLATFORM_FAILURE,); | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
s/PLATFORM_FAILURE/INVALID_ARGUMENT/
ananta (2011/12/20 02:27:22):
Done.
| |
291 | |
292 it->second.available = true; | |
293 ProcessPendingSamples(); | |
294 } | |
295 | |
296 void DXVAVideoDecodeAccelerator::Flush() { | |
297 DCHECK(CalledOnValidThread()); | |
298 | |
299 DVLOG(1) << "DXVAVideoDecodeAccelerator::Flush"; | |
300 | |
301 RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ != kStopped), | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
s/!=/==/ ???
ananta (2011/12/20 02:27:22):
Thanks for catching this :(. Fixed.
| |
302 "Unexpected decoder state: " << state_, ILLEGAL_STATE,); | |
303 | |
304 state_ = kEosDrain; | |
305 | |
306 RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0), | |
307 "Failed to send drain message", PLATFORM_FAILURE,); | |
308 | |
309 // As per MSDN docs after the client sends this message, it calls | |
310 // IMFTransform::ProcessOutput in a loop, until ProcessOutput returns the | |
311 // error code MF_E_TRANSFORM_NEED_MORE_INPUT. The DoDecode function sets | |
312 // the state to kStopped when the decoder returns | |
313 // MF_E_TRANSFORM_NEED_MORE_INPUT. | |
314 while (state_ != kStopped) { | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
I don't understand why Decode() is content to call
ananta (2011/12/20 02:27:22):
As per our IM discussion, the MFT decoder can buffer
| |
315 DoDecode(); | |
316 } | |
317 | |
318 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( | |
319 &DXVAVideoDecodeAccelerator::NotifyFlushDone, this)); | |
320 | |
321 state_ = kNormal; | |
322 } | |
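For context, the MSDN drain sequence that the comment inside Flush() refers to looks roughly like this when written directly against a raw IMFTransform pointer (a standalone sketch, not this class's state machine; |decoder| is assumed to be an IMFTransform*):

// Canonical MFT drain pattern: after MFT_MESSAGE_COMMAND_DRAIN, keep pulling
// output until the transform reports it needs more input.
decoder->ProcessMessage(MFT_MESSAGE_COMMAND_DRAIN, 0);
for (;;) {
  MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0};
  DWORD status = 0;
  HRESULT hr = decoder->ProcessOutput(0, 1, &output_data_buffer, &status);
  if (output_data_buffer.pEvents)
    output_data_buffer.pEvents->Release();
  if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT)
    break;  // Fully drained.
  // ... hand output_data_buffer.pSample to the consumer and release it ...
}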
323 | |
324 void DXVAVideoDecodeAccelerator::Reset() { | |
325 DCHECK(CalledOnValidThread()); | |
326 | |
327 DVLOG(1) << "DXVAVideoDecodeAccelerator::Reset"; | |
328 | |
329 RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped), | |
330 "Reset: invalid state: " << state_, ILLEGAL_STATE,); | |
331 | |
332 state_ = kResetting; | |
333 | |
334 RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0), | |
335 "Reset: Failed to send message.", PLATFORM_FAILURE,); | |
336 | |
337 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( | |
338 &DXVAVideoDecodeAccelerator::NotifyResetDone, this)); | |
339 | |
340 state_ = DXVAVideoDecodeAccelerator::kNormal; | |
341 } | |
342 | |
343 void DXVAVideoDecodeAccelerator::Destroy() { | |
344 DCHECK(CalledOnValidThread()); | |
345 | |
346 OutputBuffers::iterator index; | |
347 for (index = output_picture_buffers_.begin(); | |
348 index != output_picture_buffers_.end(); | |
349 ++index) { | |
350 client_->DismissPictureBuffer(index->second.picture_buffer.id()); | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
This still violates Destroy's API.
You must not to
ananta (2011/12/20 02:27:22):
Done.
| |
351 } | |
352 Invalidate(); | |
353 } | |
354 | |
355 bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() { | |
356 base::win::ScopedComPtr<IDirect3D9Ex> d3d9; | |
357 | |
358 RETURN_ON_HR_FAILURE(Direct3DCreate9Ex(D3D_SDK_VERSION, d3d9.Receive()), | |
359 "Failed to create D3D9", false); | |
360 | |
361 D3DPRESENT_PARAMETERS present_params = {0}; | |
362 present_params.BackBufferWidth = 1; | |
363 present_params.BackBufferHeight = 1; | |
364 present_params.BackBufferFormat = D3DFMT_UNKNOWN; | |
365 present_params.BackBufferCount = 1; | |
366 present_params.SwapEffect = D3DSWAPEFFECT_DISCARD; | |
367 present_params.hDeviceWindow = GetShellWindow(); | |
368 present_params.Windowed = TRUE; | |
369 present_params.Flags = D3DPRESENTFLAG_VIDEO; | |
370 present_params.FullScreen_RefreshRateInHz = 0; | |
371 present_params.PresentationInterval = 0; | |
372 | |
373 HRESULT hr = d3d9->CreateDeviceEx(D3DADAPTER_DEFAULT, | |
374 D3DDEVTYPE_HAL, | |
375 GetShellWindow(), | |
376 D3DCREATE_SOFTWARE_VERTEXPROCESSING, | |
377 &present_params, | |
378 NULL, | |
379 device_.Receive()); | |
380 RETURN_ON_HR_FAILURE(hr, "Failed to create D3D Device", false); | |
381 | |
382 hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token_, | |
383 device_manager_.Receive()); | |
384 RETURN_ON_HR_FAILURE(hr, "Couldn't create D3D Device manager", false); | |
385 | |
386 hr = device_manager_->ResetDevice(device_.get(), | |
387 dev_manager_reset_token_); | |
388 RETURN_ON_HR_FAILURE(hr, "Failed to set device to device manager", false); | |
389 return true; | |
390 } | |
391 | |
392 bool DXVAVideoDecodeAccelerator::InitDecoder() { | |
393 HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT), | |
394 NULL, | |
395 CLSCTX_INPROC_SERVER, | |
396 __uuidof(IMFTransform), | |
397 reinterpret_cast<void**>(decoder_.Receive())); | |
398 RETURN_ON_HR_FAILURE(hr, "Failed to CoCreate decoder.", false); | |
399 | |
400 RETURN_ON_FAILURE(CheckDecoderDxvaSupport(), | |
401 "Failed to check decoder DXVA support", false); | |
402 | |
403 hr = decoder_->ProcessMessage( | |
404 MFT_MESSAGE_SET_D3D_MANAGER, | |
405 reinterpret_cast<ULONG_PTR>(device_manager_.get())); | |
406 RETURN_ON_HR_FAILURE(hr, "Failed to pass D3D9 device to decoder.", false); | |
407 | |
408 return SetDecoderMediaTypes(); | |
409 } | |
410 | |
411 bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() { | |
412 base::win::ScopedComPtr<IMFAttributes> attributes; | |
413 HRESULT hr = decoder_->GetAttributes(attributes.Receive()); | |
414 RETURN_ON_HR_FAILURE(hr, "Failed to pass decoder attributes.", false); | |
415 | |
416 UINT32 dxva = 0; | |
417 hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva); | |
418 RETURN_ON_HR_FAILURE(hr, "Failed to get DXVA aware attribute.", false); | |
419 | |
420 hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE); | |
421 RETURN_ON_HR_FAILURE(hr, "Failed to force codec to use H/W DXVA.", false); | |
422 return true; | |
423 } | |
424 | |
425 bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() { | |
426 RETURN_ON_FAILURE(SetDecoderInputMediaType(), | |
427 "Failed to set decoder input media type", false); | |
428 return SetDecoderOutputMediaType(MFVideoFormat_NV12); | |
429 } | |
430 | |
431 bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() { | |
432 base::win::ScopedComPtr<IMFMediaType> media_type; | |
433 HRESULT hr = MFCreateMediaType(media_type.Receive()); | |
434 RETURN_ON_HR_FAILURE(hr, "Failed to create empty media type object", false); | |
435 | |
436 hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); | |
437 RETURN_ON_HR_FAILURE(hr, "SetGUID for major type failed", false); | |
438 | |
439 hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264); | |
440 RETURN_ON_HR_FAILURE(hr, "SetGUID for subtype failed", false); | |
441 | |
442 hr = decoder_->SetInputType(0, media_type.get(), 0); // No flags | |
443 RETURN_ON_HR_FAILURE(hr, "Failed to set decoder's input type", false); | |
444 return true; | |
445 } | |
446 | |
447 bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType( | |
448 const GUID& subtype) { | |
449 base::win::ScopedComPtr<IMFMediaType> out_media_type; | |
450 | |
451 for (uint32 i = 0; | |
452 SUCCEEDED(decoder_->GetOutputAvailableType(0, i, | |
453 out_media_type.Receive())); | |
454 ++i) { | |
455 GUID out_subtype = {0}; | |
456 HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype); | |
457 RETURN_ON_HR_FAILURE(hr, "Failed to get output media type guid", false); | |
458 | |
459 if (out_subtype == subtype) { | |
460 hr = decoder_->SetOutputType(0, out_media_type, 0); // No flags | |
461 RETURN_ON_HR_FAILURE(hr, "Failed to set output media type", false); | |
462 return true; | |
463 } | |
464 out_media_type.Release(); | |
465 } | |
466 return false; | |
467 } | |
468 | |
469 bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg, | |
470 int32 param) { | |
471 HRESULT hr = decoder_->ProcessMessage(msg, param); | |
472 return SUCCEEDED(hr); | |
473 } | |
474 | |
475 // Gets the minimum buffer sizes for input and output samples. The MFT will not | |
476 // allocate buffers for input or output, so we have to do it ourselves and make | |
477 // sure they're the correct size. We only provide decoding if DXVA is enabled. | |
478 bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() { | |
479 HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_); | |
480 RETURN_ON_HR_FAILURE(hr, "Failed to get input stream info", false); | |
481 | |
482 hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_); | |
483 RETURN_ON_HR_FAILURE(hr, "Failed to get output stream info", false); | |
484 | |
485 VLOG(1) << "Input stream info: "; | |
486 VLOG(1) << "Max latency: " << input_stream_info_.hnsMaxLatency; | |
487 // There should be three flags, one for requiring a whole frame be in a | |
488 // single sample, one for requiring there be one buffer only in a single | |
489 // sample, and one that specifies a fixed sample size (given by cbSize). | |
490 CHECK_EQ(input_stream_info_.dwFlags, 0x7u); | |
491 | |
492 VLOG(1) << "Min buffer size: " << input_stream_info_.cbSize; | |
493 VLOG(1) << "Max lookahead: " << input_stream_info_.cbMaxLookahead; | |
494 VLOG(1) << "Alignment: " << input_stream_info_.cbAlignment; | |
495 | |
496 VLOG(1) << "Output stream info: "; | |
497 // The flags here should be the same and mean the same thing, except when | |
498 // DXVA is enabled, there is an extra 0x100 flag meaning decoder will | |
499 // allocate its own sample. | |
500 VLOG(1) << "Flags: " | |
501 << std::hex << std::showbase << output_stream_info_.dwFlags; | |
502 CHECK_EQ(output_stream_info_.dwFlags, 0x107u); | |
503 VLOG(1) << "Min buffer size: " << output_stream_info_.cbSize; | |
504 VLOG(1) << "Alignment: " << output_stream_info_.cbAlignment; | |
505 return true; | |
506 } | |
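For reference, the magic values 0x7 and 0x107 checked above decompose into named MFT stream-info flags from mftransform.h. A sketch of the same checks spelled out with those constants (assuming, as the CHECKs do, that the decoder reports exactly these flags):

// 0x7 == whole samples + one buffer per sample + fixed sample size.
CHECK_EQ(input_stream_info_.dwFlags,
         static_cast<DWORD>(MFT_INPUT_STREAM_WHOLE_SAMPLES |             // 0x1
                            MFT_INPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER |  // 0x2
                            MFT_INPUT_STREAM_FIXED_SAMPLE_SIZE));        // 0x4
// 0x107 adds the flag saying the DXVA-enabled decoder allocates its own
// output samples.
CHECK_EQ(output_stream_info_.dwFlags,
         static_cast<DWORD>(MFT_OUTPUT_STREAM_WHOLE_SAMPLES |
                            MFT_OUTPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER |
                            MFT_OUTPUT_STREAM_FIXED_SAMPLE_SIZE |
                            MFT_OUTPUT_STREAM_PROVIDES_SAMPLES));        // 0x100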
507 | |
508 void DXVAVideoDecodeAccelerator::DoDecode() { | |
509 RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kEosDrain), | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
Always entered in kEosDrain ATM.
ananta (2011/12/20 02:27:22):
Please note that the DoDecode function gets called
| |
510 "DoDecode: not in normal or drain state", ILLEGAL_STATE,); | |
511 | |
512 MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0}; | |
513 DWORD status = 0; | |
514 | |
515 HRESULT hr = decoder_->ProcessOutput(0, // No flags | |
516 1, // # of out streams to pull from | |
517 &output_data_buffer, | |
518 &status); | |
519 IMFCollection* events = output_data_buffer.pEvents; | |
520 if (events != NULL) { | |
521 VLOG(1) << "Got events from ProcessOuput, but discarding"; | |
522 events->Release(); | |
523 } | |
524 if (FAILED(hr)) { | |
525 // A stream change needs further ProcessInput calls to get back decoder | |
526 // output which is why we need to set the state to stopped. | |
527 if (hr == MF_E_TRANSFORM_STREAM_CHANGE) { | |
528 // No more output from the decoder. Notify EOS and stop playback. | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
I don't understand this. Please have someone who
ananta (2011/12/20 02:27:22):
We don't have anyone who is very familiar with DXVA
| |
529 if (!SetDecoderOutputMediaType(MFVideoFormat_NV12)) { | |
530 NOTREACHED() << "Failed to set decoder output media type"; | |
531 } | |
532 state_ = kStopped; | |
533 return; | |
534 } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) { | |
535 // This function is also called from Flush in a loop which could result | |
536 // in the state transitioning to kNormal due to decoded output. | |
537 DCHECK(state_ == kEosDrain || state_ == kNormal); | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
redundant to top-of-method check.
ananta (2011/12/20 02:27:22):
Removed
| |
538 // No more output from the decoder. Stop playback. | |
539 state_ = kStopped; | |
540 return; | |
541 } else { | |
542 NOTREACHED() << "Unhandled error in DoDecode()"; | |
543 return; | |
544 } | |
545 } | |
546 TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.Decoding", this, ""); | |
547 | |
548 TRACE_COUNTER1("DXVA Decoding", "TotalPacketsBeforeDecode", | |
549 inputs_before_decode_); | |
550 | |
551 inputs_before_decode_ = 0; | |
552 | |
553 RETURN_AND_NOTIFY_ON_FAILURE(ProcessOutputSample(output_data_buffer.pSample), | |
554 "Failed to process output sample.", PLATFORM_FAILURE,); | |
555 | |
556 state_ = kNormal; | |
557 } | |
558 | |
559 bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) { | |
560 RETURN_ON_FAILURE(sample, | |
561 "ProcessOutput succeeded, but did not get a sample back", false); | |
562 | |
563 base::win::ScopedComPtr<IMFSample> output_sample; | |
564 output_sample.Attach(sample); | |
565 | |
566 base::win::ScopedComPtr<IMFMediaBuffer> output_buffer; | |
567 HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive()); | |
568 RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from sample.", false); | |
569 | |
570 base::win::ScopedComPtr<IDirect3DSurface9> surface; | |
571 hr = MFGetService(output_buffer, MR_BUFFER_SERVICE, | |
572 IID_PPV_ARGS(surface.Receive())); | |
573 RETURN_ON_HR_FAILURE(hr, "Failed to get surface from buffer.", false); | |
574 | |
575 D3DSURFACE_DESC surface_desc; | |
576 hr = surface->GetDesc(&surface_desc); | |
577 RETURN_ON_HR_FAILURE(hr, "Failed to get surface description.", false); | |
578 | |
579 TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this, | |
580 ""); | |
581 // TODO(ananta) | |
582 // The code below may not be necessary once we have an ANGLE extension which | |
583 // allows us to pass the Direct 3D surface directly for rendering. | |
584 | |
585 // The decoded bits in the source Direct3D surface are in the YUV | |
586 // format. ANGLE does not support that. As a workaround we create an | |
587 // offscreen surface in the RGB format and copy the source surface | |
588 // to this surface. | |
589 base::win::ScopedComPtr<IDirect3DSurface9> dest_surface; | |
590 hr = device_->CreateOffscreenPlainSurface(surface_desc.Width, | |
591 surface_desc.Height, | |
592 D3DFMT_A8R8G8B8, | |
593 D3DPOOL_DEFAULT, | |
594 dest_surface.Receive(), | |
595 NULL); | |
596 RETURN_ON_HR_FAILURE(hr, "Failed to create offscreen surface.", false); | |
597 | |
598 hr = D3DXLoadSurfaceFromSurface(dest_surface, NULL, NULL, surface, NULL, | |
599 NULL, 0, 0); | |
600 RETURN_ON_HR_FAILURE(hr, "Failed to copy source surface to dest.", false); | |
601 | |
602 TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this, ""); | |
603 | |
604 pending_output_samples_.push_back( | |
605 PendingSampleInfo(last_input_buffer_id_, dest_surface)); | |
606 | |
607 // If we have available picture buffers to copy the output data then use the | |
608 // first one and then flag it as not being available for use. | |
609 if (output_picture_buffers_.size()) { | |
610 ProcessPendingSamples(); | |
611 return true; | |
612 } | |
613 if (pictures_requested_) { | |
614 DVLOG(1) << "Waiting for picture slots from the client."; | |
615 return true; | |
616 } | |
617 // Go ahead and request picture buffers. | |
618 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( | |
619 &DXVAVideoDecodeAccelerator::RequestPictureBuffers, | |
620 this, surface_desc.Width, surface_desc.Height)); | |
621 | |
622 pictures_requested_ = true; | |
623 return true; | |
624 } | |
625 | |
626 bool DXVAVideoDecodeAccelerator::CopyOutputSampleDataToPictureBuffer( | |
627 IDirect3DSurface9* dest_surface, media::PictureBuffer picture_buffer, | |
628 int input_buffer_id) { | |
629 DCHECK(dest_surface); | |
630 | |
631 D3DSURFACE_DESC surface_desc; | |
632 HRESULT hr = dest_surface->GetDesc(&surface_desc); | |
633 RETURN_ON_HR_FAILURE(hr, "Failed to get surface description.", false); | |
634 | |
635 scoped_array<char> bits; | |
636 RETURN_ON_FAILURE(GetBitmapFromSurface(dest_surface, &bits), | |
637 "Failed to read bits from D3D surface", false); | |
638 | |
639 // This function currently executes in the context of IPC handlers in the | |
640 // GPU process, which ensures that there is always an OpenGL context. | |
641 GLint current_texture = 0; | |
642 glGetIntegerv(GL_TEXTURE_BINDING_2D, ¤t_texture); | |
643 | |
644 glBindTexture(GL_TEXTURE_2D, picture_buffer.texture_id()); | |
645 glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT, surface_desc.Width, | |
646 surface_desc.Height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, | |
647 reinterpret_cast<GLvoid*>(bits.get())); | |
648 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); | |
649 | |
650 glBindTexture(GL_TEXTURE_2D, current_texture); | |
651 | |
652 media::Picture output_picture(picture_buffer.id(), input_buffer_id); | |
653 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( | |
654 &DXVAVideoDecodeAccelerator::NotifyPictureReady, this, output_picture)); | |
655 return true; | |
656 } | |
657 | |
658 void DXVAVideoDecodeAccelerator::ProcessPendingSamples() { | |
659 if (pending_output_samples_.empty()) | |
660 return; | |
661 | |
662 OutputBuffers::iterator index; | |
663 | |
664 for (index = output_picture_buffers_.begin(); | |
665 index != output_picture_buffers_.end() && | |
666 !pending_output_samples_.empty(); | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
de-indent
ananta (2011/12/20 02:27:22):
Done.
| |
667 ++index) { | |
668 if (index->second.available) { | |
669 PendingSampleInfo sample_info = pending_output_samples_.front(); | |
670 | |
671 CopyOutputSampleDataToPictureBuffer(sample_info.dest_surface, | |
672 index->second.picture_buffer, | |
673 sample_info.input_buffer_id); | |
674 index->second.available = false; | |
675 pending_output_samples_.pop_front(); | |
676 } | |
677 } | |
678 } | |
679 | |
680 void DXVAVideoDecodeAccelerator::ClearState() { | |
681 last_input_buffer_id_ = -1; | |
682 output_picture_buffers_.clear(); | |
683 pending_output_samples_.clear(); | |
684 } | |
685 | |
686 void DXVAVideoDecodeAccelerator::StopOnError( | |
687 media::VideoDecodeAccelerator::Error error) { | |
688 DCHECK(CalledOnValidThread()); | |
689 | |
690 if (client_) | |
691 client_->NotifyError(error); | |
692 client_ = NULL; | |
693 | |
694 if (state_ != kUninitialized) { | |
695 Invalidate(); | |
696 } | |
697 } | |
698 | |
699 bool DXVAVideoDecodeAccelerator::GetBitmapFromSurface( | |
700 IDirect3DSurface9* surface, | |
701 scoped_array<char>* bits) { | |
702 // Get the currently loaded bitmap from the DC. | |
703 HDC hdc = NULL; | |
704 HRESULT hr = surface->GetDC(&hdc); | |
705 RETURN_ON_HR_FAILURE(hr, "Failed to get HDC for dest offscreen surface.", | |
706 false); | |
707 HBITMAP bitmap = | |
708 reinterpret_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP)); | |
709 if (!bitmap) { | |
710 NOTREACHED() << "Failed to get bitmap from DC"; | |
711 surface->ReleaseDC(hdc); | |
712 return false; | |
713 } | |
714 // TODO(ananta) | |
715 // The code below may not be necessary once we have an ANGLE extension which | |
716 // allows us to pass the Direct 3D surface directly for rendering. | |
717 // The Device dependent bitmap is upside down for OpenGL. We convert the | |
718 // bitmap to a DIB and render it on the texture instead. | |
719 BITMAP bitmap_basic_info = {0}; | |
720 if (!GetObject(bitmap, sizeof(BITMAP), &bitmap_basic_info)) { | |
721 NOTREACHED() << "Failed to read bitmap info"; | |
722 surface->ReleaseDC(hdc); | |
723 return false; | |
724 } | |
725 BITMAPINFO bitmap_info = {0}; | |
726 bitmap_info.bmiHeader.biSize = sizeof(BITMAPINFOHEADER); | |
727 bitmap_info.bmiHeader.biWidth = bitmap_basic_info.bmWidth; | |
728 bitmap_info.bmiHeader.biHeight = bitmap_basic_info.bmHeight; | |
729 bitmap_info.bmiHeader.biPlanes = 1; | |
730 bitmap_info.bmiHeader.biBitCount = bitmap_basic_info.bmBitsPixel; | |
731 bitmap_info.bmiHeader.biCompression = BI_RGB; | |
732 bitmap_info.bmiHeader.biSizeImage = 0; | |
733 bitmap_info.bmiHeader.biClrUsed = 0; | |
734 | |
735 int ret = GetDIBits(hdc, bitmap, 0, 0, NULL, &bitmap_info, DIB_RGB_COLORS); | |
736 if (!ret || bitmap_info.bmiHeader.biSizeImage <= 0) { | |
737 NOTREACHED() << "Failed to read bitmap size"; | |
738 surface->ReleaseDC(hdc); | |
739 return false; | |
740 } | |
741 | |
742 bits->reset(new char[bitmap_info.bmiHeader.biSizeImage]); | |
743 ret = GetDIBits(hdc, bitmap, 0, bitmap_basic_info.bmHeight, bits->get(), | |
744 &bitmap_info, DIB_RGB_COLORS); | |
745 if (!ret) { | |
746 NOTREACHED() << "Failed to retrieve bitmap bits."; | |
747 } | |
748 surface->ReleaseDC(hdc); | |
749 return !!ret; | |
750 } | |
751 | |
752 void DXVAVideoDecodeAccelerator::Invalidate() { | |
753 if (state_ == kUninitialized) | |
754 return; | |
755 ClearState(); | |
756 decoder_.Release(); | |
757 device_.Release(); | |
758 device_manager_.Release(); | |
759 MFShutdown(); | |
760 state_ = kUninitialized; | |
761 } | |
762 | |
763 void DXVAVideoDecodeAccelerator::NotifyInitializeDone() { | |
764 if (client_) | |
765 client_->NotifyInitializeDone(); | |
766 } | |
767 | |
768 void DXVAVideoDecodeAccelerator::NotifyInputBufferRead(int input_buffer_id) { | |
769 if (client_) | |
770 client_->NotifyEndOfBitstreamBuffer(input_buffer_id); | |
771 } | |
772 | |
773 void DXVAVideoDecodeAccelerator::NotifyFlushDone() { | |
774 if (client_) | |
775 client_->NotifyFlushDone(); | |
776 } | |
777 | |
778 void DXVAVideoDecodeAccelerator::NotifyResetDone() { | |
779 if (client_) | |
780 client_->NotifyResetDone(); | |
781 } | |
782 | |
783 void DXVAVideoDecodeAccelerator::RequestPictureBuffers(int width, int height) { | |
784 // This task could execute after the decoder has been torn down. | |
785 if (state_ == kNormal && client_) | |
786 client_->ProvidePictureBuffers(kNumPictureBuffers, | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
style-guide requires multi-line then clauses to have braces.
ananta (2011/12/20 02:27:22):
Done.
| |
787 gfx::Size(width, height)); | |
788 } | |
789 | |
790 void DXVAVideoDecodeAccelerator::NotifyPictureReady( | |
791 const media::Picture& picture) { | |
792 // This task could execute after the decoder has been torn down. | |
Ami GONE FROM CHROMIUM (2011/12/19 22:53:44):
I don't follow this comment. Why should state_==kNormal
ananta (2011/12/20 02:27:22):
Replaced with a check for kUninitialized.
| |
793 if (state_ == kNormal && client_) | |
794 client_->PictureReady(picture); | |
795 } | |