Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(113)

Side by Side Diff: content/common/gpu/media/dxva_video_decode_accelerator.cc

Issue 8510039: Initial implementation of the DXVA 2.0 H.264 hardware decoder for pepper for Windows. The decodin... (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: '' Created 9 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
(Empty)
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/common/gpu/media/dxva_video_decode_accelerator.h"
6
7 #if !defined(OS_WIN)
8 #error This file should only be built on Windows.
9 #endif // !defined(OS_WIN)
10
11 #include <ks.h>
12 #include <codecapi.h>
13 #include <d3dx9tex.h>
14 #include <mfapi.h>
15 #include <mferror.h>
16 #include <wmcodecdsp.h>
17
18 #include "base/bind.h"
19 #include "base/callback.h"
20 #include "base/debug/trace_event.h"
21 #include "base/logging.h"
22 #include "base/memory/scoped_handle.h"
23 #include "base/memory/scoped_ptr.h"
24 #include "base/message_loop.h"
25 #include "base/process_util.h"
26 #include "base/shared_memory.h"
27 #include "media/video/video_decode_accelerator.h"
28 #include "third_party/angle/include/GLES2/gl2.h"
29 #include "third_party/angle/include/GLES2/gl2ext.h"
30
31 // We only request 5 picture buffers from the client which are used to hold the
32 // decoded samples. These buffers are then reused when the client tells us that
33 // it is done with the buffer.
34 static const int kNumPictureBuffers = 5;
35
// Logs the failing expression and returns |ret| when |result| is false.
// Previously this logged only "Failed ***", which made failures impossible
// to attribute from logs.
#define RETURN_ON_FAILURE(result, ret)      \
  do {                                      \
    if (!(result)) {                        \
      DLOG(ERROR) << "Failed: " << #result; \
      return ret;                           \
    }                                       \
  } while (0)

#define RETURN_ON_HR_FAILURE(result, ret) \
  RETURN_ON_FAILURE(SUCCEEDED(result), ret);

// Logs |log|, notifies the client via StopOnError(|error_code|) and returns
// |ret| when |result| is false. |ret| may be empty for void functions.
#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \
  do {                                                             \
    if (!(result)) {                                               \
      DVLOG(1) << log;                                             \
      StopOnError(error_code);                                     \
      return ret;                                                  \
    }                                                              \
  } while (0)

#define RETURN_AND_NOTIFY_ON_HR_FAILURE(result, log, error_code, ret)        \
  RETURN_AND_NOTIFY_ON_FAILURE(SUCCEEDED(result),                            \
                               log << ", HRESULT: 0x" << std::hex << result, \
                               error_code, ret);
60
61 static IMFSample* CreateEmptySample() {
62 base::win::ScopedComPtr<IMFSample> sample;
63 HRESULT hr = MFCreateSample(sample.Receive());
64 RETURN_ON_HR_FAILURE(hr, NULL);
65 return sample.Detach();
66 }
67
68 // Creates a Media Foundation sample with one buffer of length |buffer_length|
69 // on a |align|-byte boundary. Alignment must be a perfect power of 2 or 0.
70 static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) {
71 CHECK_GT(buffer_length, 0);
72
73 base::win::ScopedComPtr<IMFSample> sample;
74 sample.Attach(CreateEmptySample());
75
76 base::win::ScopedComPtr<IMFMediaBuffer> buffer;
77 HRESULT hr = E_FAIL;
78 if (align == 0) {
79 // Note that MFCreateMemoryBuffer is same as MFCreateAlignedMemoryBuffer
80 // with the align argument being 0.
81 hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
82 } else {
83 hr = MFCreateAlignedMemoryBuffer(buffer_length,
84 align - 1,
85 buffer.Receive());
86 }
87 RETURN_ON_HR_FAILURE(hr, NULL);
88
89 hr = sample->AddBuffer(buffer);
90 RETURN_ON_HR_FAILURE(hr, NULL);
91
92 return sample.Detach();
93 }
94
// Creates a Media Foundation sample with one buffer containing a copy of the
// given Annex B stream data.
// If duration and sample time are not known, provide 0.
// |min_size| specifies the minimum size of the buffer (might be required by
// the decoder for input). If no alignment is required, provide 0.
static IMFSample* CreateInputSample(const uint8* stream, int size,
                                    int min_size, int alignment) {
  CHECK(stream);
  CHECK_GT(size, 0);
  base::win::ScopedComPtr<IMFSample> sample;
  // Allocate at least |min_size| bytes even when the payload is smaller, in
  // case the decoder requires a minimum input buffer size.
  sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
                                            alignment));
  RETURN_ON_FAILURE(sample, NULL);

  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
  HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive());
  RETURN_ON_HR_FAILURE(hr, NULL);

  DWORD max_length = 0;
  DWORD current_length = 0;
  uint8* destination = NULL;
  hr = buffer->Lock(&destination, &max_length, &current_length);
  RETURN_ON_HR_FAILURE(hr, NULL);

  // A freshly created buffer must be empty and large enough for the payload.
  CHECK_EQ(current_length, 0u);
  CHECK_GE(static_cast<int>(max_length), size);
  memcpy(destination, stream, size);

  hr = buffer->Unlock();
  RETURN_ON_HR_FAILURE(hr, NULL);

  hr = buffer->SetCurrentLength(size);
  RETURN_ON_HR_FAILURE(hr, NULL);

  // Mark the sample as a clean point so the MFT treats it as a valid random
  // access point.
  hr = sample->SetUINT32(MFSampleExtension_CleanPoint, TRUE);
  RETURN_ON_HR_FAILURE(hr, NULL);

  return sample.Detach();
}
134
// Duplicates the renderer's shared memory handle into this process, maps it,
// and copies the bitstream into a new Media Foundation input sample.
// Returns NULL on failure.
static IMFSample* CreateSampleFromInputBuffer(
    const media::BitstreamBuffer& bitstream_buffer,
    base::ProcessHandle renderer_process,
    DWORD stream_size,
    DWORD alignment) {
  HANDLE shared_memory_handle = NULL;
  RETURN_ON_FAILURE(::DuplicateHandle(renderer_process,
                                      bitstream_buffer.handle(),
                                      base::GetCurrentProcessHandle(),
                                      &shared_memory_handle,
                                      0,
                                      FALSE,
                                      DUPLICATE_SAME_ACCESS), NULL);

  // |shm| takes ownership of the duplicated handle (read-only mapping) and
  // releases it when it goes out of scope.
  base::SharedMemory shm(shared_memory_handle, true);
  RETURN_ON_FAILURE(shm.Map(bitstream_buffer.size()), NULL);

  // The sample copies the bytes out of |shm|, so the mapping may be torn
  // down as soon as this returns.
  return CreateInputSample(reinterpret_cast<const uint8*>(shm.memory()),
                           bitstream_buffer.size(),
                           stream_size,
                           alignment);
}
157
// Wraps a client-provided picture buffer. |available| is true while the
// client is not using the buffer, i.e. it can receive a decoded frame.
DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer(
    const media::PictureBuffer& buffer)
    : available(true),
      picture_buffer(buffer) {
}

// Associates a decoded destination surface with the id of the input
// bitstream buffer that produced it, until a picture buffer frees up.
DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
    int32 buffer_id, IDirect3DSurface9* surface)
    : input_buffer_id(buffer_id),
      dest_surface(surface) {
}

DXVAVideoDecodeAccelerator::PendingSampleInfo::~PendingSampleInfo() {}
171
DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
    media::VideoDecodeAccelerator::Client* client,
    base::ProcessHandle renderer_process)
    : client_(client),
      state_(kUninitialized),
      pictures_requested_(false),
      // Needed to duplicate shared memory handles from the renderer.
      renderer_process_(renderer_process),
      dev_manager_reset_token_(0),
      last_input_buffer_id_(-1),
      inputs_before_decode_(0) {
}

DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() {
  client_ = NULL;
}
187
// Initializes Media Foundation, the D3D device manager and the H.264 decoder
// MFT, in that order. On success posts NotifyInitializeDone to the client.
bool DXVAVideoDecodeAccelerator::Initialize(Profile) {
  DCHECK(CalledOnValidThread());

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kUninitialized),
      "Initialize: invalid state: " << state_, ILLEGAL_STATE, false);

  HRESULT hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "MFStartup failed.", PLATFORM_FAILURE,
                                  false);

  // The device manager must exist before InitDecoder() hands it to the MFT.
  RETURN_AND_NOTIFY_ON_FAILURE(CreateD3DDevManager(),
      "Failed to create device manager", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(InitDecoder(),
      "Failed to initialize decoder", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(GetStreamsInfoAndBufferReqs(),
      "Failed to get input/output stream info.", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0),
      "Failed to start decoder", PLATFORM_FAILURE, false);

  state_ = kNormal;
  // Notify asynchronously so the client never re-enters us from this call.
  MessageLoop::current()->PostTask(FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::NotifyInitializeDone, this));
  return true;
}
216
// Feeds one bitstream buffer to the MFT and immediately attempts to drain
// decoded output via DoDecode().
void DXVAVideoDecodeAccelerator::Decode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DCHECK(CalledOnValidThread());

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
      "Invalid state: " << state_, ILLEGAL_STATE,);

  // Copy the compressed bits out of the renderer's shared memory into a
  // Media Foundation input sample.
  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateSampleFromInputBuffer(bitstream_buffer,
                                            renderer_process_,
                                            input_stream_info_.cbSize,
                                            input_stream_info_.cbAlignment));
  RETURN_AND_NOTIFY_ON_FAILURE(sample, "Failed to create input sample",
                               PLATFORM_FAILURE,);
  // Start a trace that DoDecode() ends when the first output appears;
  // |inputs_before_decode_| counts inputs consumed in the interim.
  if (!inputs_before_decode_) {
    TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.Decoding", this, "");
  }
  inputs_before_decode_++;

  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0),
      "Failed to create input sample", PLATFORM_FAILURE,);

  HRESULT hr = decoder_->ProcessInput(0, sample, 0);
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to process input sample",
                                  PLATFORM_FAILURE,);

  // NOTE(review): an end-of-stream notification is sent after every input
  // sample, followed by a drain in DoDecode() — presumably to coax the MFT
  // into emitting output per input; confirm this is intentional.
  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0),
      "Failed to send eos message to MFT", PLATFORM_FAILURE,);
  state_ = kEosDrain;

  last_input_buffer_id_ = bitstream_buffer.id();

  DoDecode();

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kStopped || state_ == kNormal),
      "Failed to process output. Unexpected decoder state: " << state_,
      ILLEGAL_STATE,);

  // The Microsoft Media foundation decoder internally buffers up to 30 frames
  // before returning a decoded frame. We need to inform the client that this
  // input buffer is processed as it may stop sending us further input.
  // Note: This may break clients which expect every input buffer to be
  // associated with a decoded output buffer.
  // TODO(ananta)
  // Do some more investigation into whether it is possible to get the MFT
  // decoder to emit an output packet for every input packet.
  // http://code.google.com/p/chromium/issues/detail?id=108121
  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::NotifyInputBufferRead, this,
      bitstream_buffer.id()));
}
270
271 void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
272 const std::vector<media::PictureBuffer>& buffers) {
273 DCHECK(CalledOnValidThread());
274 // Copy the picture buffers provided by the client to the available list,
275 // and mark these buffers as available for use.
276 for (size_t buffer_index = 0; buffer_index < buffers.size();
277 ++buffer_index) {
278 bool inserted = output_picture_buffers_.insert(std::make_pair(
279 buffers[buffer_index].id(),
280 DXVAPictureBuffer(buffers[buffer_index]))).second;
281 DCHECK(inserted);
282 }
283 ProcessPendingSamples();
284 }
285
// Called when the client is done displaying |picture_buffer_id|; marks the
// buffer reusable and drains any pending output samples into it.
void DXVAVideoDecodeAccelerator::ReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK(CalledOnValidThread());

  OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
  RETURN_AND_NOTIFY_ON_FAILURE(it != output_picture_buffers_.end(),
      "Invalid picture id: " << picture_buffer_id, INVALID_ARGUMENT,);

  it->second.available = true;
  ProcessPendingSamples();
}
297
// Drains all buffered output from the MFT and posts NotifyFlushDone.
void DXVAVideoDecodeAccelerator::Flush() {
  DCHECK(CalledOnValidThread());

  DVLOG(1) << "DXVAVideoDecodeAccelerator::Flush";

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
      "Unexpected decoder state: " << state_, ILLEGAL_STATE,);

  state_ = kEosDrain;

  RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0),
      "Failed to send drain message", PLATFORM_FAILURE,);

  // As per MSDN docs after the client sends this message, it calls
  // IMFTransform::ProcessOutput in a loop, until ProcessOutput returns the
  // error code MF_E_TRANSFORM_NEED_MORE_INPUT. The DoDecode function sets
  // the state to kStopped when the decoder returns
  // MF_E_TRANSFORM_NEED_MORE_INPUT.
  // The MFT decoder can buffer up to 30 frames worth of input before
  // returning an output frame. This loop here attempts to retrieve as many
  // output frames as possible from the buffered set.
  // NOTE(review): if DoDecode() hits an error path that leaves state_
  // neither kStopped nor a decodable state, this loop could spin forever —
  // confirm every DoDecode() failure path exits the kEosDrain state.
  while (state_ != kStopped) {
    DoDecode();
  }

  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::NotifyFlushDone, this));

  state_ = kNormal;
}
328
// Discards all pending decode state in the MFT and posts NotifyResetDone.
void DXVAVideoDecodeAccelerator::Reset() {
  DCHECK(CalledOnValidThread());

  DVLOG(1) << "DXVAVideoDecodeAccelerator::Reset";

  RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kStopped),
      "Reset: invalid state: " << state_, ILLEGAL_STATE,);

  state_ = kResetting;

  // MFT_MESSAGE_COMMAND_FLUSH drops all queued input and output samples.
  RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0),
      "Reset: Failed to send message.", PLATFORM_FAILURE,);

  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::NotifyResetDone, this));

  state_ = DXVAVideoDecodeAccelerator::kNormal;
}
347
// Tears down the decoder and releases all D3D/MF resources.
void DXVAVideoDecodeAccelerator::Destroy() {
  DCHECK(CalledOnValidThread());
  Invalidate();
}
352
// Creates the D3D9Ex device and the DXVA device manager through which the
// device is shared with the decoder MFT.
bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() {
  base::win::ScopedComPtr<IDirect3D9Ex> d3d9;

  HRESULT hr = Direct3DCreate9Ex(D3D_SDK_VERSION, d3d9.Receive());
  RETURN_ON_HR_FAILURE(hr, false);

  // The device is never used for presentation, so a minimal 1x1 windowed
  // swap chain parented to the shell window is sufficient.
  D3DPRESENT_PARAMETERS present_params = {0};
  present_params.BackBufferWidth = 1;
  present_params.BackBufferHeight = 1;
  present_params.BackBufferFormat = D3DFMT_UNKNOWN;
  present_params.BackBufferCount = 1;
  present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
  present_params.hDeviceWindow = GetShellWindow();
  present_params.Windowed = TRUE;
  present_params.Flags = D3DPRESENTFLAG_VIDEO;
  present_params.FullScreen_RefreshRateInHz = 0;
  present_params.PresentationInterval = 0;

  hr = d3d9->CreateDeviceEx(D3DADAPTER_DEFAULT,
                            D3DDEVTYPE_HAL,
                            GetShellWindow(),
                            D3DCREATE_SOFTWARE_VERTEXPROCESSING,
                            &present_params,
                            NULL,
                            device_.Receive());
  RETURN_ON_HR_FAILURE(hr, false);

  hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token_,
                                         device_manager_.Receive());
  RETURN_ON_HR_FAILURE(hr, false);

  // Associate the device with the manager; |dev_manager_reset_token_| is
  // required for any future ResetDevice calls.
  hr = device_manager_->ResetDevice(device_, dev_manager_reset_token_);
  RETURN_ON_HR_FAILURE(hr, false);
  return true;
}
388
389 bool DXVAVideoDecodeAccelerator::InitDecoder() {
390 HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT),
391 NULL,
392 CLSCTX_INPROC_SERVER,
393 __uuidof(IMFTransform),
394 reinterpret_cast<void**>(decoder_.Receive()));
395 RETURN_ON_HR_FAILURE(hr, false);
396
397 CheckDecoderDxvaSupport();
Ami GONE FROM CHROMIUM 2011/12/21 00:39:15 RETURN_ON_FAILURE ?
ananta 2011/12/21 02:59:58 Done.
398
399 hr = decoder_->ProcessMessage(
400 MFT_MESSAGE_SET_D3D_MANAGER,
401 reinterpret_cast<ULONG_PTR>(device_manager_.get()));
402 RETURN_ON_HR_FAILURE(hr, false);
403
404 return SetDecoderMediaTypes();
405 }
406
407 bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
408 base::win::ScopedComPtr<IMFAttributes> attributes;
409 HRESULT hr = decoder_->GetAttributes(attributes.Receive());
410 RETURN_ON_HR_FAILURE(hr, false);
411
412 UINT32 dxva = 0;
413 hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
414 RETURN_ON_HR_FAILURE(hr, false);
415
416 hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE);
417 RETURN_ON_HR_FAILURE(hr, false);
418 return true;
419 }
420
421 bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() {
422 RETURN_ON_FAILURE(SetDecoderInputMediaType(), false);
423 return SetDecoderOutputMediaType(MFVideoFormat_NV12);
424 }
425
426 bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() {
427 base::win::ScopedComPtr<IMFMediaType> media_type;
428 HRESULT hr = MFCreateMediaType(media_type.Receive());
429 RETURN_ON_HR_FAILURE(hr, false);
430
431 hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
432 RETURN_ON_HR_FAILURE(hr, false);
433
434 hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
435 RETURN_ON_HR_FAILURE(hr, false);
436
437 hr = decoder_->SetInputType(0, media_type, 0); // No flags
438 RETURN_ON_HR_FAILURE(hr, false);
439 return true;
440 }
441
442 bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType(
443 const GUID& subtype) {
444 base::win::ScopedComPtr<IMFMediaType> out_media_type;
445
446 for (uint32 i = 0;
447 SUCCEEDED(decoder_->GetOutputAvailableType(0, i,
448 out_media_type.Receive()));
449 ++i) {
450 GUID out_subtype = {0};
451 HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
452 RETURN_ON_HR_FAILURE(hr, false);
453
454 if (out_subtype == subtype) {
455 hr = decoder_->SetOutputType(0, out_media_type, 0); // No flags
456 RETURN_ON_HR_FAILURE(hr, false);
457 return true;
458 }
459 out_media_type.Release();
460 }
461 return false;
462 }
463
464 bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg,
465 int32 param) {
466 HRESULT hr = decoder_->ProcessMessage(msg, param);
467 return SUCCEEDED(hr);
468 }
469
// Gets the minimum buffer sizes for input and output samples. The MFT will not
// allocate buffer for input nor output, so we have to do it ourselves and make
// sure they're the correct size. We only provide decoding if DXVA is enabled.
bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() {
  HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
  RETURN_ON_HR_FAILURE(hr, false);

  hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
  RETURN_ON_HR_FAILURE(hr, false);

  DVLOG(1) << "Input stream info: ";
  DVLOG(1) << "Max latency: " << input_stream_info_.hnsMaxLatency;
  // There should be three flags, one for requiring a whole frame be in a
  // single sample, one for requiring there be one buffer only in a single
  // sample, and one that specifies a fixed sample size. (as in cbSize)
  // NOTE(review): this hard CHECK assumes the Microsoft H.264 MFT always
  // reports exactly these flags — confirm across Windows versions.
  CHECK_EQ(input_stream_info_.dwFlags, 0x7u);

  DVLOG(1) << "Min buffer size: " << input_stream_info_.cbSize;
  DVLOG(1) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
  DVLOG(1) << "Alignment: " << input_stream_info_.cbAlignment;

  DVLOG(1) << "Output stream info: ";
  // The flags here should be the same and mean the same thing, except when
  // DXVA is enabled, there is an extra 0x100 flag meaning decoder will
  // allocate its own sample.
  DVLOG(1) << "Flags: "
           << std::hex << std::showbase << output_stream_info_.dwFlags;
  CHECK_EQ(output_stream_info_.dwFlags, 0x107u);
  DVLOG(1) << "Min buffer size: " << output_stream_info_.cbSize;
  DVLOG(1) << "Alignment: " << output_stream_info_.cbAlignment;
  return true;
}
502
503 void DXVAVideoDecodeAccelerator::DoDecode() {
504 // This function is also called from Flush in a loop which could result
505 // in the state transitioning to kNormal due to decoded output.
506 RETURN_AND_NOTIFY_ON_FAILURE((state_ == kNormal || state_ == kEosDrain),
507 "DoDecode: not in normal/drain state", ILLEGAL_STATE,);
508
509 MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0};
510 DWORD status = 0;
511
512 HRESULT hr = decoder_->ProcessOutput(0, // No flags
513 1, // # of out streams to pull from
514 &output_data_buffer,
515 &status);
516 IMFCollection* events = output_data_buffer.pEvents;
517 if (events != NULL) {
518 VLOG(1) << "Got events from ProcessOuput, but discarding";
519 events->Release();
520 }
521 if (FAILED(hr)) {
522 // A stream change needs further ProcessInput calls to get back decoder
523 // output which is why we need to set the state to stopped.
524 if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
525 // No more output from the decoder. Notify EOS and stop playback.
Ami GONE FROM CHROMIUM 2011/12/21 00:39:15 This comment is still wrong.
ananta 2011/12/21 02:59:58 Done.
526 if (!SetDecoderOutputMediaType(MFVideoFormat_NV12)) {
527 NOTREACHED() << "Failed to set decoder output media type";
528 state_ = kStopped;
529 } else {
530 DVLOG(1) << "Received output format change from the decoder."
531 " Recursively invoking DoDecode";
532 DoDecode();
533 }
534 return;
535 } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
536 // No more output from the decoder. Stop playback.
537 state_ = kStopped;
538 return;
539 } else {
540 NOTREACHED() << "Unhandled error in DoDecode()";
541 return;
542 }
543 }
544 TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.Decoding", this, "");
545
546 TRACE_COUNTER1("DXVA Decoding", "TotalPacketsBeforeDecode",
547 inputs_before_decode_);
548
549 inputs_before_decode_ = 0;
550
551 RETURN_AND_NOTIFY_ON_FAILURE(ProcessOutputSample(output_data_buffer.pSample),
552 "Failed to process output sample.", PLATFORM_FAILURE,);
553
554 state_ = kNormal;
555 }
556
// Converts the decoded YUV surface in |sample| to an RGB surface and queues
// it for copying into a client picture buffer, requesting buffers from the
// client on first use. Takes ownership of |sample|.
bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) {
  RETURN_ON_FAILURE(sample, false);

  // Take ownership so the sample is released on every return path.
  base::win::ScopedComPtr<IMFSample> output_sample;
  output_sample.Attach(sample);

  base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
  HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive());
  RETURN_ON_HR_FAILURE(hr, false);

  // Retrieve the D3D surface backing the media buffer.
  base::win::ScopedComPtr<IDirect3DSurface9> surface;
  hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
                    IID_PPV_ARGS(surface.Receive()));
  RETURN_ON_HR_FAILURE(hr, false);

  D3DSURFACE_DESC surface_desc;
  hr = surface->GetDesc(&surface_desc);
  RETURN_ON_HR_FAILURE(hr, false);

  TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this,
                        "");
  // TODO(ananta)
  // The code below may not be necessary once we have an ANGLE extension which
  // allows us to pass the Direct 3D surface directly for rendering.

  // The decoded bits in the source direct 3d surface are in the YUV
  // format. Angle does not support that. As a workaround we create an
  // offscreen surface in the RGB format and copy the source surface
  // to this surface.
  base::win::ScopedComPtr<IDirect3DSurface9> dest_surface;
  hr = device_->CreateOffscreenPlainSurface(surface_desc.Width,
                                            surface_desc.Height,
                                            D3DFMT_X8R8G8B8,
                                            D3DPOOL_DEFAULT,
                                            dest_surface.Receive(),
                                            NULL);
  RETURN_ON_HR_FAILURE(hr, false);

  // D3DXLoadSurfaceFromSurface performs the YUV -> RGB color conversion.
  hr = D3DXLoadSurfaceFromSurface(dest_surface, NULL, NULL, surface, NULL,
                                  NULL, D3DX_DEFAULT, 0);
  RETURN_ON_HR_FAILURE(hr, false);

  TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this, "");

  pending_output_samples_.push_back(
      PendingSampleInfo(last_input_buffer_id_, dest_surface));

  // If we have available picture buffers to copy the output data then use the
  // first one and then flag it as not being available for use.
  if (output_picture_buffers_.size()) {
    ProcessPendingSamples();
    return true;
  }
  if (pictures_requested_) {
    DVLOG(1) << "Waiting for picture slots from the client.";
    return true;
  }
  // Go ahead and request picture buffers.
  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::RequestPictureBuffers,
      this, surface_desc.Width, surface_desc.Height));

  pictures_requested_ = true;
  return true;
}
622
// Reads back the RGB bits of |dest_surface|, uploads them into the GL
// texture backing |picture_buffer|, and posts a PictureReady notification
// tagged with |input_buffer_id|.
bool DXVAVideoDecodeAccelerator::CopyOutputSampleDataToPictureBuffer(
    IDirect3DSurface9* dest_surface, media::PictureBuffer picture_buffer,
    int input_buffer_id) {
  DCHECK(dest_surface);

  D3DSURFACE_DESC surface_desc;
  HRESULT hr = dest_surface->GetDesc(&surface_desc);
  RETURN_ON_HR_FAILURE(hr, false);

  scoped_array<char> bits;
  RETURN_ON_FAILURE(GetBitmapFromSurface(dest_surface, &bits), false);

  // This function currently executes in the context of IPC handlers in the
  // GPU process which ensures that there is always a OpenGL context.
  // Save the current texture binding so it can be restored afterwards.
  GLint current_texture = 0;
  glGetIntegerv(GL_TEXTURE_BINDING_2D, &current_texture);

  glBindTexture(GL_TEXTURE_2D, picture_buffer.texture_id());
  glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT, surface_desc.Width,
               surface_desc.Height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE,
               reinterpret_cast<GLvoid*>(bits.get()));
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

  // Restore the caller's texture binding.
  glBindTexture(GL_TEXTURE_2D, current_texture);

  media::Picture output_picture(picture_buffer.id(), input_buffer_id);
  MessageLoop::current()->PostTask(FROM_HERE, base::Bind(
      &DXVAVideoDecodeAccelerator::NotifyPictureReady, this, output_picture));
  return true;
}
653
654 void DXVAVideoDecodeAccelerator::ProcessPendingSamples() {
655 if (pending_output_samples_.empty())
656 return;
657
658 OutputBuffers::iterator index;
659
660 for (index = output_picture_buffers_.begin();
661 index != output_picture_buffers_.end() &&
662 !pending_output_samples_.empty();
663 ++index) {
664 if (index->second.available) {
665 PendingSampleInfo sample_info = pending_output_samples_.front();
666
667 CopyOutputSampleDataToPictureBuffer(sample_info.dest_surface,
668 index->second.picture_buffer,
669 sample_info.input_buffer_id);
670 index->second.available = false;
671 pending_output_samples_.pop_front();
672 }
673 }
674 }
675
676 void DXVAVideoDecodeAccelerator::ClearState() {
677 last_input_buffer_id_ = -1;
678 output_picture_buffers_.clear();
679 pending_output_samples_.clear();
680 }
681
// Reports |error| to the client (once) and tears down the decoder.
void DXVAVideoDecodeAccelerator::StopOnError(
    media::VideoDecodeAccelerator::Error error) {
  DCHECK(CalledOnValidThread());

  if (client_)
    client_->NotifyError(error);
  // Clear the client so no further notifications are delivered.
  client_ = NULL;

  if (state_ != kUninitialized) {
    Invalidate();
  }
}
694
// Reads the pixels of |surface| back into system memory via GDI, converting
// the device-dependent bitmap to a DIB. On success |bits| holds the pixel
// data. The surface DC is released on every path.
bool DXVAVideoDecodeAccelerator::GetBitmapFromSurface(
    IDirect3DSurface9* surface,
    scoped_array<char>* bits) {
  // Get the currently loaded bitmap from the DC.
  HDC hdc = NULL;
  HRESULT hr = surface->GetDC(&hdc);
  RETURN_ON_HR_FAILURE(hr, false);

  HBITMAP bitmap =
      reinterpret_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP));
  if (!bitmap) {
    NOTREACHED() << "Failed to get bitmap from DC";
    surface->ReleaseDC(hdc);
    return false;
  }
  // TODO(ananta)
  // The code below may not be necessary once we have an ANGLE extension which
  // allows us to pass the Direct 3D surface directly for rendering.
  // The Device dependent bitmap is upside down for OpenGL. We convert the
  // bitmap to a DIB and render it on the texture instead.
  BITMAP bitmap_basic_info = {0};
  if (!GetObject(bitmap, sizeof(BITMAP), &bitmap_basic_info)) {
    NOTREACHED() << "Failed to read bitmap info";
    surface->ReleaseDC(hdc);
    return false;
  }
  BITMAPINFO bitmap_info = {0};
  bitmap_info.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
  bitmap_info.bmiHeader.biWidth = bitmap_basic_info.bmWidth;
  bitmap_info.bmiHeader.biHeight = bitmap_basic_info.bmHeight;
  bitmap_info.bmiHeader.biPlanes = 1;
  bitmap_info.bmiHeader.biBitCount = bitmap_basic_info.bmBitsPixel;
  bitmap_info.bmiHeader.biCompression = BI_RGB;
  bitmap_info.bmiHeader.biSizeImage = 0;
  bitmap_info.bmiHeader.biClrUsed = 0;

  // First GetDIBits call (no output buffer) fills in biSizeImage so we know
  // how much to allocate.
  int ret = GetDIBits(hdc, bitmap, 0, 0, NULL, &bitmap_info, DIB_RGB_COLORS);
  if (!ret || bitmap_info.bmiHeader.biSizeImage <= 0) {
    NOTREACHED() << "Failed to read bitmap size";
    surface->ReleaseDC(hdc);
    return false;
  }

  bits->reset(new char[bitmap_info.bmiHeader.biSizeImage]);
  // Second call actually copies the pixel data.
  ret = GetDIBits(hdc, bitmap, 0, bitmap_basic_info.bmHeight, bits->get(),
                  &bitmap_info, DIB_RGB_COLORS);
  if (!ret) {
    NOTREACHED() << "Failed to retrieve bitmap bits.";
  }
  surface->ReleaseDC(hdc);
  return !!ret;
}
747
// Releases every decoder resource and shuts down Media Foundation.
// Idempotent: a second call is a no-op once state_ is kUninitialized.
void DXVAVideoDecodeAccelerator::Invalidate() {
  if (state_ == kUninitialized)
    return;
  ClearState();
  decoder_.Release();
  device_.Release();
  device_manager_.Release();
  MFShutdown();
  state_ = kUninitialized;
}
758
759 void DXVAVideoDecodeAccelerator::NotifyInitializeDone() {
760 if (client_)
761 client_->NotifyInitializeDone();
762 }
763
764 void DXVAVideoDecodeAccelerator::NotifyInputBufferRead(int input_buffer_id) {
765 if (client_)
766 client_->NotifyEndOfBitstreamBuffer(input_buffer_id);
767 }
768
769 void DXVAVideoDecodeAccelerator::NotifyFlushDone() {
770 if (client_)
771 client_->NotifyFlushDone();
772 }
773
774 void DXVAVideoDecodeAccelerator::NotifyResetDone() {
775 if (client_)
776 client_->NotifyResetDone();
777 }
778
779 void DXVAVideoDecodeAccelerator::RequestPictureBuffers(int width, int height) {
780 // This task could execute after the decoder has been torn down.
781 if (state_ != kUninitialized && client_) {
782 client_->ProvidePictureBuffers(kNumPictureBuffers,
783 gfx::Size(width, height));
784 }
785 }
786
787 void DXVAVideoDecodeAccelerator::NotifyPictureReady(
788 const media::Picture& picture) {
789 // This task could execute after the decoder has been torn down.
790 if (state_ != kUninitialized && client_)
791 client_->PictureReady(picture);
792 }
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698