// Copyright (c) 2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/gpu/gpu_video_decoder_mft.h"

#if defined(OS_WIN)

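// Statically link the DirectX Video Acceleration, Direct3D 9, and Media
// Foundation libraries this decoder depends on.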
#pragma comment(lib, "dxva2.lib")
#pragma comment(lib, "d3d9.lib")
#pragma comment(lib, "evr.lib")
#pragma comment(lib, "mf.lib")
#pragma comment(lib, "mfplat.lib")
#pragma comment(lib, "mfuuid.lib")
#pragma comment(lib, "strmiids.lib")

GpuVideoDecoderMFT::GpuVideoDecoderMFT(
    const GpuVideoDecoderInfoParam* param,
    GpuChannel* channel_,
    base::ProcessHandle handle)
    : GpuVideoDecoder(param, channel_, handle),
      state_(kNormal),
      output_transfer_buffer_busy_(false),
      pending_request_(0) {
}

bool GpuVideoDecoderMFT::StartupComLibraries() {
  HRESULT hr;
  hr = CoInitializeEx(NULL,
                      COINIT_APARTMENTTHREADED | COINIT_DISABLE_OLE1DDE);
  if (FAILED(hr)) {
    LOG(ERROR) << "CoInitializeEx failed";
    return false;
  }

  hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
  if (FAILED(hr)) {
    LOG(ERROR) << "MFStartup failed";
    CoUninitialize();
    return false;
  }
  return true;
}

void GpuVideoDecoderMFT::ShutdownComLibraries() {
  HRESULT hr;
  hr = MFShutdown();
  if (FAILED(hr)) {
    LOG(WARNING) << "MFShutdown failed";
  }
  CoUninitialize();
}

// Creates a Media Foundation sample with one buffer containing a copy of the
// given Annex B stream data.
// If duration and timestamp are not known, provide 0.
// min_size specifies the minimum size of the buffer (might be required by
// the decoder for input). The times here should be given in 100ns units.
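// For example, one frame of 30 fps video has a duration of roughly 333,333
// units (33.3 ms), and a frame that starts 2 seconds into the stream has a
// timestamp of 20,000,000 units.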
IMFSample* GpuVideoDecoderMFT::CreateInputSample(uint8* data,
                                                 int32 size,
                                                 int64 timestamp,
                                                 int64 duration,
                                                 int32 min_size) {
  ScopedComPtr<IMFSample> sample;
  HRESULT hr = MFCreateSample(sample.Receive());
  if (FAILED(hr) || !sample.get()) {
    LOG(ERROR) << "Unable to create an empty sample";
    return NULL;
  }

  ScopedComPtr<IMFMediaBuffer> buffer;
  int32 buffer_length = min_size > size ? min_size : size;
  hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "Unable to create an empty buffer";
    return NULL;
  }

  hr = sample->AddBuffer(buffer.get());
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to add empty buffer to sample";
    return NULL;
  }

  if (duration > 0 && FAILED(sample->SetSampleDuration(duration))) {
    LOG(ERROR) << "Failed to set sample duration";
    return NULL;
  }

  if (timestamp > 0 && FAILED(sample->SetSampleTime(timestamp))) {
    LOG(ERROR) << "Failed to set sample time";
    return NULL;
  }

  DWORD max_length, current_length;
  uint8* buffer_data;
  hr = buffer->Lock(&buffer_data, &max_length, &current_length);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to lock buffer";
    return NULL;
  }
  CHECK_GE(static_cast<int>(max_length), size);
  memcpy(buffer_data, data, size);
  CHECK(SUCCEEDED(buffer->Unlock()));

  hr = buffer->SetCurrentLength(size);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to set current length to " << size;
    return NULL;
  }

  return sample.Detach();
}

bool GpuVideoDecoderMFT::CreateD3DDevManager(HWND video_window) {
  d3d9_.Attach(Direct3DCreate9(D3D_SDK_VERSION));
  if (d3d9_.get() == NULL) {
    LOG(ERROR) << "Failed to create D3D9";
    return false;
  }

  D3DPRESENT_PARAMETERS present_params = {0};
  present_params.BackBufferWidth = init_param_.width_;
  present_params.BackBufferHeight = init_param_.height_;
  present_params.BackBufferFormat = D3DFMT_UNKNOWN;
  present_params.BackBufferCount = 1;
  present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
  present_params.hDeviceWindow = video_window;
  present_params.Windowed = TRUE;
  present_params.Flags = D3DPRESENTFLAG_VIDEO;
  present_params.FullScreen_RefreshRateInHz = 0;
  present_params.PresentationInterval = 0;

  // D3DCREATE_HARDWARE_VERTEXPROCESSING specifies hardware vertex processing.
  // (Is it even needed for just video decoding?)
  HRESULT hr = d3d9_->CreateDevice(D3DADAPTER_DEFAULT,
                                   D3DDEVTYPE_HAL,
                                   video_window,
                                   D3DCREATE_HARDWARE_VERTEXPROCESSING,
                                   &present_params,
                                   device_.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to create D3D Device";
    return false;
  }

  UINT dev_manager_reset_token = 0;
  hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token,
                                         device_manager_.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "Couldn't create D3D Device manager";
    return false;
  }

  hr = device_manager_->ResetDevice(device_.get(),
                                    dev_manager_reset_token);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to set device to device manager";
    return false;
  }

  return true;
}

bool GpuVideoDecoderMFT::InitMediaFoundation() {
  if (!StartupComLibraries())
    return false;

  if (!CreateD3DDevManager(GetDesktopWindow()))
    return false;

  if (!InitDecoder())
    return false;

  if (!GetStreamsInfoAndBufferReqs())
    return false;

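  // Tell the transform that the first input sample is about to arrive so it
  // can get its internal state ready before ProcessInput() is called.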
  return SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM);
}

bool GpuVideoDecoderMFT::InitDecoder() {
  // TODO(jiesun): use MFEnum to get decoder CLSID.
  HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT),
                                NULL,
                                CLSCTX_INPROC_SERVER,
                                __uuidof(IMFTransform),
                                reinterpret_cast<void**>(decoder_.Receive()));
  if (FAILED(hr) || !decoder_.get()) {
    LOG(ERROR) << "CoCreateInstance failed " << std::hex << std::showbase << hr;
    return false;
  }

  if (!CheckDecoderDxvaSupport())
    return false;

  hr = decoder_->ProcessMessage(
      MFT_MESSAGE_SET_D3D_MANAGER,
      reinterpret_cast<ULONG_PTR>(device_manager_.get()));
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to set D3D9 device to decoder";
    return false;
  }

  return SetDecoderMediaTypes();
}

bool GpuVideoDecoderMFT::CheckDecoderDxvaSupport() {
  ScopedComPtr<IMFAttributes> attributes;
  HRESULT hr = decoder_->GetAttributes(attributes.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to get attributes, hr = "
               << std::hex << std::showbase << hr;
    return false;
  }

  UINT32 dxva;
  hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
  if (FAILED(hr) || !dxva) {
    LOG(ERROR) << "Failed to get DXVA attr, hr = "
               << std::hex << std::showbase << hr
               << "; this might not be the right decoder.";
    return false;
  }
  return true;
}

bool GpuVideoDecoderMFT::SetDecoderMediaTypes() {
  return SetDecoderInputMediaType() &&
         SetDecoderOutputMediaType(MFVideoFormat_NV12);
}

bool GpuVideoDecoderMFT::SetDecoderInputMediaType() {
  ScopedComPtr<IMFMediaType> media_type;
  HRESULT hr = MFCreateMediaType(media_type.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to create empty media type object";
    return false;
  }

  hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
  if (FAILED(hr)) {
    LOG(ERROR) << "SetGUID for major type failed";
    return false;
  }

  hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
  if (FAILED(hr)) {
    LOG(ERROR) << "SetGUID for subtype failed";
    return false;
  }

  hr = decoder_->SetInputType(0, media_type.get(), 0);  // No flags.
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to set decoder's input type";
    return false;
  }

  return true;
}

bool GpuVideoDecoderMFT::SetDecoderOutputMediaType(const GUID subtype) {
  DWORD i = 0;
  IMFMediaType* out_media_type;
  while (SUCCEEDED(decoder_->GetOutputAvailableType(0, i, &out_media_type))) {
    GUID out_subtype;
    HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
    if (FAILED(hr)) {
      LOG(ERROR) << "Failed to GetGUID() on GetOutputAvailableType() " << i;
      out_media_type->Release();
      i++;
      continue;
    }
    if (out_subtype == subtype) {
      hr = decoder_->SetOutputType(0, out_media_type, 0);  // No flags.
      if (FAILED(hr)) {
        LOG(ERROR) << "Failed to SetOutputType to |subtype| or obtain "
                   << "width/height/stride " << std::hex << hr;
      } else {
        out_media_type->Release();
        return true;
      }
    }
    i++;
    out_media_type->Release();
  }
  return false;
}

bool GpuVideoDecoderMFT::SendMFTMessage(MFT_MESSAGE_TYPE msg) {
  HRESULT hr = decoder_->ProcessMessage(msg, NULL);
  return SUCCEEDED(hr);
}

// Prints out info about the input/output streams and gets the minimum buffer
// sizes for input and output samples.
// The MFT will not allocate buffers for either input or output, so we have
// to do it ourselves and make sure they're the correct size.
// The exception is when DXVA is enabled: the decoder allocates its own output.
bool GpuVideoDecoderMFT::GetStreamsInfoAndBufferReqs() {
  HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to get input stream info";
    return false;
  }
  LOG(INFO) << "Input stream info: ";
  LOG(INFO) << "Max latency: " << input_stream_info_.hnsMaxLatency;

  // There should be three flags, one for requiring a whole frame be in a
  // single sample, one for requiring there be one buffer only in a single
  // sample, and one that specifies a fixed sample size (as in cbSize).
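  // These correspond to MFT_INPUT_STREAM_WHOLE_SAMPLES (0x1),
  // MFT_INPUT_STREAM_SINGLE_SAMPLE_PER_BUFFER (0x2) and
  // MFT_INPUT_STREAM_FIXED_SAMPLE_SIZE (0x4), which together give the 0x7
  // checked below.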
  LOG(INFO) << "Flags: "
            << std::hex << std::showbase << input_stream_info_.dwFlags;
  CHECK_EQ(static_cast<int>(input_stream_info_.dwFlags), 0x7);
  LOG(INFO) << "Min buffer size: " << input_stream_info_.cbSize;
  LOG(INFO) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
  LOG(INFO) << "Alignment: " << input_stream_info_.cbAlignment;
  if (input_stream_info_.cbAlignment > 0) {
    LOG(WARNING) << "Decoder requires input to be aligned";
  }

  hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
  if (FAILED(hr)) {
    LOG(ERROR) << "Failed to get output stream info";
    return false;
  }
  LOG(INFO) << "Output stream info: ";

  // The flags here should be the same and mean the same thing, except that
  // when DXVA is enabled there is an extra 0x100 flag meaning the decoder
  // will allocate its own output samples.
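  // That extra flag is MFT_OUTPUT_STREAM_PROVIDES_SAMPLES (0x100), so the
  // expected value is 0x7 | 0x100 = 0x107.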
  CHECK_EQ(static_cast<int>(output_stream_info_.dwFlags), 0x107);
  LOG(INFO) << "Min buffer size: " << output_stream_info_.cbSize;
  LOG(INFO) << "Alignment: " << output_stream_info_.cbAlignment;
  if (output_stream_info_.cbAlignment > 0) {
    LOG(WARNING) << "Decoder requires output to be aligned";
  }

  return true;
}

bool GpuVideoDecoderMFT::DoInitialize(
    const GpuVideoDecoderInitParam& param,
    GpuVideoDecoderInitDoneParam* done_param) {
  LOG(ERROR) << "GpuVideoDecoderMFT::DoInitialize";

  done_param->format_ =
      GpuVideoDecoderInitDoneParam::SurfaceFormat_YV12;
  done_param->surface_type_ =
      GpuVideoDecoderInitDoneParam::SurfaceTypeSystemMemory;
  done_param->input_buffer_handle_ = base::SharedMemory::NULLHandle();
  done_param->output_buffer_handle_ = base::SharedMemory::NULLHandle();

  do {
    done_param->success_ = false;

    if (!InitMediaFoundation())
      break;

    // TODO(jiesun): Check the assumption of input size < original size.
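    // A YV12 frame occupies width * height * 3 / 2 bytes: one byte of Y per
    // pixel plus two quarter-resolution chroma planes.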
    done_param->input_buffer_size_ = param.width_ * param.height_ * 3 / 2;
    input_transfer_buffer_.reset(new base::SharedMemory);
    if (!input_transfer_buffer_->Create(std::wstring(), false, false,
                                        done_param->input_buffer_size_))
      break;
    if (!input_transfer_buffer_->Map(done_param->input_buffer_size_))
      break;

    // TODO(jiesun): Allocate this according to the surface format.
    // The format could actually change during streaming; we need to
    // notify the GpuVideoDecoderHost side when this happens and renegotiate
    // the transfer buffer.
    done_param->output_buffer_size_ = param.width_ * param.height_ * 3 / 2;
    output_transfer_buffer_.reset(new base::SharedMemory);
    if (!output_transfer_buffer_->Create(std::wstring(), false, false,
                                         done_param->output_buffer_size_))
      break;
    if (!output_transfer_buffer_->Map(done_param->output_buffer_size_))
      break;

    if (!input_transfer_buffer_->ShareToProcess(
        renderer_handle_,
        &done_param->input_buffer_handle_))
      break;
    if (!output_transfer_buffer_->ShareToProcess(
        renderer_handle_,
        &done_param->output_buffer_handle_))
      break;

    done_param->success_ = true;
  } while (0);

  SendInitializeDone(*done_param);
  return true;
}

bool GpuVideoDecoderMFT::DoUninitialize() {
  LOG(ERROR) << "GpuVideoDecoderMFT::DoUninitialize";
  SendUninitializeDone();
  return true;
}

void GpuVideoDecoderMFT::DoEmptyThisBuffer(
    const GpuVideoDecoderInputBufferParam& buffer) {
  LOG(ERROR) << "GpuVideoDecoderMFT::EmptyThisBuffer";

  CHECK(input_transfer_buffer_->memory());
  ScopedComPtr<IMFSample> sample;
  if (buffer.size_) {
    uint8* data = static_cast<uint8*>(input_transfer_buffer_->memory());
    sample.Attach(CreateInputSample(data,
                                    buffer.size_,
                                    buffer.timestamp_ * 10,  // To 100ns units.
                                    0LL,
                                    input_stream_info_.cbSize));
    CHECK(sample.get());
  } else {
    state_ = kEosFlush;
  }
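  // An empty input buffer signals end-of-stream; the NULL sample queued below
  // makes the decode loop send MFT_MESSAGE_NOTIFY_END_OF_STREAM.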

  input_buffer_queue_.push_back(sample);
  SendEmptyBufferACK();

  while (pending_request_) {
    if (!DoDecode())
      break;
  }
}

void GpuVideoDecoderMFT::DoFillThisBuffer(
    const GpuVideoDecoderOutputBufferParam& frame) {
  LOG(ERROR) << "GpuVideoDecoderMFT::FillThisBuffer";

  pending_request_++;
  while (pending_request_) {
    if (!DoDecode())
      break;
  }
}

void GpuVideoDecoderMFT::DoFillThisBufferDoneACK() {
  output_transfer_buffer_busy_ = false;
  pending_request_--;
  while (pending_request_) {
    if (!DoDecode())
      break;
  }
}

void GpuVideoDecoderMFT::DoFlush() {
  state_ = kFlushing;

  while (!input_buffer_queue_.empty())
    input_buffer_queue_.pop_front();
  pending_request_ = 0;
  // TODO(jiesun): this is wrong??
  output_transfer_buffer_busy_ = false;
  SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH);

  state_ = kNormal;
  SendFlushDone();
}

bool GpuVideoDecoderMFT::DoDecode() {
  if (state_ != kNormal && state_ != kEosFlush)
    return false;
  if (output_transfer_buffer_busy_)
    return false;

  MFT_OUTPUT_DATA_BUFFER output_data_buffer;
  memset(&output_data_buffer, 0, sizeof(output_data_buffer));
  output_data_buffer.dwStreamID = 0;

  ScopedComPtr<IMFSample> output_sample;
  DWORD status;
  HRESULT hr = decoder_->ProcessOutput(0,  // No flags.
                                       1,  // # of out streams to pull from.
                                       &output_data_buffer,
                                       &status);

  IMFCollection* events = output_data_buffer.pEvents;
  if (events != NULL) {
    LOG(INFO) << "Got events from ProcessOutput, but discarding";
    events->Release();
  }

  if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
    CHECK(SetDecoderOutputMediaType(MFVideoFormat_NV12));
    return true;
  }
  if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
    if (input_buffer_queue_.empty()) {
      if (state_ == kEosFlush) {
        GpuVideoDecoderOutputBufferParam output_param;
        output_param.timestamp_ = 0;
        output_param.duration_ = 0;
        output_param.flags_ =
            GpuVideoDecoderOutputBufferParam::kFlagsEndOfStream;
        output_transfer_buffer_busy_ = true;
        SendFillBufferDone(output_param);
      }
      return false;
    }
    while (!input_buffer_queue_.empty()) {
      ScopedComPtr<IMFSample> input_sample = input_buffer_queue_.front();
      input_buffer_queue_.pop_front();

      if (input_sample.get()) {
        HRESULT hr = decoder_->ProcessInput(0, input_sample.get(), 0);
        if (hr == MF_E_NOTACCEPTING) {
          // The decoder can't accept more input right now; put the sample
          // back so it isn't lost and retry after the next ProcessOutput().
          input_buffer_queue_.push_front(input_sample);
          return true;
        }
        CHECK(SUCCEEDED(hr));
      } else {
        SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM);
      }

      // If we already received the input EOS, we do not need to issue
      // more requests for new samples.
      if (state_ != kEosFlush)
        SendEmptyBufferDone();
    }
    return true;
  }

  CHECK(SUCCEEDED(hr));
  output_sample.Attach(output_data_buffer.pSample);
  CHECK(output_sample.get());

  int64 timestamp, duration;
  output_sample->GetSampleTime(&timestamp);
  output_sample->GetSampleDuration(&duration);

  // The duration and timestamps are in 100-ns units, so divide by 10
  // to convert to microseconds.
  timestamp /= 10;
  duration /= 10;

  // Sanity checks for checking if there is really something in the sample.
  DWORD buf_count;
  hr = output_sample->GetBufferCount(&buf_count);
  CHECK(SUCCEEDED(hr) && buf_count == 1);

  ScopedComPtr<IMFMediaBuffer> output_buffer;
  hr = output_sample->GetBufferByIndex(0, output_buffer.Receive());
  CHECK(SUCCEEDED(hr));

  ScopedComPtr<IDirect3DSurface9> surface;
  hr = MFGetService(output_buffer, MR_BUFFER_SERVICE,
                    IID_PPV_ARGS(surface.Receive()));
  CHECK(SUCCEEDED(hr));

  // Convert the decoded NV12 frame in the Direct3D surface to planar YV12 in
  // the output transfer buffer.
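  // NV12 stores a full-resolution Y plane followed by a half-resolution plane
  // of interleaved chroma samples (U in even columns, V in odd columns),
  // while the output format uses separate quarter-size chroma planes, so the
  // copy below de-interleaves the chroma as it goes.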
  D3DLOCKED_RECT d3dlocked_rect;
  RECT rect = {0, 0, init_param_.width_, init_param_.height_};
  hr = surface->LockRect(&d3dlocked_rect, &rect, 0);

  if (SUCCEEDED(hr)) {
    D3DSURFACE_DESC desc;
    hr = surface->GetDesc(&desc);
    CHECK(SUCCEEDED(hr));

    uint32 src_stride = d3dlocked_rect.Pitch;
    uint32 dst_stride = init_param_.width_;
    uint8* src_y = static_cast<uint8*>(d3dlocked_rect.pBits);
    uint8* src_uv = src_y + src_stride * desc.Height;
    uint8* dst_y = static_cast<uint8*>(output_transfer_buffer_->memory());
    uint8* dst_u = dst_y + dst_stride * init_param_.height_;
    uint8* dst_v = dst_u + dst_stride * init_param_.height_ / 4;

    for (int y = 0; y < init_param_.height_; ++y) {
      for (int x = 0; x < init_param_.width_; ++x) {
        dst_y[x] = src_y[x];
        if (!(y & 1)) {
          if (x & 1)
            dst_v[x >> 1] = src_uv[x];
          else
            dst_u[x >> 1] = src_uv[x];
        }
      }
      dst_y += dst_stride;
      src_y += src_stride;
      if (!(y & 1)) {
        src_uv += src_stride;
        dst_v += dst_stride >> 1;
        dst_u += dst_stride >> 1;
      }
    }
    hr = surface->UnlockRect();
    CHECK(SUCCEEDED(hr));
  }

  GpuVideoDecoderOutputBufferParam output_param;
  output_param.timestamp_ = timestamp;
  output_param.duration_ = duration;
  output_param.flags_ = 0;
  output_transfer_buffer_busy_ = true;
  SendFillBufferDone(output_param);
  return true;
}

#endif  // defined(OS_WIN)