// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/media/dxva_video_decode_accelerator_win.h"

#include <memory>

#if !defined(OS_WIN)
#error This file should only be built on Windows.
#endif  // !defined(OS_WIN)

#include <codecapi.h>
#include <dxgi1_2.h>
#include <ks.h>
#include <mfapi.h>
#include <mferror.h>
#include <ntverp.h>
#include <stddef.h>
#include <string.h>
#include <wmcodecdsp.h>

#include "base/base_paths_win.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/debug/alias.h"
#include "base/file_version_info.h"
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/shared_memory.h"
#include "base/message_loop/message_loop.h"
#include "base/path_service.h"
#include "base/trace_event/trace_event.h"
#include "base/win/windows_version.h"
#include "build/build_config.h"
#include "media/base/win/mf_initializer.h"
#include "media/video/video_decode_accelerator.h"
#include "third_party/angle/include/EGL/egl.h"
#include "third_party/angle/include/EGL/eglext.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_surface_egl.h"

namespace {

// Path is appended to the PROGRAM_FILES base path.
const wchar_t kVPXDecoderDLLPath[] = L"Intel\\Media SDK\\";

const wchar_t kVP8DecoderDLLName[] =
#if defined(ARCH_CPU_X86)
    L"mfx_mft_vp8vd_32.dll";
#elif defined(ARCH_CPU_X86_64)
    L"mfx_mft_vp8vd_64.dll";
#else
#error Unsupported Windows CPU Architecture
#endif

const wchar_t kVP9DecoderDLLName[] =
#if defined(ARCH_CPU_X86)
    L"mfx_mft_vp9vd_32.dll";
#elif defined(ARCH_CPU_X86_64)
    L"mfx_mft_vp9vd_64.dll";
#else
#error Unsupported Windows CPU Architecture
#endif

const CLSID CLSID_WebmMfVp8Dec = {
    0x451e3cb7,
    0x2622,
    0x4ba5,
    { 0x8e, 0x1d, 0x44, 0xb3, 0xc4, 0x1d, 0x09, 0x24 }
};

const CLSID CLSID_WebmMfVp9Dec = {
    0x07ab4bd2,
    0x1979,
    0x4fcd,
    { 0xa6, 0x97, 0xdf, 0x9a, 0xd1, 0x5b, 0x34, 0xfe }
};

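// The MEDIASUBTYPE GUIDs below follow the standard FOURCC subtype template
// {FOURCC-0000-0010-8000-00AA00389B71}; the leading DWORDs 0x30385056 and
// 0x30395056 are the little-endian FOURCCs 'VP80' and 'VP90'.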
const CLSID MEDIASUBTYPE_VP80 = {
    0x30385056,
    0x0000,
    0x0010,
    { 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 }
};

const CLSID MEDIASUBTYPE_VP90 = {
    0x30395056,
    0x0000,
    0x0010,
    { 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 }
};

// The CLSID of the video processor Media Foundation transform that we use for
// texture color conversion in DX11. It is defined in mfidl.h in the Windows 10
// SDK. ntverp.h provides VER_PRODUCTBUILD to detect which SDK we are compiling
// with.
#if VER_PRODUCTBUILD < 10011  // VER_PRODUCTBUILD for the 10.0.10158.0 SDK.
DEFINE_GUID(CLSID_VideoProcessorMFT,
            0x88753b26, 0x5b24, 0x49bd, 0xb2, 0xe7, 0xc, 0x44, 0x5c, 0x78,
            0xc9, 0x82);
#endif

// MF_XVP_PLAYBACK_MODE
// Data type: UINT32 (treat as BOOL)
// If this attribute is TRUE, the video processor will run in playback mode
// where it allows callers to allocate output samples and allows last frame
// regeneration (repaint).
DEFINE_GUID(MF_XVP_PLAYBACK_MODE, 0x3c5d293f, 0xad67, 0x4e29, 0xaf, 0x12,
            0xcf, 0x3e, 0x23, 0x8a, 0xcc, 0xe9);

// Defines the GUID for the Intel H264 DXVA device.
static const GUID DXVA2_Intel_ModeH264_E = {
    0x604F8E68, 0x4951, 0x4c54,
    { 0x88, 0xFE, 0xAB, 0xD2, 0x5C, 0x15, 0xB3, 0xD6 }
};

// R600, R700, Evergreen and Cayman AMD cards. These support DXVA via UVD3
// or earlier, and don't handle resolutions higher than 1920 x 1088 well.
static const DWORD g_AMDUVD3GPUList[] = {
    0x9400, 0x9401, 0x9402, 0x9403, 0x9405, 0x940a, 0x940b, 0x940f, 0x94c0,
    0x94c1, 0x94c3, 0x94c4, 0x94c5, 0x94c6, 0x94c7, 0x94c8, 0x94c9, 0x94cb,
    0x94cc, 0x94cd, 0x9580, 0x9581, 0x9583, 0x9586, 0x9587, 0x9588, 0x9589,
    0x958a, 0x958b, 0x958c, 0x958d, 0x958e, 0x958f, 0x9500, 0x9501, 0x9504,
    0x9505, 0x9506, 0x9507, 0x9508, 0x9509, 0x950f, 0x9511, 0x9515, 0x9517,
    0x9519, 0x95c0, 0x95c2, 0x95c4, 0x95c5, 0x95c6, 0x95c7, 0x95c9, 0x95cc,
    0x95cd, 0x95ce, 0x95cf, 0x9590, 0x9591, 0x9593, 0x9595, 0x9596, 0x9597,
    0x9598, 0x9599, 0x959b, 0x9610, 0x9611, 0x9612, 0x9613, 0x9614, 0x9615,
    0x9616, 0x9710, 0x9711, 0x9712, 0x9713, 0x9714, 0x9715, 0x9440, 0x9441,
    0x9442, 0x9443, 0x9444, 0x9446, 0x944a, 0x944b, 0x944c, 0x944e, 0x9450,
    0x9452, 0x9456, 0x945a, 0x945b, 0x945e, 0x9460, 0x9462, 0x946a, 0x946b,
    0x947a, 0x947b, 0x9480, 0x9487, 0x9488, 0x9489, 0x948a, 0x948f, 0x9490,
    0x9491, 0x9495, 0x9498, 0x949c, 0x949e, 0x949f, 0x9540, 0x9541, 0x9542,
    0x954e, 0x954f, 0x9552, 0x9553, 0x9555, 0x9557, 0x955f, 0x94a0, 0x94a1,
    0x94a3, 0x94b1, 0x94b3, 0x94b4, 0x94b5, 0x94b9, 0x68e0, 0x68e1, 0x68e4,
    0x68e5, 0x68e8, 0x68e9, 0x68f1, 0x68f2, 0x68f8, 0x68f9, 0x68fa, 0x68fe,
    0x68c0, 0x68c1, 0x68c7, 0x68c8, 0x68c9, 0x68d8, 0x68d9, 0x68da, 0x68de,
    0x68a0, 0x68a1, 0x68a8, 0x68a9, 0x68b0, 0x68b8, 0x68b9, 0x68ba, 0x68be,
    0x68bf, 0x6880, 0x6888, 0x6889, 0x688a, 0x688c, 0x688d, 0x6898, 0x6899,
    0x689b, 0x689e, 0x689c, 0x689d, 0x9802, 0x9803, 0x9804, 0x9805, 0x9806,
    0x9807, 0x9808, 0x9809, 0x980a, 0x9640, 0x9641, 0x9647, 0x9648, 0x964a,
    0x964b, 0x964c, 0x964e, 0x964f, 0x9642, 0x9643, 0x9644, 0x9645, 0x9649,
    0x6720, 0x6721, 0x6722, 0x6723, 0x6724, 0x6725, 0x6726, 0x6727, 0x6728,
    0x6729, 0x6738, 0x6739, 0x673e, 0x6740, 0x6741, 0x6742, 0x6743, 0x6744,
    0x6745, 0x6746, 0x6747, 0x6748, 0x6749, 0x674a, 0x6750, 0x6751, 0x6758,
    0x6759, 0x675b, 0x675d, 0x675f, 0x6840, 0x6841, 0x6842, 0x6843, 0x6849,
    0x6850, 0x6858, 0x6859, 0x6760, 0x6761, 0x6762, 0x6763, 0x6764, 0x6765,
    0x6766, 0x6767, 0x6768, 0x6770, 0x6771, 0x6772, 0x6778, 0x6779, 0x677b,
    0x6700, 0x6701, 0x6702, 0x6703, 0x6704, 0x6705, 0x6706, 0x6707, 0x6708,
    0x6709, 0x6718, 0x6719, 0x671c, 0x671d, 0x671f, 0x683D, 0x9900, 0x9901,
    0x9903, 0x9904, 0x9905, 0x9906, 0x9907, 0x9908, 0x9909, 0x990a, 0x990b,
    0x990c, 0x990d, 0x990e, 0x990f, 0x9910, 0x9913, 0x9917, 0x9918, 0x9919,
    0x9990, 0x9991, 0x9992, 0x9993, 0x9994, 0x9995, 0x9996, 0x9997, 0x9998,
    0x9999, 0x999a, 0x999b, 0x999c, 0x999d, 0x99a0, 0x99a2, 0x99a4,
};

// Legacy Intel GPUs (Second generation) which have trouble with resolutions
// higher than 1920 x 1088.
static const DWORD g_IntelLegacyGPUList[] = {
    0x102, 0x106, 0x116, 0x126,
};

// Provides scoped access to the underlying buffer in an IMFMediaBuffer
// instance.
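// Example usage (illustrative sketch; |media_buffer| and |dest| are assumed
// locals, not names from this file):
//   {
//     MediaBufferScopedPointer scoped_buffer(media_buffer.get());
//     memcpy(dest, scoped_buffer.get(), scoped_buffer.current_length());
//   }  // |media_buffer| is unlocked when |scoped_buffer| goes out of scope.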
class MediaBufferScopedPointer {
 public:
  MediaBufferScopedPointer(IMFMediaBuffer* media_buffer)
      : media_buffer_(media_buffer),
        buffer_(nullptr),
        max_length_(0),
        current_length_(0) {
    HRESULT hr = media_buffer_->Lock(&buffer_, &max_length_, &current_length_);
    CHECK(SUCCEEDED(hr));
  }

  ~MediaBufferScopedPointer() {
    HRESULT hr = media_buffer_->Unlock();
    CHECK(SUCCEEDED(hr));
  }

  uint8_t* get() {
    return buffer_;
  }

  DWORD current_length() const {
    return current_length_;
  }

 private:
  base::win::ScopedComPtr<IMFMediaBuffer> media_buffer_;
  uint8_t* buffer_;
  DWORD max_length_;
  DWORD current_length_;

  DISALLOW_COPY_AND_ASSIGN(MediaBufferScopedPointer);
};

}  // namespace

namespace content {

static const media::VideoCodecProfile kSupportedProfiles[] = {
    media::H264PROFILE_BASELINE,
    media::H264PROFILE_MAIN,
    media::H264PROFILE_HIGH,
    media::VP8PROFILE_ANY,
    media::VP9PROFILE_PROFILE0,
    media::VP9PROFILE_PROFILE1,
    media::VP9PROFILE_PROFILE2,
    media::VP9PROFILE_PROFILE3
};

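// Function pointer for MFCreateDXGIDeviceManager, resolved at runtime in
// Initialize(); the export lives in different DLLs across Windows versions
// (mfplat.dll on Windows 8+, mshtmlmedia.dll on Windows 7), so it cannot be
// bound at link time.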
CreateDXGIDeviceManager DXVAVideoDecodeAccelerator::create_dxgi_device_manager_
    = NULL;

#define RETURN_ON_FAILURE(result, log, ret)  \
  do {                                       \
    if (!(result)) {                         \
      DLOG(ERROR) << log;                    \
      return ret;                            \
    }                                        \
  } while (0)

#define RETURN_ON_HR_FAILURE(result, log, ret)  \
  RETURN_ON_FAILURE(SUCCEEDED(result),          \
                    log << ", HRESULT: 0x" << std::hex << result,  \
                    ret);

#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret)  \
  do {                                                              \
    if (!(result)) {                                                \
      DVLOG(1) << log;                                              \
      StopOnError(error_code);                                      \
      return ret;                                                   \
    }                                                               \
  } while (0)

#define RETURN_AND_NOTIFY_ON_HR_FAILURE(result, log, error_code, ret)  \
  RETURN_AND_NOTIFY_ON_FAILURE(SUCCEEDED(result),                      \
      log << ", HRESULT: 0x" << std::hex << result,                    \
      error_code, ret);
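
// Example use of the macros above (illustrative):
//   HRESULT hr = device_manager_->ResetDevice(device, reset_token);
//   RETURN_ON_HR_FAILURE(hr, "Failed to reset device", false);
// The RETURN_AND_NOTIFY_* variants additionally report |error_code| to the
// client via StopOnError() before returning.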

enum {
  // Time in milliseconds to wait between attempts to flush the batched
  // queries to the driver. Once the retry limits below are exhausted we give
  // up and allow torn/corrupt frames to be rendered.
  kFlushDecoderSurfaceTimeoutMs = 1,
  // Maximum iterations where we try to flush the d3d device.
  kMaxIterationsForD3DFlush = 4,
  // Maximum iterations where we try to flush the ANGLE device before reusing
  // the texture.
  kMaxIterationsForANGLEReuseFlush = 16,
  // We only request 5 picture buffers from the client, which are used to hold
  // the decoded samples. These buffers are then reused when the client tells
  // us that it is done with the buffer.
  kNumPictureBuffers = 5,
  // The keyed mutex should always be released before the other thread
  // attempts to acquire it, so AcquireSync should always return immediately.
  kAcquireSyncWaitMs = 0,
};

static IMFSample* CreateEmptySample() {
  base::win::ScopedComPtr<IMFSample> sample;
  HRESULT hr = MFCreateSample(sample.Receive());
  RETURN_ON_HR_FAILURE(hr, "MFCreateSample failed", NULL);
  return sample.Detach();
}

// Creates a Media Foundation sample with one buffer of length |buffer_length|
// on an |align|-byte boundary. The alignment must be a power of 2 or 0.
static IMFSample* CreateEmptySampleWithBuffer(uint32_t buffer_length,
                                              int align) {
  CHECK_GT(buffer_length, 0U);

  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateEmptySample());

  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
  HRESULT hr = E_FAIL;
  if (align == 0) {
    // Note that MFCreateMemoryBuffer is the same as
    // MFCreateAlignedMemoryBuffer with the align argument being 0.
    hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
  } else {
    hr = MFCreateAlignedMemoryBuffer(buffer_length,
                                     align - 1,
                                     buffer.Receive());
  }
  RETURN_ON_HR_FAILURE(hr, "Failed to create memory buffer for sample", NULL);

  hr = sample->AddBuffer(buffer.get());
  RETURN_ON_HR_FAILURE(hr, "Failed to add buffer to sample", NULL);

  buffer->SetCurrentLength(0);
  return sample.Detach();
}

// Creates a Media Foundation sample with one buffer containing a copy of the
// given Annex B stream data.
// If duration and sample time are not known, provide 0.
// |min_size| specifies the minimum size of the buffer (might be required by
// the decoder for input). If no alignment is required, provide 0.
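// Example usage (illustrative; mirrors the call in Decode() below):
//   sample.Attach(CreateInputSample(data, size,
//                                   input_stream_info_.cbSize,
//                                   input_stream_info_.cbAlignment));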
static IMFSample* CreateInputSample(const uint8_t* stream,
                                    uint32_t size,
                                    uint32_t min_size,
                                    int alignment) {
  CHECK(stream);
  CHECK_GT(size, 0U);
  base::win::ScopedComPtr<IMFSample> sample;
  sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
                                            alignment));
  RETURN_ON_FAILURE(sample.get(), "Failed to create empty sample", NULL);

  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
  HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from sample", NULL);

  DWORD max_length = 0;
  DWORD current_length = 0;
  uint8_t* destination = NULL;
  hr = buffer->Lock(&destination, &max_length, &current_length);
  RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", NULL);

  CHECK_EQ(current_length, 0u);
  CHECK_GE(max_length, size);
  memcpy(destination, stream, size);

  hr = buffer->SetCurrentLength(size);
  RETURN_ON_HR_FAILURE(hr, "Failed to set buffer length", NULL);

  hr = buffer->Unlock();
  RETURN_ON_HR_FAILURE(hr, "Failed to unlock buffer", NULL);

  return sample.Detach();
}

// Helper function to create a COM object instance from a DLL. The alternative
// is to use the CoCreateInstance API, which requires the COM apartment to be
// initialized, and that is not the case on the GPU main thread. We want to
// avoid initializing COM as it may have side effects.
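// Example usage (illustrative; see CreateDX11DevManager() below for the real
// call site):
//   hr = CreateCOMObjectFromDll(video_processor_dll, CLSID_VideoProcessorMFT,
//                               __uuidof(IMFTransform),
//                               video_format_converter_mft_.ReceiveVoid());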
HRESULT CreateCOMObjectFromDll(HMODULE dll, const CLSID& clsid, const IID& iid,
                               void** object) {
  if (!dll || !object)
    return E_INVALIDARG;

  using GetClassObject = HRESULT (WINAPI*)(
      const CLSID& clsid, const IID& iid, void** object);

  GetClassObject get_class_object = reinterpret_cast<GetClassObject>(
      GetProcAddress(dll, "DllGetClassObject"));
  RETURN_ON_FAILURE(
      get_class_object, "Failed to get DllGetClassObject pointer", E_FAIL);

  base::win::ScopedComPtr<IClassFactory> factory;
  HRESULT hr = get_class_object(
      clsid,
      __uuidof(IClassFactory),
      factory.ReceiveVoid());
  RETURN_ON_HR_FAILURE(hr, "DllGetClassObject failed", hr);

  hr = factory->CreateInstance(NULL, iid, object);
  return hr;
}

// Helper function to query the ANGLE device object. The template argument T
// identifies the device interface being queried: IDirect3DDevice9Ex for D3D9
// and ID3D11Device for DX11.
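// Example usage (illustrative; see CreateD3DDevManager() below for the real
// call site):
//   base::win::ScopedComPtr<IDirect3DDevice9> angle_device =
//       QueryDeviceObjectFromANGLE<IDirect3DDevice9>(EGL_D3D9_DEVICE_ANGLE);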
template<class T>
base::win::ScopedComPtr<T> QueryDeviceObjectFromANGLE(int object_type) {
  base::win::ScopedComPtr<T> device_object;

  EGLDisplay egl_display = nullptr;
  intptr_t egl_device = 0;
  intptr_t device = 0;

  {
    TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. GetHardwareDisplay");
    egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
  }

  RETURN_ON_FAILURE(
      gfx::GLSurfaceEGL::HasEGLExtension("EGL_EXT_device_query"),
      "EGL_EXT_device_query missing",
      device_object);

  PFNEGLQUERYDISPLAYATTRIBEXTPROC QueryDisplayAttribEXT = nullptr;

  {
    TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. eglGetProcAddress");

    QueryDisplayAttribEXT =
        reinterpret_cast<PFNEGLQUERYDISPLAYATTRIBEXTPROC>(eglGetProcAddress(
            "eglQueryDisplayAttribEXT"));

    RETURN_ON_FAILURE(
        QueryDisplayAttribEXT,
        "Failed to get the eglQueryDisplayAttribEXT function from ANGLE",
        device_object);
  }

  PFNEGLQUERYDEVICEATTRIBEXTPROC QueryDeviceAttribEXT = nullptr;

  {
    TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. eglGetProcAddress");

    QueryDeviceAttribEXT =
        reinterpret_cast<PFNEGLQUERYDEVICEATTRIBEXTPROC>(eglGetProcAddress(
            "eglQueryDeviceAttribEXT"));

    RETURN_ON_FAILURE(
        QueryDeviceAttribEXT,
        "Failed to get the eglQueryDeviceAttribEXT function from ANGLE",
        device_object);
  }

  {
    TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. QueryDisplayAttribEXT");

    RETURN_ON_FAILURE(
        QueryDisplayAttribEXT(egl_display, EGL_DEVICE_EXT, &egl_device),
        "The eglQueryDisplayAttribEXT function failed to get the EGL device",
        device_object);
  }

  RETURN_ON_FAILURE(
      egl_device,
      "Failed to get the EGL device",
      device_object);

  {
    TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. QueryDeviceAttribEXT");

    RETURN_ON_FAILURE(
        QueryDeviceAttribEXT(
            reinterpret_cast<EGLDeviceEXT>(egl_device), object_type, &device),
        "The eglQueryDeviceAttribEXT function failed to get the device",
        device_object);

    RETURN_ON_FAILURE(device, "Failed to get the ANGLE device", device_object);
  }

  device_object = reinterpret_cast<T*>(device);
  return device_object;
}

H264ConfigChangeDetector::H264ConfigChangeDetector()
    : last_sps_id_(0),
      last_pps_id_(0),
      config_changed_(false),
      pending_config_changed_(false) {
}

H264ConfigChangeDetector::~H264ConfigChangeDetector() {
}

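// Scans |stream| for SPS/PPS NALUs and flags a configuration change when a
// parameter set differs from the last one seen. A change observed without an
// IDR slice in the same buffer is deferred via |pending_config_changed_| and
// flagged once the next IDR slice arrives, so the decoder is reconfigured at
// an IDR boundary. Returns false if the stream cannot be parsed.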
bool H264ConfigChangeDetector::DetectConfig(const uint8_t* stream,
                                            unsigned int size) {
  std::vector<uint8_t> sps;
  std::vector<uint8_t> pps;
  media::H264NALU nalu;
  bool idr_seen = false;

  if (!parser_.get())
    parser_.reset(new media::H264Parser);

  parser_->SetStream(stream, size);
  config_changed_ = false;

  while (true) {
    media::H264Parser::Result result = parser_->AdvanceToNextNALU(&nalu);

    if (result == media::H264Parser::kEOStream)
      break;

    if (result == media::H264Parser::kUnsupportedStream) {
      DLOG(ERROR) << "Unsupported H.264 stream";
      return false;
    }

    if (result != media::H264Parser::kOk) {
      DLOG(ERROR) << "Failed to parse H.264 stream";
      return false;
    }

    switch (nalu.nal_unit_type) {
      case media::H264NALU::kSPS:
        result = parser_->ParseSPS(&last_sps_id_);
        if (result == media::H264Parser::kUnsupportedStream) {
          DLOG(ERROR) << "Unsupported SPS";
          return false;
        }

        if (result != media::H264Parser::kOk) {
          DLOG(ERROR) << "Could not parse SPS";
          return false;
        }

        sps.assign(nalu.data, nalu.data + nalu.size);
        break;

      case media::H264NALU::kPPS:
        result = parser_->ParsePPS(&last_pps_id_);
        if (result == media::H264Parser::kUnsupportedStream) {
          DLOG(ERROR) << "Unsupported PPS";
          return false;
        }
        if (result != media::H264Parser::kOk) {
          DLOG(ERROR) << "Could not parse PPS";
          return false;
        }
        pps.assign(nalu.data, nalu.data + nalu.size);
        break;

      case media::H264NALU::kIDRSlice:
        idr_seen = true;
        // If we previously detected a configuration change, and see an IDR
        // slice next time around, we need to flag a configuration change.
        if (pending_config_changed_) {
          config_changed_ = true;
          pending_config_changed_ = false;
        }
        break;

      default:
        break;
    }
  }

  if (!sps.empty() && sps != last_sps_) {
    if (!last_sps_.empty()) {
      // Flag configuration changes after we see an IDR slice.
      if (idr_seen) {
        config_changed_ = true;
      } else {
        pending_config_changed_ = true;
      }
    }
    last_sps_.swap(sps);
  }

  if (!pps.empty() && pps != last_pps_) {
    if (!last_pps_.empty()) {
      // Flag configuration changes after we see an IDR slice.
      if (idr_seen) {
        config_changed_ = true;
      } else {
        pending_config_changed_ = true;
      }
    }
    last_pps_.swap(pps);
  }
  return true;
}

// Maintains information about a DXVA picture buffer, i.e., whether it is
// available for rendering, the texture information, etc.
struct DXVAVideoDecodeAccelerator::DXVAPictureBuffer {
 public:
  static linked_ptr<DXVAPictureBuffer> Create(
      const DXVAVideoDecodeAccelerator& decoder,
      const media::PictureBuffer& buffer,
      EGLConfig egl_config);
  ~DXVAPictureBuffer();

  bool InitializeTexture(const DXVAVideoDecodeAccelerator& decoder,
                         bool use_rgb);

  bool ReusePictureBuffer();
  void ResetReuseFence();
  // Copies the output sample data to the picture buffer provided by the
  // client.
  // The dest_surface parameter contains the decoded bits.
  bool CopyOutputSampleDataToPictureBuffer(
      DXVAVideoDecodeAccelerator* decoder,
      IDirect3DSurface9* dest_surface,
      ID3D11Texture2D* dx11_texture,
      int input_buffer_id);

  bool available() const {
    return available_;
  }

  void set_available(bool available) {
    available_ = available;
  }

  int id() const {
    return picture_buffer_.id();
  }

  gfx::Size size() const {
    return picture_buffer_.size();
  }

  bool waiting_to_reuse() const { return waiting_to_reuse_; }

  gfx::GLFence* reuse_fence() { return reuse_fence_.get(); }

  // Called when the source surface |src_surface| is copied to the destination
  // |dest_surface|.
  bool CopySurfaceComplete(IDirect3DSurface9* src_surface,
                           IDirect3DSurface9* dest_surface);

 private:
  explicit DXVAPictureBuffer(const media::PictureBuffer& buffer);

  bool available_;

  // This is true if the decoder is currently waiting on the fence before
  // reusing the buffer.
  bool waiting_to_reuse_;
  media::PictureBuffer picture_buffer_;
  EGLSurface decoding_surface_;
  std::unique_ptr<gfx::GLFence> reuse_fence_;

  HANDLE texture_share_handle_;
  base::win::ScopedComPtr<IDirect3DTexture9> decoding_texture_;
  base::win::ScopedComPtr<ID3D11Texture2D> dx11_decoding_texture_;

  base::win::ScopedComPtr<IDXGIKeyedMutex> egl_keyed_mutex_;
  base::win::ScopedComPtr<IDXGIKeyedMutex> dx11_keyed_mutex_;

  // This is the last value that was used to release the keyed mutex.
  uint64_t keyed_mutex_value_;

  // The following |IDirect3DSurface9| interface pointers are used to hold
  // references on the surfaces during the course of a StretchRect operation
  // to copy the source surface to the target. The references are released
  // when the StretchRect operation (i.e., the copy) completes.
  base::win::ScopedComPtr<IDirect3DSurface9> decoder_surface_;
  base::win::ScopedComPtr<IDirect3DSurface9> target_surface_;

  // This ID3D11Texture2D interface pointer is used to hold a reference to the
  // decoder texture during the course of a copy operation. This reference is
  // released when the copy completes.
  base::win::ScopedComPtr<ID3D11Texture2D> decoder_dx11_texture_;

  // Set to true if RGB is supported by the texture.
  // Defaults to true.
  bool use_rgb_;

  DISALLOW_COPY_AND_ASSIGN(DXVAPictureBuffer);
};

// static
linked_ptr<DXVAVideoDecodeAccelerator::DXVAPictureBuffer>
DXVAVideoDecodeAccelerator::DXVAPictureBuffer::Create(
    const DXVAVideoDecodeAccelerator& decoder,
    const media::PictureBuffer& buffer,
    EGLConfig egl_config) {
  linked_ptr<DXVAPictureBuffer> picture_buffer(new DXVAPictureBuffer(buffer));

  EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();

  EGLint use_rgb = 1;
  eglGetConfigAttrib(egl_display, egl_config, EGL_BIND_TO_TEXTURE_RGB,
                     &use_rgb);

  if (!picture_buffer->InitializeTexture(decoder, !!use_rgb))
    return linked_ptr<DXVAPictureBuffer>(nullptr);

  EGLint attrib_list[] = {
      EGL_WIDTH, buffer.size().width(),
      EGL_HEIGHT, buffer.size().height(),
      EGL_TEXTURE_FORMAT, use_rgb ? EGL_TEXTURE_RGB : EGL_TEXTURE_RGBA,
      EGL_TEXTURE_TARGET, EGL_TEXTURE_2D,
      EGL_NONE
  };

  picture_buffer->decoding_surface_ = eglCreatePbufferFromClientBuffer(
      egl_display, EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE,
      picture_buffer->texture_share_handle_, egl_config, attrib_list);
  RETURN_ON_FAILURE(picture_buffer->decoding_surface_,
                    "Failed to create surface",
                    linked_ptr<DXVAPictureBuffer>(NULL));
  if (decoder.d3d11_device_ && decoder.use_keyed_mutex_) {
    void* keyed_mutex = nullptr;
    EGLBoolean ret = eglQuerySurfacePointerANGLE(
        egl_display, picture_buffer->decoding_surface_,
        EGL_DXGI_KEYED_MUTEX_ANGLE, &keyed_mutex);
    RETURN_ON_FAILURE(keyed_mutex && ret == EGL_TRUE,
                      "Failed to query ANGLE keyed mutex",
                      linked_ptr<DXVAPictureBuffer>(nullptr));
    picture_buffer->egl_keyed_mutex_ =
        base::win::ScopedComPtr<IDXGIKeyedMutex>(
            static_cast<IDXGIKeyedMutex*>(keyed_mutex));
  }
  picture_buffer->use_rgb_ = !!use_rgb;
  return picture_buffer;
}

bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::InitializeTexture(
    const DXVAVideoDecodeAccelerator& decoder,
    bool use_rgb) {
  DCHECK(!texture_share_handle_);
  if (decoder.d3d11_device_) {
    D3D11_TEXTURE2D_DESC desc;
    desc.Width = picture_buffer_.size().width();
    desc.Height = picture_buffer_.size().height();
    desc.MipLevels = 1;
    desc.ArraySize = 1;
    desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
    desc.SampleDesc.Count = 1;
    desc.SampleDesc.Quality = 0;
    desc.Usage = D3D11_USAGE_DEFAULT;
    desc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
    desc.CPUAccessFlags = 0;
    desc.MiscFlags = decoder.use_keyed_mutex_
                         ? D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX
                         : D3D11_RESOURCE_MISC_SHARED;

    HRESULT hr = decoder.d3d11_device_->CreateTexture2D(
        &desc, nullptr, dx11_decoding_texture_.Receive());
    RETURN_ON_HR_FAILURE(hr, "Failed to create texture", false);
    if (decoder.use_keyed_mutex_) {
      hr = dx11_keyed_mutex_.QueryFrom(dx11_decoding_texture_.get());
      RETURN_ON_HR_FAILURE(hr, "Failed to get keyed mutex", false);
    }

    base::win::ScopedComPtr<IDXGIResource> resource;
    hr = resource.QueryFrom(dx11_decoding_texture_.get());
    DCHECK(SUCCEEDED(hr));
    hr = resource->GetSharedHandle(&texture_share_handle_);
    RETURN_ON_FAILURE(SUCCEEDED(hr) && texture_share_handle_,
                      "Failed to query shared handle", false);

  } else {
    HRESULT hr = E_FAIL;
    hr = decoder.d3d9_device_ex_->CreateTexture(
        picture_buffer_.size().width(), picture_buffer_.size().height(), 1,
        D3DUSAGE_RENDERTARGET, use_rgb ? D3DFMT_X8R8G8B8 : D3DFMT_A8R8G8B8,
        D3DPOOL_DEFAULT, decoding_texture_.Receive(), &texture_share_handle_);
    RETURN_ON_HR_FAILURE(hr, "Failed to create texture", false);
    RETURN_ON_FAILURE(texture_share_handle_, "Failed to query shared handle",
                      false);
  }
  return true;
}

DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer(
    const media::PictureBuffer& buffer)
    : available_(true),
      waiting_to_reuse_(false),
      picture_buffer_(buffer),
      decoding_surface_(NULL),
      texture_share_handle_(nullptr),
      keyed_mutex_value_(0),
      use_rgb_(true) {}

DXVAVideoDecodeAccelerator::DXVAPictureBuffer::~DXVAPictureBuffer() {
  if (decoding_surface_) {
    EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();

    eglReleaseTexImage(
        egl_display,
        decoding_surface_,
        EGL_BACK_BUFFER);

    eglDestroySurface(
        egl_display,
        decoding_surface_);
    decoding_surface_ = NULL;
  }
}

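// Releases the EGL texture binding and any surface/texture references held
// for the pending copy, marks the buffer as available again, and, when keyed
// mutexes are in use, releases the mutex so the decoder may write to the
// shared texture.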
bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ReusePictureBuffer() {
  DCHECK(decoding_surface_);
  EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
  eglReleaseTexImage(
      egl_display,
      decoding_surface_,
      EGL_BACK_BUFFER);
  decoder_surface_.Release();
  target_surface_.Release();
  decoder_dx11_texture_.Release();
  waiting_to_reuse_ = false;
  set_available(true);
  if (egl_keyed_mutex_) {
    HRESULT hr = egl_keyed_mutex_->ReleaseSync(++keyed_mutex_value_);
    RETURN_ON_FAILURE(hr == S_OK, "Could not release sync mutex", false);
  }
  return true;
}

void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ResetReuseFence() {
  if (!reuse_fence_ || !reuse_fence_->ResetSupported())
    reuse_fence_.reset(gfx::GLFence::Create());
  else
    reuse_fence_->ResetState();
  waiting_to_reuse_ = true;
}

bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::
    CopyOutputSampleDataToPictureBuffer(
        DXVAVideoDecodeAccelerator* decoder,
        IDirect3DSurface9* dest_surface,
        ID3D11Texture2D* dx11_texture,
        int input_buffer_id) {
  DCHECK(dest_surface || dx11_texture);
  if (dx11_texture) {
    // Grab a reference on the decoder texture. This reference will be released
    // when we receive a notification that the copy was completed or when the
    // DXVAPictureBuffer instance is destroyed.
    decoder_dx11_texture_ = dx11_texture;
    decoder->CopyTexture(dx11_texture, dx11_decoding_texture_.get(),
                         dx11_keyed_mutex_, keyed_mutex_value_, NULL, id(),
                         input_buffer_id);
    return true;
  }
  D3DSURFACE_DESC surface_desc;
  HRESULT hr = dest_surface->GetDesc(&surface_desc);
  RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false);

  D3DSURFACE_DESC texture_desc;
  decoding_texture_->GetLevelDesc(0, &texture_desc);

  if (texture_desc.Width != surface_desc.Width ||
      texture_desc.Height != surface_desc.Height) {
    NOTREACHED() << "Decode surface of different dimension than texture";
    return false;
  }

  hr = decoder->d3d9_->CheckDeviceFormatConversion(
      D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, surface_desc.Format,
      use_rgb_ ? D3DFMT_X8R8G8B8 : D3DFMT_A8R8G8B8);
  RETURN_ON_HR_FAILURE(hr, "Device does not support format conversion", false);

  // The same picture buffer can be reused for a different frame. Release the
  // target surface and the decoder references here.
  target_surface_.Release();
  decoder_surface_.Release();

  // Grab a reference on the decoder surface and the target surface. These
  // references will be released when we receive a notification that the
  // copy was completed or when the DXVAPictureBuffer instance is destroyed.
  // We hold references here as it is easier to manage their lifetimes.
  hr = decoding_texture_->GetSurfaceLevel(0, target_surface_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get surface from texture", false);

  decoder_surface_ = dest_surface;

  decoder->CopySurface(decoder_surface_.get(), target_surface_.get(), id(),
                       input_buffer_id);
  return true;
}

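// Called once the copy from the decoder surface/texture into this picture
// buffer has completed. Drops the references taken for the copy, acquires
// the keyed mutex (if one is in use) so ANGLE may sample the texture, and
// rebinds the EGL surface to the client's GL texture.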
bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::CopySurfaceComplete(
    IDirect3DSurface9* src_surface,
    IDirect3DSurface9* dest_surface) {
  DCHECK(!available());

  GLint current_texture = 0;
  glGetIntegerv(GL_TEXTURE_BINDING_2D, &current_texture);

  glBindTexture(GL_TEXTURE_2D, picture_buffer_.texture_ids()[0]);

  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

  if (src_surface && dest_surface) {
    DCHECK_EQ(src_surface, decoder_surface_.get());
    DCHECK_EQ(dest_surface, target_surface_.get());
    decoder_surface_.Release();
    target_surface_.Release();
  } else {
    DCHECK(decoder_dx11_texture_.get());
    decoder_dx11_texture_.Release();
  }
  if (egl_keyed_mutex_) {
    keyed_mutex_value_++;
    HRESULT result =
        egl_keyed_mutex_->AcquireSync(keyed_mutex_value_, kAcquireSyncWaitMs);
    RETURN_ON_FAILURE(result == S_OK, "Could not acquire sync mutex", false);
  }

  EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
  eglBindTexImage(
      egl_display,
      decoding_surface_,
      EGL_BACK_BUFFER);

  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
  glBindTexture(GL_TEXTURE_2D, current_texture);
  return true;
}

DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
    int32_t buffer_id,
    IMFSample* sample)
    : input_buffer_id(buffer_id), picture_buffer_id(-1) {
  output_sample.Attach(sample);
}

DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
    const PendingSampleInfo& other) = default;

DXVAVideoDecodeAccelerator::PendingSampleInfo::~PendingSampleInfo() {}

DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
    const GetGLContextCallback& get_gl_context_cb,
    const MakeGLContextCurrentCallback& make_context_current_cb,
    bool enable_accelerated_vpx_decode)
    : client_(NULL),
      dev_manager_reset_token_(0),
      dx11_dev_manager_reset_token_(0),
      egl_config_(NULL),
      state_(kUninitialized),
      pictures_requested_(false),
      inputs_before_decode_(0),
      sent_drain_message_(false),
      get_gl_context_cb_(get_gl_context_cb),
      make_context_current_cb_(make_context_current_cb),
      codec_(media::kUnknownVideoCodec),
      decoder_thread_("DXVAVideoDecoderThread"),
      pending_flush_(false),
      use_dx11_(false),
      use_keyed_mutex_(false),
      dx11_video_format_converter_media_type_needs_init_(true),
      using_angle_device_(false),
      enable_accelerated_vpx_decode_(enable_accelerated_vpx_decode),
      weak_this_factory_(this) {
  weak_ptr_ = weak_this_factory_.GetWeakPtr();
  memset(&input_stream_info_, 0, sizeof(input_stream_info_));
  memset(&output_stream_info_, 0, sizeof(output_stream_info_));
}

DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() {
  client_ = NULL;
}

bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
                                            Client* client) {
  if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
    NOTREACHED() << "GL callbacks are required for this VDA";
    return false;
  }

  if (config.is_encrypted) {
    NOTREACHED() << "Encrypted streams are not supported for this VDA";
    return false;
  }

  if (config.output_mode != Config::OutputMode::ALLOCATE) {
    NOTREACHED() << "Only ALLOCATE OutputMode is supported by this VDA";
    return false;
  }

  client_ = client;

  main_thread_task_runner_ = base::MessageLoop::current()->task_runner();

  bool profile_supported = false;
  for (const auto& supported_profile : kSupportedProfiles) {
    if (config.profile == supported_profile) {
      profile_supported = true;
      break;
    }
  }
  if (!profile_supported) {
    RETURN_AND_NOTIFY_ON_FAILURE(false,
        "Unsupported h.264, vp8, or vp9 profile", PLATFORM_FAILURE, false);
  }

  // Not all versions of Windows 7 and later include Media Foundation DLLs.
  // Instead of crashing while delay loading the DLL when calling MFStartup()
  // below, probe whether we can successfully load the DLL now.
  // See http://crbug.com/339678 for details.
  HMODULE dxgi_manager_dll = ::GetModuleHandle(L"MFPlat.dll");
  RETURN_ON_FAILURE(dxgi_manager_dll, "MFPlat.dll is required for decoding",
                    false);

  // On Windows 8+ mfplat.dll provides the MFCreateDXGIDeviceManager API.
  // On Windows 7 mshtmlmedia.dll provides it.

  // TODO(ananta)
  // The code below works, as in we can create the DX11 device manager for
  // Windows 7. However the IMFTransform we use for texture conversion and
  // copy does not exist on Windows 7. Look into an alternate approach
  // and enable the code below.
#if defined(ENABLE_DX11_FOR_WIN7)
  if (base::win::GetVersion() == base::win::VERSION_WIN7) {
    dxgi_manager_dll = ::GetModuleHandle(L"mshtmlmedia.dll");
    RETURN_ON_FAILURE(dxgi_manager_dll,
                      "mshtmlmedia.dll is required for decoding", false);
  }
#endif
  // If we don't find the MFCreateDXGIDeviceManager API we fall back to D3D9
  // decoding.
  if (dxgi_manager_dll && !create_dxgi_device_manager_) {
    create_dxgi_device_manager_ = reinterpret_cast<CreateDXGIDeviceManager>(
        ::GetProcAddress(dxgi_manager_dll, "MFCreateDXGIDeviceManager"));
  }

  RETURN_AND_NOTIFY_ON_FAILURE(
      gfx::g_driver_egl.ext.b_EGL_ANGLE_surface_d3d_texture_2d_share_handle,
      "EGL_ANGLE_surface_d3d_texture_2d_share_handle unavailable",
      PLATFORM_FAILURE,
      false);

  RETURN_AND_NOTIFY_ON_FAILURE(gfx::GLFence::IsSupported(),
                               "GL fences are unsupported", PLATFORM_FAILURE,
                               false);

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state == kUninitialized),
      "Initialize: invalid state: " << state, ILLEGAL_STATE, false);

  media::InitializeMediaFoundation();

  RETURN_AND_NOTIFY_ON_FAILURE(InitDecoder(config.profile),
      "Failed to initialize decoder", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(GetStreamsInfoAndBufferReqs(),
      "Failed to get input/output stream info.", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0),
      "Send MFT_MESSAGE_NOTIFY_BEGIN_STREAMING notification failed",
      PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0),
      "Send MFT_MESSAGE_NOTIFY_START_OF_STREAM notification failed",
      PLATFORM_FAILURE, false);

  config_ = config;

  config_change_detector_.reset(new H264ConfigChangeDetector);

  SetState(kNormal);

  StartDecoderThread();
  return true;
}

bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() {
  TRACE_EVENT0("gpu", "DXVAVideoDecodeAccelerator_CreateD3DDevManager");

  HRESULT hr = E_FAIL;

  hr = Direct3DCreate9Ex(D3D_SDK_VERSION, d3d9_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Direct3DCreate9Ex failed", false);

  hr = d3d9_->CheckDeviceFormatConversion(
      D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL,
      static_cast<D3DFORMAT>(MAKEFOURCC('N', 'V', '1', '2')),
      D3DFMT_X8R8G8B8);
  RETURN_ON_HR_FAILURE(hr,
      "D3D9 driver does not support H/W format conversion", false);

  base::win::ScopedComPtr<IDirect3DDevice9> angle_device =
      QueryDeviceObjectFromANGLE<IDirect3DDevice9>(EGL_D3D9_DEVICE_ANGLE);
  if (angle_device.get())
    using_angle_device_ = true;

  if (using_angle_device_) {
    hr = d3d9_device_ex_.QueryFrom(angle_device.get());
    RETURN_ON_HR_FAILURE(hr,
        "QueryInterface for IDirect3DDevice9Ex from angle device failed",
        false);
  } else {
    D3DPRESENT_PARAMETERS present_params = {0};
    present_params.BackBufferWidth = 1;
    present_params.BackBufferHeight = 1;
    present_params.BackBufferFormat = D3DFMT_UNKNOWN;
    present_params.BackBufferCount = 1;
    present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
    present_params.hDeviceWindow = NULL;
    present_params.Windowed = TRUE;
    present_params.Flags = D3DPRESENTFLAG_VIDEO;
    present_params.FullScreen_RefreshRateInHz = 0;
    present_params.PresentationInterval = 0;

    hr = d3d9_->CreateDeviceEx(D3DADAPTER_DEFAULT,
                               D3DDEVTYPE_HAL,
                               NULL,
                               D3DCREATE_FPU_PRESERVE |
                                   D3DCREATE_MIXED_VERTEXPROCESSING |
                                   D3DCREATE_MULTITHREADED,
                               &present_params,
                               NULL,
                               d3d9_device_ex_.Receive());
    RETURN_ON_HR_FAILURE(hr, "Failed to create D3D device", false);
  }

  hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token_,
                                         device_manager_.Receive());
  RETURN_ON_HR_FAILURE(hr, "DXVA2CreateDirect3DDeviceManager9 failed", false);

  hr = device_manager_->ResetDevice(d3d9_device_ex_.get(),
                                    dev_manager_reset_token_);
  RETURN_ON_HR_FAILURE(hr, "Failed to reset device", false);

  hr = d3d9_device_ex_->CreateQuery(D3DQUERYTYPE_EVENT, query_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to create D3D device query", false);
  // Ensure the query_ API works (to avoid an infinite loop later in
  // CopyOutputSampleDataToPictureBuffer).
  hr = query_->Issue(D3DISSUE_END);
  RETURN_ON_HR_FAILURE(hr, "Failed to issue END test query", false);
  return true;
}

bool DXVAVideoDecodeAccelerator::CreateDX11DevManager() {
  HRESULT hr = create_dxgi_device_manager_(&dx11_dev_manager_reset_token_,
                                           d3d11_device_manager_.Receive());
  RETURN_ON_HR_FAILURE(hr, "MFCreateDXGIDeviceManager failed", false);

  // This array defines the set of DirectX hardware feature levels we support.
  // The ordering MUST be preserved. All applications are assumed to support
  // 9.1 unless otherwise stated by the application.
  D3D_FEATURE_LEVEL feature_levels[] = {
      D3D_FEATURE_LEVEL_11_1,
      D3D_FEATURE_LEVEL_11_0,
      D3D_FEATURE_LEVEL_10_1,
      D3D_FEATURE_LEVEL_10_0,
      D3D_FEATURE_LEVEL_9_3,
      D3D_FEATURE_LEVEL_9_2,
      D3D_FEATURE_LEVEL_9_1
  };

  UINT flags = D3D11_CREATE_DEVICE_VIDEO_SUPPORT;

#if defined _DEBUG
  flags |= D3D11_CREATE_DEVICE_DEBUG;
#endif

  D3D_FEATURE_LEVEL feature_level_out = D3D_FEATURE_LEVEL_11_0;
  hr = D3D11CreateDevice(NULL,
                         D3D_DRIVER_TYPE_HARDWARE,
                         NULL,
                         flags,
                         feature_levels,
                         arraysize(feature_levels),
                         D3D11_SDK_VERSION,
                         d3d11_device_.Receive(),
                         &feature_level_out,
                         d3d11_device_context_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to create DX11 device", false);

  // Enable multithreaded mode on the device. This ensures that accesses to
  // the context are synchronized across threads; both the Media Foundation
  // decoder threads and the decoder thread access the context via the video
  // format conversion transform.
  hr = multi_threaded_.QueryFrom(d3d11_device_.get());
  RETURN_ON_HR_FAILURE(hr, "Failed to query ID3D10Multithread", false);
  multi_threaded_->SetMultithreadProtected(TRUE);

  hr = d3d11_device_manager_->ResetDevice(d3d11_device_.get(),
                                          dx11_dev_manager_reset_token_);
  RETURN_ON_HR_FAILURE(hr, "Failed to reset device", false);

  D3D11_QUERY_DESC query_desc;
  query_desc.Query = D3D11_QUERY_EVENT;
  query_desc.MiscFlags = 0;
  hr = d3d11_device_->CreateQuery(
      &query_desc,
      d3d11_query_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to create DX11 device query", false);

  HMODULE video_processor_dll = ::GetModuleHandle(L"msvproc.dll");
  RETURN_ON_FAILURE(video_processor_dll, "Failed to load video processor",
                    false);

  hr = CreateCOMObjectFromDll(
      video_processor_dll,
      CLSID_VideoProcessorMFT,
      __uuidof(IMFTransform),
      video_format_converter_mft_.ReceiveVoid());
  if (FAILED(hr)) {
    base::debug::Alias(&hr);
    // TODO(ananta)
    // Remove this CHECK when the change to use DX11 for H/W decoding
    // stabilizes.
    CHECK(false);
  }

  RETURN_ON_HR_FAILURE(hr, "Failed to create video format converter", false);

  base::win::ScopedComPtr<IMFAttributes> converter_attributes;
  hr = video_format_converter_mft_->GetAttributes(
      converter_attributes.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get converter attributes", false);

  hr = converter_attributes->SetUINT32(MF_XVP_PLAYBACK_MODE, TRUE);
  RETURN_ON_HR_FAILURE(
      hr,
      "Failed to set MF_XVP_PLAYBACK_MODE attribute on converter",
      false);

  hr = converter_attributes->SetUINT32(MF_LOW_LATENCY, FALSE);
  RETURN_ON_HR_FAILURE(
      hr,
      "Failed to set MF_LOW_LATENCY attribute on converter",
      false);
  return true;
}

void DXVAVideoDecodeAccelerator::Decode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  // SharedMemory will take over ownership of the handle.
  base::SharedMemory shm(bitstream_buffer.handle(), true);

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE(
      (state == kNormal || state == kStopped || state == kFlushing),
      "Invalid state: " << state, ILLEGAL_STATE,);
  if (bitstream_buffer.id() < 0) {
    RETURN_AND_NOTIFY_ON_FAILURE(
        false, "Invalid bitstream_buffer, id: " << bitstream_buffer.id(),
        INVALID_ARGUMENT,);
  }

  base::win::ScopedComPtr<IMFSample> sample;
  RETURN_AND_NOTIFY_ON_FAILURE(shm.Map(bitstream_buffer.size()),
                               "Failed in base::SharedMemory::Map",
                               PLATFORM_FAILURE,);

  sample.Attach(CreateInputSample(
      reinterpret_cast<const uint8_t*>(shm.memory()), bitstream_buffer.size(),
      std::min<uint32_t>(bitstream_buffer.size(), input_stream_info_.cbSize),
      input_stream_info_.cbAlignment));
  RETURN_AND_NOTIFY_ON_FAILURE(sample.get(), "Failed to create input sample",
                               PLATFORM_FAILURE,);

  RETURN_AND_NOTIFY_ON_HR_FAILURE(sample->SetSampleTime(bitstream_buffer.id()),
      "Failed to associate input buffer id with sample", PLATFORM_FAILURE,);

  decoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::DecodeInternal,
                 base::Unretained(this), sample));
}

void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
    const std::vector<media::PictureBuffer>& buffers) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state != kUninitialized),
      "Invalid state: " << state, ILLEGAL_STATE,);
  RETURN_AND_NOTIFY_ON_FAILURE((kNumPictureBuffers >= buffers.size()),
      "Failed to provide requested picture buffers. (Got " << buffers.size() <<
      ", requested " << kNumPictureBuffers << ")", INVALID_ARGUMENT,);

  // Copy the picture buffers provided by the client to the available list,
  // and mark these buffers as available for use.
  for (size_t buffer_index = 0; buffer_index < buffers.size();
       ++buffer_index) {
    DCHECK_LE(1u, buffers[buffer_index].texture_ids().size());
    linked_ptr<DXVAPictureBuffer> picture_buffer =
        DXVAPictureBuffer::Create(*this, buffers[buffer_index], egl_config_);
    RETURN_AND_NOTIFY_ON_FAILURE(picture_buffer.get(),
        "Failed to allocate picture buffer", PLATFORM_FAILURE,);

    bool inserted = output_picture_buffers_.insert(std::make_pair(
        buffers[buffer_index].id(), picture_buffer)).second;
    DCHECK(inserted);
  }

  ProcessPendingSamples();
  if (pending_flush_) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                   base::Unretained(this)));
  }
}

void DXVAVideoDecodeAccelerator::ReusePictureBuffer(
    int32_t picture_buffer_id) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state != kUninitialized),
      "Invalid state: " << state, ILLEGAL_STATE,);

  if (output_picture_buffers_.empty() && stale_output_picture_buffers_.empty())
    return;

  OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
  // If we didn't find the picture id in the |output_picture_buffers_| map we
  // try the |stale_output_picture_buffers_| map, as this may have been an
  // output picture buffer from before a resolution change, that at resolution
  // change time had yet to be displayed. The client is calling us back to tell
  // us that we can now recycle this picture buffer, so if we were waiting to
  // dispose of it we now can.
  if (it == output_picture_buffers_.end()) {
    if (!stale_output_picture_buffers_.empty()) {
      it = stale_output_picture_buffers_.find(picture_buffer_id);
      RETURN_AND_NOTIFY_ON_FAILURE(it != stale_output_picture_buffers_.end(),
          "Invalid picture id: " << picture_buffer_id, INVALID_ARGUMENT,);
      main_thread_task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&DXVAVideoDecodeAccelerator::DeferredDismissStaleBuffer,
                     weak_this_factory_.GetWeakPtr(), picture_buffer_id));
    }
    return;
  }

  if (it->second->available() || it->second->waiting_to_reuse())
    return;

  if (use_keyed_mutex_ || using_angle_device_) {
    RETURN_AND_NOTIFY_ON_FAILURE(it->second->ReusePictureBuffer(),
                                 "Failed to reuse picture buffer",
                                 PLATFORM_FAILURE,);

    ProcessPendingSamples();
    if (pending_flush_) {
      decoder_thread_task_runner_->PostTask(
          FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                                base::Unretained(this)));
    }
  } else {
    RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
                                 "Failed to make context current",
                                 PLATFORM_FAILURE,);
    it->second->ResetReuseFence();

    WaitForOutputBuffer(picture_buffer_id, 0);
  }
}

void DXVAVideoDecodeAccelerator::WaitForOutputBuffer(int32_t picture_buffer_id,
                                                     int count) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
  OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
  if (it == output_picture_buffers_.end())
    return;

  DXVAPictureBuffer* picture_buffer = it->second.get();

  DCHECK(!picture_buffer->available());
  DCHECK(picture_buffer->waiting_to_reuse());

  gfx::GLFence* fence = picture_buffer->reuse_fence();
  RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
                               "Failed to make context current",
                               PLATFORM_FAILURE,);
  if (count <= kMaxIterationsForANGLEReuseFlush && !fence->HasCompleted()) {
    main_thread_task_runner_->PostDelayedTask(
        FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::WaitForOutputBuffer,
                              weak_this_factory_.GetWeakPtr(),
                              picture_buffer_id, count + 1),
        base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
    return;
  }
  RETURN_AND_NOTIFY_ON_FAILURE(picture_buffer->ReusePictureBuffer(),
                               "Failed to reuse picture buffer",
                               PLATFORM_FAILURE,);

  ProcessPendingSamples();
  if (pending_flush_) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                   base::Unretained(this)));
  }
}

void DXVAVideoDecodeAccelerator::Flush() {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  DVLOG(1) << "DXVAVideoDecodeAccelerator::Flush";

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state == kNormal || state == kStopped),
      "Unexpected decoder state: " << state, ILLEGAL_STATE,);

  SetState(kFlushing);

  pending_flush_ = true;

  decoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                 base::Unretained(this)));
}

void DXVAVideoDecodeAccelerator::Reset() {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  DVLOG(1) << "DXVAVideoDecodeAccelerator::Reset";

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state == kNormal || state == kStopped),
      "Reset: invalid state: " << state, ILLEGAL_STATE,);

  decoder_thread_.Stop();

  SetState(kResetting);

  // If we have pending output frames waiting for display then we drop those
  // frames and set the corresponding picture buffer as available.
  PendingOutputSamples::iterator index;
  for (index = pending_output_samples_.begin();
       index != pending_output_samples_.end();
       ++index) {
    if (index->picture_buffer_id != -1) {
      OutputBuffers::iterator it = output_picture_buffers_.find(
          index->picture_buffer_id);
      if (it != output_picture_buffers_.end()) {
        DXVAPictureBuffer* picture_buffer = it->second.get();
        picture_buffer->ReusePictureBuffer();
      }
    }
  }

  pending_output_samples_.clear();

  NotifyInputBuffersDropped();

  RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0),
      "Reset: Failed to send message.", PLATFORM_FAILURE,);

  main_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::NotifyResetDone,
                 weak_this_factory_.GetWeakPtr()));

  StartDecoderThread();
  SetState(kNormal);
}

void DXVAVideoDecodeAccelerator::Destroy() {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
  Invalidate();
  delete this;
}
1431 | |
1432 bool DXVAVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread( | |
1433 const base::WeakPtr<Client>& decode_client, | |
1434 const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) { | |
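// Decoding on a separate client-provided thread is not supported; all decode
// work runs on the decoder thread owned by this class.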
1435 return false; | |
1436 } | |
1437 | |
1438 GLenum DXVAVideoDecodeAccelerator::GetSurfaceInternalFormat() const { | |
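// Decoded frames are color-converted to an RGB format before being handed to
// the client (StretchRect on D3D9, the video processor MFT on DX11), so the
// surface format reported to the client is BGRA.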
1439 return GL_BGRA_EXT; | |
1440 } | |
1441 | |
1442 // static | |
1443 media::VideoDecodeAccelerator::SupportedProfiles | |
1444 DXVAVideoDecodeAccelerator::GetSupportedProfiles() { | |
1445 TRACE_EVENT0("gpu,startup", | |
1446 "DXVAVideoDecodeAccelerator::GetSupportedProfiles"); | |
1447 | |
1448 // TODO(henryhsu): Need to ensure the profiles are actually supported. | |
1449 SupportedProfiles profiles; | |
1450 for (const auto& supported_profile : kSupportedProfiles) { | |
1451 std::pair<int, int> min_resolution = GetMinResolution(supported_profile); | |
1452 std::pair<int, int> max_resolution = GetMaxResolution(supported_profile); | |
1453 | |
1454 SupportedProfile profile; | |
1455 profile.profile = supported_profile; | |
1456 profile.min_resolution.SetSize(min_resolution.first, min_resolution.second); | |
1457 profile.max_resolution.SetSize(max_resolution.first, max_resolution.second); | |
1458 profiles.push_back(profile); | |
1459 } | |
1460 return profiles; | |
1461 } | |
1462 | |
1463 // static | |
1464 void DXVAVideoDecodeAccelerator::PreSandboxInitialization() { | |
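// Preload the DLLs needed for decoding while the GPU process can still
// access the disk; library loads after the sandbox is engaged would fail.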
1465 ::LoadLibrary(L"MFPlat.dll"); | |
1466 ::LoadLibrary(L"msmpeg2vdec.dll"); | |
1467 ::LoadLibrary(L"mf.dll"); | |
1468 ::LoadLibrary(L"dxva2.dll"); | |
1469 | |
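// On Windows 8+ the video processor MFT is provided by msvproc.dll; the
// Windows 7 DX11 path (when enabled) appears to rely on mshtmlmedia.dll
// instead.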
1470 if (base::win::GetVersion() > base::win::VERSION_WIN7) { | |
1471 LoadLibrary(L"msvproc.dll"); | |
1472 } else { | |
1473 #if defined(ENABLE_DX11_FOR_WIN7) | |
1474 LoadLibrary(L"mshtmlmedia.dll"); | |
1475 #endif | |
1476 } | |
1477 } | |
1478 | |
1479 // static | |
1480 std::pair<int, int> DXVAVideoDecodeAccelerator::GetMinResolution( | |
1481 media::VideoCodecProfile profile) { | |
1482 TRACE_EVENT0("gpu,startup", | |
1483 "DXVAVideoDecodeAccelerator::GetMinResolution"); | |
1484 std::pair<int, int> min_resolution; | |
1485 if (profile >= media::H264PROFILE_BASELINE && | |
1486 profile <= media::H264PROFILE_HIGH) { | |
1487 // Windows Media Foundation H.264 decoding does not support decoding videos | |
1488 // with any dimension smaller than 48 pixels: | |
1489 // http://msdn.microsoft.com/en-us/library/windows/desktop/dd797815 | |
1490 min_resolution = std::make_pair(48, 48); | |
1491 } else { | |
1492 // TODO(ananta) | |
1493 // Detect this properly for VP8/VP9 profiles. | |
1494 min_resolution = std::make_pair(16, 16); | |
1495 } | |
1496 return min_resolution; | |
1497 } | |
1498 | |
1499 // static | |
1500 std::pair<int, int> DXVAVideoDecodeAccelerator::GetMaxResolution( | |
1501 const media::VideoCodecProfile profile) { | |
1502 TRACE_EVENT0("gpu,startup", | |
1503 "DXVAVideoDecodeAccelerator::GetMaxResolution"); | |
1504 std::pair<int, int> max_resolution; | |
1505 if (profile >= media::H264PROFILE_BASELINE && | |
1506 profile <= media::H264PROFILE_HIGH) { | |
1507 max_resolution = GetMaxH264Resolution(); | |
1508 } else { | |
1509 // TODO(ananta) | |
1510 // Detect this properly for VP8/VP9 profiles. | |
1511 max_resolution = std::make_pair(4096, 2160); | |
1512 } | |
1513 return max_resolution; | |
1514 } | |
1515 | |
1516 std::pair<int, int> DXVAVideoDecodeAccelerator::GetMaxH264Resolution() { | |
1517 TRACE_EVENT0("gpu,startup", | |
1518 "DXVAVideoDecodeAccelerator::GetMaxH264Resolution"); | |
1519 // The H.264 resolution detection operation is expensive. This static flag | |
1520 // allows us to run the detection once. | |
1521 static bool resolution_detected = false; | |
1522 // Use 1088 to account for 16x16 macroblocks. | |
1523 static std::pair<int, int> max_resolution = std::make_pair(1920, 1088); | |
1524 if (resolution_detected) | |
1525 return max_resolution; | |
1526 | |
1527 resolution_detected = true; | |
1528 | |
1529 // On Windows 7 the maximum resolution supported by media foundation is | |
1530 // 1920 x 1088. | |
1531 if (base::win::GetVersion() == base::win::VERSION_WIN7) | |
1532 return max_resolution; | |
1533 | |
1534 // To detect whether a driver supports the desired resolutions, we try to
1535 // create a DXVA decoder instance for that resolution and profile. If that
1536 // succeeds, we assume the driver supports H/W H.264 decoding at that resolution.
1537 HRESULT hr = E_FAIL; | |
1538 base::win::ScopedComPtr<ID3D11Device> device; | |
1539 | |
1540 { | |
1541 TRACE_EVENT0("gpu,startup", | |
1542 "GetMaxH264Resolution. QueryDeviceObjectFromANGLE"); | |
1543 | |
1544 device = QueryDeviceObjectFromANGLE<ID3D11Device>(EGL_D3D11_DEVICE_ANGLE); | |
1545 if (!device.get()) | |
1546 return max_resolution; | |
1547 } | |
1548 | |
1549 base::win::ScopedComPtr<ID3D11VideoDevice> video_device; | |
1550 hr = device.QueryInterface(IID_ID3D11VideoDevice, | |
1551 video_device.ReceiveVoid()); | |
1552 if (FAILED(hr)) | |
1553 return max_resolution; | |
1554 | |
1555 GUID decoder_guid = {}; | |
1556 | |
1557 { | |
1558 TRACE_EVENT0("gpu,startup", | |
1559 "GetMaxH264Resolution. H.264 guid search begin"); | |
1560 // Enumerate supported video profiles and look for the H264 profile. | |
1561 bool found = false; | |
1562 UINT profile_count = video_device->GetVideoDecoderProfileCount(); | |
1563 for (UINT profile_idx = 0; profile_idx < profile_count; profile_idx++) { | |
1564 GUID profile_id = {}; | |
1565 hr = video_device->GetVideoDecoderProfile(profile_idx, &profile_id); | |
1566 if (SUCCEEDED(hr) && | |
1567 (profile_id == DXVA2_ModeH264_E || | |
1568 profile_id == DXVA2_Intel_ModeH264_E)) { | |
1569 decoder_guid = profile_id; | |
1570 found = true; | |
1571 break; | |
1572 } | |
1573 } | |
1574 if (!found) | |
1575 return max_resolution; | |
1576 } | |
1577 | |
1578 // Legacy AMD drivers with UVD3 or earlier and some Intel GPUs crash while
1579 // creating surfaces larger than 1920 x 1088.
1580 if (IsLegacyGPU(device.get())) | |
1581 return max_resolution; | |
1582 | |
1583 // We look for the following resolutions in the driver. | |
1584 // TODO(ananta) | |
1585 // Look into whether this list needs to be expanded. | |
1586 static std::pair<int, int> resolution_array[] = { | |
1587 // Use 1088 to account for 16x16 macroblocks. | |
1588 std::make_pair(1920, 1088), | |
1589 std::make_pair(2560, 1440), | |
1590 std::make_pair(3840, 2160), | |
1591 std::make_pair(4096, 2160), | |
1592 std::make_pair(4096, 2304), | |
1593 }; | |
1594 | |
1595 { | |
1596 TRACE_EVENT0("gpu,startup", | |
1597 "GetMaxH264Resolution. Resolution search begin"); | |
1598 | |
1599 for (size_t res_idx = 0; res_idx < arraysize(resolution_array); | |
1600 res_idx++) { | |
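// Probe the driver by requesting a decoder configuration at this
// resolution; the first failure caps the maximum at the previous
// (smaller) entry.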
1601 D3D11_VIDEO_DECODER_DESC desc = {}; | |
1602 desc.Guid = decoder_guid; | |
1603 desc.SampleWidth = resolution_array[res_idx].first; | |
1604 desc.SampleHeight = resolution_array[res_idx].second; | |
1605 desc.OutputFormat = DXGI_FORMAT_NV12; | |
1606 UINT config_count = 0; | |
1607 hr = video_device->GetVideoDecoderConfigCount(&desc, &config_count); | |
1608 if (FAILED(hr) || config_count == 0) | |
1609 return max_resolution; | |
1610 | |
1611 D3D11_VIDEO_DECODER_CONFIG config = {}; | |
1612 hr = video_device->GetVideoDecoderConfig(&desc, 0, &config); | |
1613 if (FAILED(hr)) | |
1614 return max_resolution; | |
1615 | |
1616 base::win::ScopedComPtr<ID3D11VideoDecoder> video_decoder; | |
1617 hr = video_device->CreateVideoDecoder(&desc, &config, | |
1618 video_decoder.Receive()); | |
1619 if (!video_decoder.get()) | |
1620 return max_resolution; | |
1621 | |
1622 max_resolution = resolution_array[res_idx]; | |
1623 } | |
1624 } | |
1625 return max_resolution; | |
1626 } | |
1627 | |
1628 // static | |
1629 bool DXVAVideoDecodeAccelerator::IsLegacyGPU(ID3D11Device* device) { | |
1630 static const int kAMDGPUId1 = 0x1002; | |
1631 static const int kAMDGPUId2 = 0x1022; | |
1632 static const int kIntelGPU = 0x8086; | |
1633 | |
1634 static bool legacy_gpu = true; | |
1635 // This flag ensures that we determine the GPU type once. | |
1636 static bool legacy_gpu_determined = false; | |
1637 | |
1638 if (legacy_gpu_determined) | |
1639 return legacy_gpu; | |
1640 | |
1641 legacy_gpu_determined = true; | |
1642 | |
1643 base::win::ScopedComPtr<IDXGIDevice> dxgi_device; | |
1644 HRESULT hr = dxgi_device.QueryFrom(device); | |
1645 if (FAILED(hr)) | |
1646 return legacy_gpu; | |
1647 | |
1648 base::win::ScopedComPtr<IDXGIAdapter> adapter; | |
1649 hr = dxgi_device->GetAdapter(adapter.Receive()); | |
1650 if (FAILED(hr)) | |
1651 return legacy_gpu; | |
1652 | |
1653 DXGI_ADAPTER_DESC adapter_desc = {}; | |
1654 hr = adapter->GetDesc(&adapter_desc); | |
1655 if (FAILED(hr)) | |
1656 return legacy_gpu; | |
1657 | |
1658 // We check whether the device is an Intel or an AMD device and whether it is
1659 // in the global lists defined by the g_AMDUVD3GPUList and g_IntelLegacyGPUList
1660 // arrays above. If so, the device is treated as a legacy device.
1661 if ((adapter_desc.VendorId == kAMDGPUId1) || | |
1662 adapter_desc.VendorId == kAMDGPUId2) { | |
1663 { | |
1664 TRACE_EVENT0("gpu,startup", | |
1665 "DXVAVideoDecodeAccelerator::IsLegacyGPU. AMD check"); | |
1666 for (size_t i = 0; i < arraysize(g_AMDUVD3GPUList); i++) { | |
1667 if (adapter_desc.DeviceId == g_AMDUVD3GPUList[i]) | |
1668 return legacy_gpu; | |
1669 } | |
1670 } | |
1671 } else if (adapter_desc.VendorId == kIntelGPU) { | |
1672 { | |
1673 TRACE_EVENT0("gpu,startup", | |
1674 "DXVAVideoDecodeAccelerator::IsLegacyGPU. Intel check"); | |
1675 for (size_t i = 0; i < arraysize(g_IntelLegacyGPUList); i++) { | |
1676 if (adapter_desc.DeviceId == g_IntelLegacyGPUList[i]) | |
1677 return legacy_gpu; | |
1678 } | |
1679 } | |
1680 } | |
1681 legacy_gpu = false; | |
1682 return legacy_gpu; | |
1683 } | |
1684 | |
1685 bool DXVAVideoDecodeAccelerator::InitDecoder(media::VideoCodecProfile profile) { | |
1686 HMODULE decoder_dll = NULL; | |
1687 | |
1688 CLSID clsid = {}; | |
1689 | |
1690 // Profile must fall within the valid range for one of the supported codecs. | |
1691 if (profile >= media::H264PROFILE_MIN && profile <= media::H264PROFILE_MAX) { | |
1692 // We mimic the steps CoCreateInstance uses to instantiate the object. This
1693 // was originally done because CoCreateInstance failed inside the sandbox; it
1694 // is kept as a more minimal approach that avoids other side effects
1695 // CoCreateInstance might have (we are still in a reduced sandbox).
1696 decoder_dll = ::GetModuleHandle(L"msmpeg2vdec.dll"); | |
1697 RETURN_ON_FAILURE(decoder_dll, | |
1698 "msmpeg2vdec.dll required for decoding is not loaded", | |
1699 false); | |
1700 | |
1701 // Check the version of the DLL: version 6.1.7140 is blacklisted due to high
1702 // crash rates in browsers loading it. If that is the version installed we
1703 // fall back to software decoding. See crbug/403440.
1704 std::unique_ptr<FileVersionInfo> version_info( | |
1705 FileVersionInfo::CreateFileVersionInfoForModule(decoder_dll)); | |
1706 RETURN_ON_FAILURE(version_info, | |
1707 "unable to get version of msmpeg2vdec.dll", | |
1708 false); | |
1709 base::string16 file_version = version_info->file_version(); | |
1710 RETURN_ON_FAILURE(file_version.find(L"6.1.7140") == base::string16::npos, | |
1711 "blacklisted version of msmpeg2vdec.dll 6.1.7140", | |
1712 false); | |
1713 codec_ = media::kCodecH264; | |
1714 clsid = __uuidof(CMSH264DecoderMFT); | |
1715 } else if (enable_accelerated_vpx_decode_ && | |
1716 (profile == media::VP8PROFILE_ANY || | |
1717 profile == media::VP9PROFILE_PROFILE0 || | |
1718 profile == media::VP9PROFILE_PROFILE1 || | |
1719 profile == media::VP9PROFILE_PROFILE2 || | |
1720 profile == media::VP9PROFILE_PROFILE3)) { | |
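// The Intel Media SDK VPx decoder DLLs are expected under the native
// Program Files directory, hence DIR_PROGRAM_FILES6432 on WOW64.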
1721 int program_files_key = base::DIR_PROGRAM_FILES; | |
1722 if (base::win::OSInfo::GetInstance()->wow64_status() == | |
1723 base::win::OSInfo::WOW64_ENABLED) { | |
1724 program_files_key = base::DIR_PROGRAM_FILES6432; | |
1725 } | |
1726 | |
1727 base::FilePath dll_path; | |
1728 RETURN_ON_FAILURE(PathService::Get(program_files_key, &dll_path), | |
1729 "failed to get path for Program Files", false); | |
1730 | |
1731 dll_path = dll_path.Append(kVPXDecoderDLLPath); | |
1732 if (profile == media::VP8PROFILE_ANY) { | |
1733 codec_ = media::kCodecVP8; | |
1734 dll_path = dll_path.Append(kVP8DecoderDLLName); | |
1735 clsid = CLSID_WebmMfVp8Dec; | |
1736 } else { | |
1737 codec_ = media::kCodecVP9; | |
1738 dll_path = dll_path.Append(kVP9DecoderDLLName); | |
1739 clsid = CLSID_WebmMfVp9Dec; | |
1740 } | |
1741 decoder_dll = ::LoadLibraryEx(dll_path.value().data(), NULL, | |
1742 LOAD_WITH_ALTERED_SEARCH_PATH); | |
1743 RETURN_ON_FAILURE(decoder_dll, "vpx decoder dll is not loaded", false); | |
1744 } else { | |
1745 RETURN_ON_FAILURE(false, "Unsupported codec.", false); | |
1746 } | |
1747 | |
1748 HRESULT hr = CreateCOMObjectFromDll(decoder_dll, | |
1749 clsid, | |
1750 __uuidof(IMFTransform), | |
1751 decoder_.ReceiveVoid()); | |
1752 RETURN_ON_HR_FAILURE(hr, "Failed to create decoder instance", false); | |
1753 | |
1754 RETURN_ON_FAILURE(CheckDecoderDxvaSupport(), | |
1755 "Failed to check decoder DXVA support", false); | |
1756 | |
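// Hand the decoder a device manager so it can create DXVA surfaces on our
// device: the DXGI device manager for DX11, the D3D9 manager otherwise.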
1757 ULONG_PTR device_manager_to_use = NULL; | |
1758 if (use_dx11_) { | |
1759 CHECK(create_dxgi_device_manager_); | |
1760 RETURN_AND_NOTIFY_ON_FAILURE(CreateDX11DevManager(), | |
1761 "Failed to initialize DX11 device and manager", | |
1762 PLATFORM_FAILURE, | |
1763 false); | |
1764 device_manager_to_use = reinterpret_cast<ULONG_PTR>( | |
1765 d3d11_device_manager_.get()); | |
1766 } else { | |
1767 RETURN_AND_NOTIFY_ON_FAILURE(CreateD3DDevManager(), | |
1768 "Failed to initialize D3D device and manager", | |
1769 PLATFORM_FAILURE, | |
1770 false); | |
1771 device_manager_to_use = reinterpret_cast<ULONG_PTR>(device_manager_.get()); | |
1772 } | |
1773 | |
1774 hr = decoder_->ProcessMessage( | |
1775 MFT_MESSAGE_SET_D3D_MANAGER, | |
1776 device_manager_to_use); | |
1777 if (use_dx11_) { | |
1778 RETURN_ON_HR_FAILURE(hr, "Failed to pass DX11 manager to decoder", false); | |
1779 } else { | |
1780 RETURN_ON_HR_FAILURE(hr, "Failed to pass D3D manager to decoder", false); | |
1781 } | |
1782 | |
1783 EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay(); | |
1784 | |
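// Pick a 32-bit RGBA-capable pbuffer EGL config and cache it in
// |egl_config_| for later surface creation.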
1785 EGLint config_attribs[] = { | |
1786 EGL_BUFFER_SIZE, 32, | |
1787 EGL_RED_SIZE, 8, | |
1788 EGL_GREEN_SIZE, 8, | |
1789 EGL_BLUE_SIZE, 8, | |
1790 EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, | |
1791 EGL_ALPHA_SIZE, 0, | |
1792 EGL_NONE | |
1793 }; | |
1794 | |
1795 EGLint num_configs; | |
1796 | |
1797 if (!eglChooseConfig( | |
1798 egl_display, | |
1799 config_attribs, | |
1800 &egl_config_, | |
1801 1, | |
1802 &num_configs)) | |
1803 return false; | |
1804 | |
1805 return SetDecoderMediaTypes(); | |
1806 } | |
1807 | |
1808 bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() { | |
1809 base::win::ScopedComPtr<IMFAttributes> attributes; | |
1810 HRESULT hr = decoder_->GetAttributes(attributes.Receive()); | |
1811 RETURN_ON_HR_FAILURE(hr, "Failed to get decoder attributes", false); | |
1812 | |
1813 UINT32 dxva = 0; | |
1814 hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva); | |
1815 RETURN_ON_HR_FAILURE(hr, "Failed to check if decoder supports DXVA", false); | |
1816 | |
1817 if (codec_ == media::kCodecH264) { | |
1818 hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE); | |
1819 RETURN_ON_HR_FAILURE(hr, "Failed to enable DXVA H/W decoding", false); | |
1820 } | |
1821 | |
1822 hr = attributes->SetUINT32(CODECAPI_AVLowLatencyMode, TRUE); | |
1823 if (SUCCEEDED(hr)) { | |
1824 DVLOG(1) << "Successfully set Low latency mode on decoder."; | |
1825 } else { | |
1826 DVLOG(1) << "Failed to set Low latency mode on decoder. Error: " << hr; | |
1827 } | |
1828 | |
1829 auto gl_context = get_gl_context_cb_.Run(); | |
1830 RETURN_ON_FAILURE(gl_context, "Couldn't get GL context", false); | |
1831 | |
1832 // The decoder should use DX11 iff | |
1833 // 1. The underlying H/W decoder supports it. | |
1834 // 2. We have a pointer to the MFCreateDXGIDeviceManager function needed for | |
1835 // this. This should always be true for Windows 8+. | |
1836 // 3. ANGLE is using DX11. | |
1837 if (create_dxgi_device_manager_ && | |
1838 (gl_context->GetGLRenderer().find("Direct3D11") != std::string::npos)) { | |
1839 UINT32 dx11_aware = 0; | |
1840 attributes->GetUINT32(MF_SA_D3D11_AWARE, &dx11_aware); | |
1841 use_dx11_ = !!dx11_aware; | |
1842 } | |
1843 | |
1844 use_keyed_mutex_ = | |
1845 use_dx11_ && gfx::GLSurfaceEGL::HasEGLExtension("EGL_ANGLE_keyed_mutex"); | |
1846 | |
1847 return true; | |
1848 } | |
1849 | |
1850 bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() { | |
1851 RETURN_ON_FAILURE(SetDecoderInputMediaType(), | |
1852 "Failed to set decoder input media type", false); | |
1853 return SetDecoderOutputMediaType(MFVideoFormat_NV12); | |
1854 } | |
1855 | |
1856 bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() { | |
1857 base::win::ScopedComPtr<IMFMediaType> media_type; | |
1858 HRESULT hr = MFCreateMediaType(media_type.Receive()); | |
1859 RETURN_ON_HR_FAILURE(hr, "MFCreateMediaType failed", false); | |
1860 | |
1861 hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); | |
1862 RETURN_ON_HR_FAILURE(hr, "Failed to set major input type", false); | |
1863 | |
1864 if (codec_ == media::kCodecH264) { | |
1865 hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264); | |
1866 } else if (codec_ == media::kCodecVP8) { | |
1867 hr = media_type->SetGUID(MF_MT_SUBTYPE, MEDIASUBTYPE_VP80); | |
1868 } else if (codec_ == media::kCodecVP9) { | |
1869 hr = media_type->SetGUID(MF_MT_SUBTYPE, MEDIASUBTYPE_VP90); | |
1870 } else { | |
1871 NOTREACHED(); | |
1872 RETURN_ON_FAILURE(false, "Unsupported codec on input media type.", false); | |
1873 } | |
1874 RETURN_ON_HR_FAILURE(hr, "Failed to set subtype", false); | |
1875 | |
1876 // MSDN recommends setting this value on the input media type, though it is
1877 // not clear whether the decoder actually requires it.
1878 hr = media_type->SetUINT32(MF_MT_INTERLACE_MODE, | |
1879 MFVideoInterlace_MixedInterlaceOrProgressive); | |
1880 RETURN_ON_HR_FAILURE(hr, "Failed to set interlace mode", false); | |
1881 | |
1882 hr = decoder_->SetInputType(0, media_type.get(), 0); // No flags | |
1883 RETURN_ON_HR_FAILURE(hr, "Failed to set decoder input type", false); | |
1884 return true; | |
1885 } | |
1886 | |
1887 bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType( | |
1888 const GUID& subtype) { | |
1889 return SetTransformOutputType(decoder_.get(), subtype, 0, 0); | |
1890 } | |
1891 | |
1892 bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg, | |
1893 int32_t param) { | |
1894 HRESULT hr = decoder_->ProcessMessage(msg, param); | |
1895 return SUCCEEDED(hr); | |
1896 } | |
1897 | |
1898 // Gets the minimum buffer sizes for input and output samples. The MFT will not
1899 // allocate buffers for either input or output, so we have to do it ourselves
1900 // and make sure they're the correct size. Decoding is only supported with DXVA.
1901 bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() { | |
1902 HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_); | |
1903 RETURN_ON_HR_FAILURE(hr, "Failed to get input stream info", false); | |
1904 | |
1905 hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_); | |
1906 RETURN_ON_HR_FAILURE(hr, "Failed to get decoder output stream info", false); | |
1907 | |
1908 DVLOG(1) << "Input stream info: "; | |
1909 DVLOG(1) << "Max latency: " << input_stream_info_.hnsMaxLatency; | |
1910 if (codec_ == media::kCodecH264) { | |
1911 // There should be three flags: one requiring that a whole frame be in a
1912 // single sample, one requiring that there be only one buffer in a single
1913 // sample, and one specifying a fixed sample size (cbSize).
1914 CHECK_EQ(input_stream_info_.dwFlags, 0x7u); | |
1915 } | |
1916 | |
1917 DVLOG(1) << "Min buffer size: " << input_stream_info_.cbSize; | |
1918 DVLOG(1) << "Max lookahead: " << input_stream_info_.cbMaxLookahead; | |
1919 DVLOG(1) << "Alignment: " << input_stream_info_.cbAlignment; | |
1920 | |
1921 DVLOG(1) << "Output stream info: "; | |
1922 // The flags here should be the same and mean the same thing, except that when
1923 // DXVA is enabled there is an extra 0x100 flag meaning the decoder will
1924 // allocate its own samples.
1925 DVLOG(1) << "Flags: " | |
1926 << std::hex << std::showbase << output_stream_info_.dwFlags; | |
1927 if (codec_ == media::kCodecH264) { | |
1928 CHECK_EQ(output_stream_info_.dwFlags, 0x107u); | |
1929 } | |
1930 DVLOG(1) << "Min buffer size: " << output_stream_info_.cbSize; | |
1931 DVLOG(1) << "Alignment: " << output_stream_info_.cbAlignment; | |
1932 return true; | |
1933 } | |
1934 | |
1935 void DXVAVideoDecodeAccelerator::DoDecode() { | |
1936 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
1937 // This function is also called from FlushInternal in a loop which could | |
1938 // result in the state transitioning to kStopped due to no decoded output. | |
1939 State state = GetState(); | |
1940 RETURN_AND_NOTIFY_ON_FAILURE( | |
1941 (state == kNormal || state == kFlushing || state == kStopped), | |
1942 "DoDecode: not in normal/flushing/stopped state", ILLEGAL_STATE,); | |
1943 | |
1944 MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0}; | |
1945 DWORD status = 0; | |
1946 | |
1947 HRESULT hr = decoder_->ProcessOutput(0, // No flags | |
1948 1, // # of out streams to pull from | |
1949 &output_data_buffer, | |
1950 &status); | |
1951 IMFCollection* events = output_data_buffer.pEvents; | |
1952 if (events != NULL) { | |
1953 DVLOG(1) << "Got events from ProcessOutput, but discarding";
1954 events->Release(); | |
1955 } | |
1956 if (FAILED(hr)) { | |
1957 // A stream change needs further ProcessInput calls to get back decoder
1958 // output, which is why we need to set the state to stopped.
1959 if (hr == MF_E_TRANSFORM_STREAM_CHANGE) { | |
1960 if (!SetDecoderOutputMediaType(MFVideoFormat_NV12)) { | |
1961 // Decoder didn't let us set NV12 output format. It is not clear why
1962 // this can happen. Give up in disgust.
1963 NOTREACHED() << "Failed to set decoder output media type to NV12"; | |
1964 SetState(kStopped); | |
1965 } else { | |
1966 DVLOG(1) << "Received output format change from the decoder." | |
1967 " Recursively invoking DoDecode"; | |
1968 DoDecode(); | |
1969 } | |
1970 return; | |
1971 } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) { | |
1972 // No more output from the decoder. Stop playback. | |
1973 SetState(kStopped); | |
1974 return; | |
1975 } else { | |
1976 NOTREACHED() << "Unhandled error in DoDecode()"; | |
1977 return; | |
1978 } | |
1979 } | |
1980 TRACE_EVENT_ASYNC_END0("gpu", "DXVAVideoDecodeAccelerator.Decoding", this); | |
1981 | |
1982 TRACE_COUNTER1("DXVA Decoding", "TotalPacketsBeforeDecode", | |
1983 inputs_before_decode_); | |
1984 | |
1985 inputs_before_decode_ = 0; | |
1986 | |
1987 RETURN_AND_NOTIFY_ON_FAILURE(ProcessOutputSample(output_data_buffer.pSample), | |
1988 "Failed to process output sample.", PLATFORM_FAILURE,); | |
1989 } | |
1990 | |
1991 bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) { | |
1992 RETURN_ON_FAILURE(sample, "Decode succeeded with NULL output sample", false); | |
1993 | |
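// The input buffer id is stashed in the sample timestamp when the input is
// submitted; recover it here to correlate this output with its input buffer.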
1994 LONGLONG input_buffer_id = 0; | |
1995 RETURN_ON_HR_FAILURE(sample->GetSampleTime(&input_buffer_id), | |
1996 "Failed to get input buffer id associated with sample", | |
1997 false); | |
1998 | |
1999 { | |
2000 base::AutoLock lock(decoder_lock_); | |
2001 DCHECK(pending_output_samples_.empty()); | |
2002 pending_output_samples_.push_back( | |
2003 PendingSampleInfo(input_buffer_id, sample)); | |
2004 } | |
2005 | |
2006 if (pictures_requested_) { | |
2007 DVLOG(1) << "Waiting for picture slots from the client."; | |
2008 main_thread_task_runner_->PostTask( | |
2009 FROM_HERE, | |
2010 base::Bind(&DXVAVideoDecodeAccelerator::ProcessPendingSamples, | |
2011 weak_this_factory_.GetWeakPtr())); | |
2012 return true; | |
2013 } | |
2014 | |
2015 int width = 0; | |
2016 int height = 0; | |
2017 if (!GetVideoFrameDimensions(sample, &width, &height)) { | |
2018 RETURN_ON_FAILURE(false, "Failed to get D3D surface from output sample", | |
2019 false); | |
2020 } | |
2021 | |
2022 // Go ahead and request picture buffers. | |
2023 main_thread_task_runner_->PostTask( | |
2024 FROM_HERE, | |
2025 base::Bind(&DXVAVideoDecodeAccelerator::RequestPictureBuffers, | |
2026 weak_this_factory_.GetWeakPtr(), | |
2027 width, | |
2028 height)); | |
2029 | |
2030 pictures_requested_ = true; | |
2031 return true; | |
2032 } | |
2033 | |
2034 void DXVAVideoDecodeAccelerator::ProcessPendingSamples() { | |
2035 DCHECK(main_thread_task_runner_->BelongsToCurrentThread()); | |
2036 | |
2037 if (output_picture_buffers_.empty())
2038 return; | |
2039 | |
2040 RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(), | |
2041 "Failed to make context current", | |
2042 PLATFORM_FAILURE, ); | |
2043 | |
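// Pair each available picture buffer with the oldest pending output sample
// and copy the decoded data into the client's texture.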
2044 OutputBuffers::iterator index; | |
2045 | |
2046 for (index = output_picture_buffers_.begin(); | |
2047 index != output_picture_buffers_.end() && | |
2048 OutputSamplesPresent(); | |
2049 ++index) { | |
2050 if (index->second->available()) { | |
2051 PendingSampleInfo* pending_sample = NULL; | |
2052 { | |
2053 base::AutoLock lock(decoder_lock_); | |
2054 PendingSampleInfo& sample_info = pending_output_samples_.front(); | |
2055 if (sample_info.picture_buffer_id != -1) | |
2056 continue; | |
2057 pending_sample = &sample_info; | |
2058 } | |
2059 | |
2060 int width = 0; | |
2061 int height = 0; | |
2062 if (!GetVideoFrameDimensions(pending_sample->output_sample.get(), | |
2063 &width, &height)) { | |
2064 RETURN_AND_NOTIFY_ON_FAILURE(false, | |
2065 "Failed to get D3D surface from output sample", PLATFORM_FAILURE,); | |
2066 } | |
2067 | |
2068 if (width != index->second->size().width() || | |
2069 height != index->second->size().height()) { | |
2070 HandleResolutionChanged(width, height); | |
2071 return; | |
2072 } | |
2073 | |
2074 base::win::ScopedComPtr<IMFMediaBuffer> output_buffer; | |
2075 HRESULT hr = pending_sample->output_sample->GetBufferByIndex( | |
2076 0, output_buffer.Receive()); | |
2077 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, | |
2078 "Failed to get buffer from output sample", PLATFORM_FAILURE,); | |
2079 | |
2080 base::win::ScopedComPtr<IDirect3DSurface9> surface; | |
2081 base::win::ScopedComPtr<ID3D11Texture2D> d3d11_texture; | |
2082 | |
2083 if (use_dx11_) { | |
2084 base::win::ScopedComPtr<IMFDXGIBuffer> dxgi_buffer; | |
2085 hr = dxgi_buffer.QueryFrom(output_buffer.get()); | |
2086 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, | |
2087 "Failed to get DXGIBuffer from output sample", PLATFORM_FAILURE,); | |
2088 hr = dxgi_buffer->GetResource( | |
2089 __uuidof(ID3D11Texture2D), | |
2090 reinterpret_cast<void**>(d3d11_texture.Receive())); | |
2091 } else { | |
2092 hr = MFGetService(output_buffer.get(), MR_BUFFER_SERVICE, | |
2093 IID_PPV_ARGS(surface.Receive())); | |
2094 } | |
2095 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, | |
2096 "Failed to get surface from output sample", PLATFORM_FAILURE,); | |
2097 | |
2098 pending_sample->picture_buffer_id = index->second->id(); | |
2099 | |
2100 RETURN_AND_NOTIFY_ON_FAILURE( | |
2101 index->second->CopyOutputSampleDataToPictureBuffer( | |
2102 this, | |
2103 surface.get(), | |
2104 d3d11_texture.get(), | |
2105 pending_sample->input_buffer_id), | |
2106 "Failed to copy output sample", PLATFORM_FAILURE,); | |
2107 | |
2108 index->second->set_available(false); | |
2109 } | |
2110 } | |
2111 } | |
2112 | |
2113 void DXVAVideoDecodeAccelerator::StopOnError( | |
2114 media::VideoDecodeAccelerator::Error error) { | |
2115 if (!main_thread_task_runner_->BelongsToCurrentThread()) { | |
2116 main_thread_task_runner_->PostTask( | |
2117 FROM_HERE, | |
2118 base::Bind(&DXVAVideoDecodeAccelerator::StopOnError, | |
2119 weak_this_factory_.GetWeakPtr(), | |
2120 error)); | |
2121 return; | |
2122 } | |
2123 | |
2124 if (client_) | |
2125 client_->NotifyError(error); | |
2126 client_ = NULL; | |
2127 | |
2128 if (GetState() != kUninitialized) { | |
2129 Invalidate(); | |
2130 } | |
2131 } | |
2132 | |
2133 void DXVAVideoDecodeAccelerator::Invalidate() { | |
2134 if (GetState() == kUninitialized) | |
2135 return; | |
2136 | |
2137 // Best effort to make the GL context current. | |
2138 make_context_current_cb_.Run(); | |
2139 | |
2140 decoder_thread_.Stop(); | |
2141 weak_this_factory_.InvalidateWeakPtrs(); | |
2142 output_picture_buffers_.clear(); | |
2143 stale_output_picture_buffers_.clear(); | |
2144 pending_output_samples_.clear(); | |
2145 // We want to continue processing pending input after detecting a config | |
2146 // change. | |
2147 if (GetState() != kConfigChange) | |
2148 pending_input_buffers_.clear(); | |
2149 decoder_.Release(); | |
2150 pictures_requested_ = false; | |
2151 | |
2152 config_change_detector_.reset(); | |
2153 | |
2154 if (use_dx11_) { | |
2155 if (video_format_converter_mft_.get()) { | |
2156 video_format_converter_mft_->ProcessMessage( | |
2157 MFT_MESSAGE_NOTIFY_END_STREAMING, 0); | |
2158 video_format_converter_mft_.Release(); | |
2159 } | |
2160 d3d11_device_context_.Release(); | |
2161 d3d11_device_.Release(); | |
2162 d3d11_device_manager_.Release(); | |
2163 d3d11_query_.Release(); | |
2164 dx11_video_format_converter_media_type_needs_init_ = true; | |
2165 multi_threaded_.Release(); | |
2166 } else { | |
2167 d3d9_.Release(); | |
2168 d3d9_device_ex_.Release(); | |
2169 device_manager_.Release(); | |
2170 query_.Release(); | |
2171 } | |
2172 | |
2173 SetState(kUninitialized); | |
2174 } | |
2175 | |
2176 void DXVAVideoDecodeAccelerator::NotifyInputBufferRead(int input_buffer_id) { | |
2177 DCHECK(main_thread_task_runner_->BelongsToCurrentThread()); | |
2178 if (client_) | |
2179 client_->NotifyEndOfBitstreamBuffer(input_buffer_id); | |
2180 } | |
2181 | |
2182 void DXVAVideoDecodeAccelerator::NotifyFlushDone() { | |
2183 DCHECK(main_thread_task_runner_->BelongsToCurrentThread()); | |
2184 if (client_ && pending_flush_) { | |
2185 pending_flush_ = false; | |
2186 { | |
2187 base::AutoLock lock(decoder_lock_); | |
2188 sent_drain_message_ = false; | |
2189 } | |
2190 | |
2191 client_->NotifyFlushDone(); | |
2192 } | |
2193 } | |
2194 | |
2195 void DXVAVideoDecodeAccelerator::NotifyResetDone() { | |
2196 DCHECK(main_thread_task_runner_->BelongsToCurrentThread()); | |
2197 if (client_) | |
2198 client_->NotifyResetDone(); | |
2199 } | |
2200 | |
2201 void DXVAVideoDecodeAccelerator::RequestPictureBuffers(int width, int height) { | |
2202 DCHECK(main_thread_task_runner_->BelongsToCurrentThread()); | |
2203 // This task could execute after the decoder has been torn down. | |
2204 if (GetState() != kUninitialized && client_) { | |
2205 client_->ProvidePictureBuffers(kNumPictureBuffers, 1, | |
2206 gfx::Size(width, height), GL_TEXTURE_2D); | |
2207 } | |
2208 } | |
2209 | |
2210 void DXVAVideoDecodeAccelerator::NotifyPictureReady( | |
2211 int picture_buffer_id, | |
2212 int input_buffer_id) { | |
2213 DCHECK(main_thread_task_runner_->BelongsToCurrentThread()); | |
2214 // This task could execute after the decoder has been torn down. | |
2215 if (GetState() != kUninitialized && client_) { | |
2216 // TODO(henryhsu): Use correct visible size instead of (0, 0). We can't use | |
2217 // coded size here so use (0, 0) intentionally to have the client choose. | |
2218 media::Picture picture(picture_buffer_id, input_buffer_id, | |
2219 gfx::Rect(0, 0), false); | |
2220 client_->PictureReady(picture); | |
2221 } | |
2222 } | |
2223 | |
2224 void DXVAVideoDecodeAccelerator::NotifyInputBuffersDropped() { | |
2225 DCHECK(main_thread_task_runner_->BelongsToCurrentThread()); | |
2226 if (!client_) | |
2227 return; | |
2228 | |
2229 for (PendingInputs::iterator it = pending_input_buffers_.begin(); | |
2230 it != pending_input_buffers_.end(); ++it) { | |
2231 LONGLONG input_buffer_id = 0; | |
2232 RETURN_ON_HR_FAILURE((*it)->GetSampleTime(&input_buffer_id), | |
2233 "Failed to get buffer id associated with sample",); | |
2234 client_->NotifyEndOfBitstreamBuffer(input_buffer_id); | |
2235 } | |
2236 pending_input_buffers_.clear(); | |
2237 } | |
2238 | |
2239 void DXVAVideoDecodeAccelerator::DecodePendingInputBuffers() { | |
2240 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
2241 State state = GetState(); | |
2242 RETURN_AND_NOTIFY_ON_FAILURE((state != kUninitialized), | |
2243 "Invalid state: " << state, ILLEGAL_STATE,); | |
2244 | |
2245 if (pending_input_buffers_.empty() || OutputSamplesPresent()) | |
2246 return; | |
2247 | |
2248 PendingInputs pending_input_buffers_copy; | |
2249 std::swap(pending_input_buffers_, pending_input_buffers_copy); | |
2250 | |
2251 for (PendingInputs::iterator it = pending_input_buffers_copy.begin(); | |
2252 it != pending_input_buffers_copy.end(); ++it) { | |
2253 DecodeInternal(*it); | |
2254 } | |
2255 } | |
2256 | |
2257 void DXVAVideoDecodeAccelerator::FlushInternal() { | |
2258 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
2259 | |
2260 // We allow only one output frame to be present at any given time. If we have | |
2261 // an output frame, then we cannot complete the flush at this time. | |
2262 if (OutputSamplesPresent()) | |
2263 return; | |
2264 | |
2265 // First drain the pending input because once the drain message is sent below, | |
2266 // the decoder will ignore further input until it's drained. | |
2267 if (!pending_input_buffers_.empty()) { | |
2268 decoder_thread_task_runner_->PostTask( | |
2269 FROM_HERE, | |
2270 base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers, | |
2271 base::Unretained(this))); | |
2272 decoder_thread_task_runner_->PostTask( | |
2273 FROM_HERE, | |
2274 base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal, | |
2275 base::Unretained(this))); | |
2276 return; | |
2277 } | |
2278 | |
2279 { | |
2280 base::AutoLock lock(decoder_lock_); | |
2281 if (!sent_drain_message_) { | |
2282 RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0), | |
2283 "Failed to send drain message", | |
2284 PLATFORM_FAILURE,); | |
2285 sent_drain_message_ = true; | |
2286 } | |
2287 } | |
2288 | |
2289 // Attempt to retrieve an output frame from the decoder. If we have one, | |
2290 // return and proceed when the output frame is processed. If we don't have a | |
2291 // frame then we are done. | |
2292 DoDecode(); | |
2293 if (OutputSamplesPresent()) | |
2294 return; | |
2295 | |
2296 SetState(kFlushing); | |
2297 | |
2298 main_thread_task_runner_->PostTask( | |
2299 FROM_HERE, | |
2300 base::Bind(&DXVAVideoDecodeAccelerator::NotifyFlushDone, | |
2301 weak_this_factory_.GetWeakPtr())); | |
2302 | |
2303 SetState(kNormal); | |
2304 } | |
2305 | |
2306 void DXVAVideoDecodeAccelerator::DecodeInternal( | |
2307 const base::win::ScopedComPtr<IMFSample>& sample) { | |
2308 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
2309 | |
2310 if (GetState() == kUninitialized) | |
2311 return; | |
2312 | |
2313 if (OutputSamplesPresent() || !pending_input_buffers_.empty()) { | |
2314 pending_input_buffers_.push_back(sample); | |
2315 return; | |
2316 } | |
2317 | |
2318 // Check if the resolution, bit rate, etc. changed in the stream. If so, we
2319 // reinitialize the decoder to ensure that the stream decodes correctly.
2320 bool config_changed = false; | |
2321 | |
2322 HRESULT hr = CheckConfigChanged(sample.get(), &config_changed); | |
2323 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to check video stream config", | |
2324 PLATFORM_FAILURE,); | |
2325 | |
2326 if (config_changed) { | |
2327 pending_input_buffers_.push_back(sample); | |
2328 main_thread_task_runner_->PostTask( | |
2329 FROM_HERE, | |
2330 base::Bind(&DXVAVideoDecodeAccelerator::ConfigChanged, | |
2331 weak_this_factory_.GetWeakPtr(), | |
2332 config_)); | |
2333 return; | |
2334 } | |
2335 | |
2336 if (!inputs_before_decode_) { | |
2337 TRACE_EVENT_ASYNC_BEGIN0("gpu", "DXVAVideoDecodeAccelerator.Decoding", | |
2338 this); | |
2339 } | |
2340 inputs_before_decode_++; | |
2341 | |
2342 hr = decoder_->ProcessInput(0, sample.get(), 0); | |
2343 // As per MSDN, if the decoder returns MF_E_NOTACCEPTING it means that it
2344 // has enough data to produce one or more output samples. In this case the
2345 // recommended options are to:
2346 // 1. Generate new output by calling IMFTransform::ProcessOutput until it
2347 // returns MF_E_TRANSFORM_NEED_MORE_INPUT.
2348 // 2. Flush the input data.
2349 // We implement the first option, i.e., we retrieve the output sample and then
2350 // process the input again. Failure in either of these steps is treated as a
2351 // decoder failure.
2352 if (hr == MF_E_NOTACCEPTING) { | |
2353 DoDecode(); | |
2354 // If the DoDecode call resulted in an output frame then we should not | |
2355 // process any more input until that frame is copied to the target surface. | |
2356 if (!OutputSamplesPresent()) { | |
2357 State state = GetState(); | |
2358 RETURN_AND_NOTIFY_ON_FAILURE((state == kStopped || state == kNormal || | |
2359 state == kFlushing), | |
2360 "Failed to process output. Unexpected decoder state: " << state, | |
2361 PLATFORM_FAILURE,); | |
2362 hr = decoder_->ProcessInput(0, sample.get(), 0); | |
2363 } | |
2364 // If we continue to get the MF_E_NOTACCEPTING error we do the following:
2365 // 1. Add the input sample to the pending queue. | |
2366 // 2. If we don't have any output samples we post the | |
2367 // DecodePendingInputBuffers task to process the pending input samples. | |
2368 // If we have an output sample then the above task is posted when the | |
2369 // output samples are sent to the client. | |
2370 // This is because we only support one pending output sample at any given
2371 // time, due to a limitation of the Microsoft Media Foundation decoder:
2372 // it recycles the output decoder surfaces.
2373 if (hr == MF_E_NOTACCEPTING) { | |
2374 pending_input_buffers_.push_back(sample); | |
2375 decoder_thread_task_runner_->PostTask( | |
2376 FROM_HERE, | |
2377 base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers, | |
2378 base::Unretained(this))); | |
2379 return; | |
2380 } | |
2381 } | |
2382 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to process input sample", | |
2383 PLATFORM_FAILURE,); | |
2384 | |
2385 DoDecode(); | |
2386 | |
2387 State state = GetState(); | |
2388 RETURN_AND_NOTIFY_ON_FAILURE((state == kStopped || state == kNormal || | |
2389 state == kFlushing), | |
2390 "Failed to process output. Unexpected decoder state: " << state, | |
2391 ILLEGAL_STATE,); | |
2392 | |
2393 LONGLONG input_buffer_id = 0; | |
2394 RETURN_ON_HR_FAILURE(sample->GetSampleTime(&input_buffer_id), | |
2395 "Failed to get input buffer id associated with sample",); | |
2396 // The Microsoft Media Foundation decoder internally buffers up to 30 frames
2397 // before returning a decoded frame. We need to inform the client that this | |
2398 // input buffer is processed as it may stop sending us further input. | |
2399 // Note: This may break clients which expect every input buffer to be | |
2400 // associated with a decoded output buffer. | |
2401 // TODO(ananta) | |
2402 // Do some more investigation into whether it is possible to get the MFT | |
2403 // decoder to emit an output packet for every input packet. | |
2404 // http://code.google.com/p/chromium/issues/detail?id=108121 | |
2405 // http://code.google.com/p/chromium/issues/detail?id=150925 | |
2406 main_thread_task_runner_->PostTask( | |
2407 FROM_HERE, | |
2408 base::Bind(&DXVAVideoDecodeAccelerator::NotifyInputBufferRead, | |
2409 weak_this_factory_.GetWeakPtr(), | |
2410 input_buffer_id)); | |
2411 } | |
2412 | |
2413 void DXVAVideoDecodeAccelerator::HandleResolutionChanged(int width, | |
2414 int height) { | |
2415 dx11_video_format_converter_media_type_needs_init_ = true; | |
2416 | |
2417 main_thread_task_runner_->PostTask( | |
2418 FROM_HERE, | |
2419 base::Bind(&DXVAVideoDecodeAccelerator::DismissStaleBuffers, | |
2420 weak_this_factory_.GetWeakPtr(), false)); | |
2421 | |
2422 main_thread_task_runner_->PostTask( | |
2423 FROM_HERE, | |
2424 base::Bind(&DXVAVideoDecodeAccelerator::RequestPictureBuffers, | |
2425 weak_this_factory_.GetWeakPtr(), | |
2426 width, | |
2427 height)); | |
2428 } | |
2429 | |
2430 void DXVAVideoDecodeAccelerator::DismissStaleBuffers(bool force) { | |
2431 RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(), | |
2432 "Failed to make context current", | |
2433 PLATFORM_FAILURE, ); | |
2434 | |
2435 OutputBuffers::iterator index; | |
2436 | |
2437 for (index = output_picture_buffers_.begin(); | |
2438 index != output_picture_buffers_.end(); | |
2439 ++index) { | |
2440 if (force || index->second->available()) { | |
2441 DVLOG(1) << "Dismissing picture id: " << index->second->id(); | |
2442 client_->DismissPictureBuffer(index->second->id()); | |
2443 } else { | |
2444 // Move to |stale_output_picture_buffers_| for deferred deletion. | |
2445 stale_output_picture_buffers_.insert( | |
2446 std::make_pair(index->first, index->second)); | |
2447 } | |
2448 } | |
2449 | |
2450 output_picture_buffers_.clear(); | |
2451 } | |
2452 | |
2453 void DXVAVideoDecodeAccelerator::DeferredDismissStaleBuffer( | |
2454 int32_t picture_buffer_id) { | |
2455 RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(), | |
2456 "Failed to make context current", | |
2457 PLATFORM_FAILURE, ); | |
2458 | |
2459 OutputBuffers::iterator it = stale_output_picture_buffers_.find( | |
2460 picture_buffer_id); | |
2461 DCHECK(it != stale_output_picture_buffers_.end()); | |
2462 DVLOG(1) << "Dismissing picture id: " << it->second->id(); | |
2463 client_->DismissPictureBuffer(it->second->id()); | |
2464 stale_output_picture_buffers_.erase(it); | |
2465 } | |
2466 | |
2467 DXVAVideoDecodeAccelerator::State | |
2468 DXVAVideoDecodeAccelerator::GetState() { | |
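// InterlockedAdd of 0 is an atomic read with a full memory barrier, so
// |state_| can be read safely from any thread.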
2469 static_assert(sizeof(State) == sizeof(long), "mismatched type sizes"); | |
2470 State state = static_cast<State>( | |
2471 InterlockedAdd(reinterpret_cast<volatile long*>(&state_), 0)); | |
2472 return state; | |
2473 } | |
2474 | |
2475 void DXVAVideoDecodeAccelerator::SetState(State new_state) { | |
2476 if (!main_thread_task_runner_->BelongsToCurrentThread()) { | |
2477 main_thread_task_runner_->PostTask( | |
2478 FROM_HERE, | |
2479 base::Bind(&DXVAVideoDecodeAccelerator::SetState, | |
2480 weak_this_factory_.GetWeakPtr(), | |
2481 new_state)); | |
2482 return; | |
2483 } | |
2484 | |
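// Atomic write with a full barrier; pairs with the InterlockedAdd read in
// GetState().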
2485 static_assert(sizeof(State) == sizeof(long), "mismatched type sizes"); | |
2486 ::InterlockedExchange(reinterpret_cast<volatile long*>(&state_), | |
2487 new_state); | |
2488 DCHECK_EQ(state_, new_state); | |
2489 } | |
2490 | |
2491 void DXVAVideoDecodeAccelerator::StartDecoderThread() { | |
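// COM is initialized in a single-threaded apartment (MTA disabled) on the
// decoder thread before it starts.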
2492 decoder_thread_.init_com_with_mta(false); | |
2493 decoder_thread_.Start(); | |
2494 decoder_thread_task_runner_ = decoder_thread_.task_runner(); | |
2495 } | |
2496 | |
2497 bool DXVAVideoDecodeAccelerator::OutputSamplesPresent() { | |
2498 base::AutoLock lock(decoder_lock_); | |
2499 return !pending_output_samples_.empty(); | |
2500 } | |
2501 | |
2502 void DXVAVideoDecodeAccelerator::CopySurface(IDirect3DSurface9* src_surface, | |
2503 IDirect3DSurface9* dest_surface, | |
2504 int picture_buffer_id, | |
2505 int input_buffer_id) { | |
2506 if (!decoder_thread_task_runner_->BelongsToCurrentThread()) { | |
2507 decoder_thread_task_runner_->PostTask( | |
2508 FROM_HERE, | |
2509 base::Bind(&DXVAVideoDecodeAccelerator::CopySurface, | |
2510 base::Unretained(this), | |
2511 src_surface, | |
2512 dest_surface, | |
2513 picture_buffer_id, | |
2514 input_buffer_id)); | |
2515 return; | |
2516 } | |
2517 | |
2518 HRESULT hr = d3d9_device_ex_->StretchRect(src_surface, NULL, dest_surface, | |
2519 NULL, D3DTEXF_NONE); | |
2520 RETURN_ON_HR_FAILURE(hr, "Colorspace conversion via StretchRect failed",); | |
2521 | |
2522 // Ideally, this should be done immediately before the draw call that uses | |
2523 // the texture. Flush it once here though. | |
2524 hr = query_->Issue(D3DISSUE_END); | |
2525 RETURN_ON_HR_FAILURE(hr, "Failed to issue END",); | |
2526 | |
2527 // If we are sharing the ANGLE device we don't need to wait for the Flush to | |
2528 // complete. | |
2529 if (using_angle_device_) { | |
2530 main_thread_task_runner_->PostTask( | |
2531 FROM_HERE, | |
2532 base::Bind(&DXVAVideoDecodeAccelerator::CopySurfaceComplete, | |
2533 weak_this_factory_.GetWeakPtr(), | |
2534 src_surface, | |
2535 dest_surface, | |
2536 picture_buffer_id, | |
2537 input_buffer_id)); | |
2538 return; | |
2539 } | |
2540 | |
2541 // Flush the decoder device to ensure that the decoded frame is copied to the | |
2542 // target surface. | |
2543 decoder_thread_task_runner_->PostDelayedTask( | |
2544 FROM_HERE, | |
2545 base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder, | |
2546 base::Unretained(this), 0, src_surface, dest_surface, | |
2547 picture_buffer_id, input_buffer_id), | |
2548 base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs)); | |
2549 } | |
2550 | |
2551 void DXVAVideoDecodeAccelerator::CopySurfaceComplete( | |
2552 IDirect3DSurface9* src_surface, | |
2553 IDirect3DSurface9* dest_surface, | |
2554 int picture_buffer_id, | |
2555 int input_buffer_id) { | |
2556 DCHECK(main_thread_task_runner_->BelongsToCurrentThread()); | |
2557 | |
2558 // The output buffers may have changed in the following scenarios:
2559 // 1. A resolution change.
2560 // 2. The decoder instance was destroyed.
2561 // Ignore copy surface notifications for such buffers.
2563 OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id); | |
2564 if (it == output_picture_buffers_.end()) | |
2565 return; | |
2566 | |
2567 // If the picture buffer is marked as available it probably means that there | |
2568 // was a Reset operation which dropped the output frame. | |
2569 DXVAPictureBuffer* picture_buffer = it->second.get(); | |
2570 if (picture_buffer->available()) | |
2571 return; | |
2572 | |
2573 RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(), | |
2574 "Failed to make context current", | |
2575 PLATFORM_FAILURE, ); | |
2576 | |
2577 DCHECK(!output_picture_buffers_.empty()); | |
2578 | |
2579 bool result = picture_buffer->CopySurfaceComplete(src_surface, dest_surface); | |
2580 RETURN_AND_NOTIFY_ON_FAILURE(result, "Failed to complete copying surface", | |
2581 PLATFORM_FAILURE, ); | |
2582 | |
2583 NotifyPictureReady(picture_buffer->id(), input_buffer_id); | |
2584 | |
2585 { | |
2586 base::AutoLock lock(decoder_lock_); | |
2587 if (!pending_output_samples_.empty()) | |
2588 pending_output_samples_.pop_front(); | |
2589 } | |
2590 | |
2591 if (pending_flush_) { | |
2592 decoder_thread_task_runner_->PostTask( | |
2593 FROM_HERE, | |
2594 base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal, | |
2595 base::Unretained(this))); | |
2596 return; | |
2597 } | |
2598 decoder_thread_task_runner_->PostTask( | |
2599 FROM_HERE, | |
2600 base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers, | |
2601 base::Unretained(this))); | |
2602 } | |
2603 | |
2604 void DXVAVideoDecodeAccelerator::CopyTexture( | |
2605 ID3D11Texture2D* src_texture, | |
2606 ID3D11Texture2D* dest_texture, | |
2607 base::win::ScopedComPtr<IDXGIKeyedMutex> dest_keyed_mutex, | |
2608 uint64_t keyed_mutex_value, | |
2609 IMFSample* video_frame, | |
2610 int picture_buffer_id, | |
2611 int input_buffer_id) { | |
2612 HRESULT hr = E_FAIL; | |
2613 | |
2614 DCHECK(use_dx11_); | |
2615 | |
2616 if (!decoder_thread_task_runner_->BelongsToCurrentThread()) { | |
2617 // The Media Foundation H.264 decoder outputs NV12 textures, which we
2618 // cannot copy into ANGLE as it expects ARGB textures. In D3D9 land
2619 // the StretchRect API in the IDirect3DDevice9Ex interface did the color
2620 // space conversion for us. Sadly in DX11 land the API does not provide
2621 // a straightforward way to do this.
2622 // We use the video processor MFT.
2623 // https://msdn.microsoft.com/en-us/library/hh162913(v=vs.85).aspx
2624 // This object implements a Media Foundation transform (IMFTransform)
2625 // which follows the same contract as the decoder. The color space
2626 // conversion, per MSDN, is done on the GPU.
2627 | |
2628 D3D11_TEXTURE2D_DESC source_desc; | |
2629 src_texture->GetDesc(&source_desc); | |
2630 | |
2631 // Set up the input and output types for the video processor MFT. | |
2632 if (!InitializeDX11VideoFormatConverterMediaType(source_desc.Width, | |
2633 source_desc.Height)) { | |
2634 RETURN_AND_NOTIFY_ON_FAILURE( | |
2635 false, "Failed to initialize media types for conversion.",
2636 PLATFORM_FAILURE,); | |
2637 } | |
2638 | |
2639 // The input to the video processor is the output sample. | |
2640 base::win::ScopedComPtr<IMFSample> input_sample_for_conversion; | |
2641 { | |
2642 base::AutoLock lock(decoder_lock_); | |
2643 PendingSampleInfo& sample_info = pending_output_samples_.front(); | |
2644 input_sample_for_conversion = sample_info.output_sample; | |
2645 } | |
2646 | |
2647 decoder_thread_task_runner_->PostTask( | |
2648 FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::CopyTexture, | |
2649 base::Unretained(this), src_texture, dest_texture, | |
2650 dest_keyed_mutex, keyed_mutex_value, | |
2651 input_sample_for_conversion.Detach(), | |
2652 picture_buffer_id, input_buffer_id)); | |
2653 return; | |
2654 } | |
2655 | |
2656 DCHECK(video_frame); | |
2657 | |
2658 base::win::ScopedComPtr<IMFSample> input_sample; | |
2659 input_sample.Attach(video_frame); | |
2660 | |
2661 DCHECK(video_format_converter_mft_.get()); | |
2662 | |
2663 if (dest_keyed_mutex) { | |
2664 HRESULT hr = | |
2665 dest_keyed_mutex->AcquireSync(keyed_mutex_value, kAcquireSyncWaitMs); | |
2666 RETURN_AND_NOTIFY_ON_FAILURE( | |
2667 hr == S_OK, "D3D11 failed to acquire keyed mutex for texture.", | |
2668 PLATFORM_FAILURE, ); | |
2669 } | |
2670 // The video processor MFT requires output samples to be allocated by the
2671 // caller. We create a sample with a buffer backed by the ID3D11Texture2D
2672 // interface exposed by ANGLE. This works nicely as it ensures that the
2673 // video processor converts the color space of the output frame and copies
2674 // the result into the ANGLE texture.
2675 base::win::ScopedComPtr<IMFSample> output_sample; | |
2676 hr = MFCreateSample(output_sample.Receive()); | |
2677 if (FAILED(hr)) { | |
2678 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, | |
2679 "Failed to create output sample.", PLATFORM_FAILURE,); | |
2680 } | |
2681 | |
2682 base::win::ScopedComPtr<IMFMediaBuffer> output_buffer; | |
2683 hr = MFCreateDXGISurfaceBuffer( | |
2684 __uuidof(ID3D11Texture2D), dest_texture, 0, FALSE, | |
2685 output_buffer.Receive()); | |
2686 if (FAILED(hr)) { | |
2687 base::debug::Alias(&hr); | |
2688 // TODO(ananta) | |
2689 // Remove this CHECK when the change to use DX11 for H/W decoding | |
2690 // stabilizes.
2691 CHECK(false); | |
2692 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, | |
2693 "Failed to create output sample.", PLATFORM_FAILURE,); | |
2694 } | |
2695 | |
2696 output_sample->AddBuffer(output_buffer.get()); | |
2697 | |
2698 hr = video_format_converter_mft_->ProcessInput(0, video_frame, 0); | |
2699 if (FAILED(hr)) { | |
2700 DCHECK(false); | |
2701 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, | |
2702 "Failed to convert output sample format.", PLATFORM_FAILURE,); | |
2703 } | |
2704 | |
2705 DWORD status = 0; | |
2706 MFT_OUTPUT_DATA_BUFFER format_converter_output = {}; | |
2707 format_converter_output.pSample = output_sample.get(); | |
2708 hr = video_format_converter_mft_->ProcessOutput( | |
2709 0, // No flags | |
2710 1, // # of out streams to pull from | |
2711 &format_converter_output, | |
2712 &status); | |
2713 | |
2714 if (FAILED(hr)) { | |
2715 base::debug::Alias(&hr); | |
2716 // TODO(ananta) | |
2717 // Remove this CHECK when the change to use DX11 for H/W decoding | |
2718 // stabilizes.
2719 CHECK(false); | |
2720 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, | |
2721 "Failed to convert output sample format.", PLATFORM_FAILURE,); | |
2722 } | |
2723 | |
2724 if (dest_keyed_mutex) { | |
2725 HRESULT hr = dest_keyed_mutex->ReleaseSync(keyed_mutex_value + 1); | |
2726 RETURN_AND_NOTIFY_ON_FAILURE(hr == S_OK, "Failed to release keyed mutex.", | |
2727 PLATFORM_FAILURE, ); | |
2728 | |
2729 main_thread_task_runner_->PostTask( | |
2730 FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::CopySurfaceComplete, | |
2731 weak_this_factory_.GetWeakPtr(), nullptr, nullptr, | |
2732 picture_buffer_id, input_buffer_id)); | |
2733 } else { | |
2734 d3d11_device_context_->Flush(); | |
2735 d3d11_device_context_->End(d3d11_query_.get()); | |
2736 | |
2737 decoder_thread_task_runner_->PostDelayedTask( | |
2738 FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder, | |
2739 base::Unretained(this), 0, | |
2740 reinterpret_cast<IDirect3DSurface9*>(NULL), | |
2741 reinterpret_cast<IDirect3DSurface9*>(NULL), | |
2742 picture_buffer_id, input_buffer_id), | |
2743 base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs)); | |
2744 } | |
2745 } | |
2746 | |
2747 void DXVAVideoDecodeAccelerator::FlushDecoder( | |
2748 int iterations, | |
2749 IDirect3DSurface9* src_surface, | |
2750 IDirect3DSurface9* dest_surface, | |
2751 int picture_buffer_id, | |
2752 int input_buffer_id) { | |
2753 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
2754 | |
2755 // The DXVA decoder has its own device which it uses for decoding. ANGLE
2756 // has its own device which we don't have access to.
2757 // The CopySurface/CopyTexture code attempts to copy the decoded picture into
2758 // a surface which is owned by ANGLE. As there are multiple devices involved,
2759 // that copy is not synchronous.
2760 // We attempt to flush the batched operations to ensure that the picture is
2761 // copied to the surface owned by ANGLE.
2762 // We need to do this in a loop and call flush multiple times.
2763 // We have seen the GetData call for flushing the command buffer fail to
2764 // return success occasionally on multi-core machines, leading to an
2765 // infinite loop.
2766 // The workaround is an upper limit of 4 on the number of iterations to
2767 // wait for the flush to finish.
2768 | |
2769 HRESULT hr = E_FAIL; | |
2770 if (use_dx11_) { | |
2771 BOOL query_data = 0; | |
2772 hr = d3d11_device_context_->GetData(d3d11_query_.get(), &query_data, | |
2773 sizeof(BOOL), 0); | |
2774 if (FAILED(hr)) { | |
2775 base::debug::Alias(&hr); | |
2776 // TODO(ananta) | |
2777 // Remove this CHECK when the change to use DX11 for H/W decoding | |
2778 // stabilizes.
2779 CHECK(false); | |
2780 } | |
2781 } else { | |
2782 hr = query_->GetData(NULL, 0, D3DGETDATA_FLUSH); | |
2783 } | |
2784 | |
2785 if ((hr == S_FALSE) && (++iterations < kMaxIterationsForD3DFlush)) { | |
2786 decoder_thread_task_runner_->PostDelayedTask( | |
2787 FROM_HERE, | |
2788 base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder, | |
2789 base::Unretained(this), iterations, src_surface, | |
2790 dest_surface, picture_buffer_id, input_buffer_id), | |
2791 base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs)); | |
2792 return; | |
2793 } | |
2794 | |
2795 main_thread_task_runner_->PostTask( | |
2796 FROM_HERE, | |
2797 base::Bind(&DXVAVideoDecodeAccelerator::CopySurfaceComplete, | |
2798 weak_this_factory_.GetWeakPtr(), | |
2799 src_surface, | |
2800 dest_surface, | |
2801 picture_buffer_id, | |
2802 input_buffer_id)); | |
2803 } | |
2804 | |
2805 bool DXVAVideoDecodeAccelerator::InitializeDX11VideoFormatConverterMediaType( | |
2806 int width, int height) { | |
2807 if (!dx11_video_format_converter_media_type_needs_init_) | |
2808 return true; | |
2809 | |
2810 CHECK(video_format_converter_mft_.get()); | |
2811 | |
2812 HRESULT hr = video_format_converter_mft_->ProcessMessage( | |
2813 MFT_MESSAGE_SET_D3D_MANAGER, | |
2814 reinterpret_cast<ULONG_PTR>( | |
2815 d3d11_device_manager_.get())); | |
2816 | |
2817 if (FAILED(hr)) { | |
2818 base::debug::Alias(&hr); | |
2819 // TODO(ananta) | |
2820 // Remove this CHECK when the change to use DX11 for H/W decoding | |
2821 // stabilizes.
2822 CHECK(false); | |
2823 } | |
2824 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, | |
2825 "Failed to initialize video format converter", PLATFORM_FAILURE, false); | |
2826 | |
2827 video_format_converter_mft_->ProcessMessage( | |
2828 MFT_MESSAGE_NOTIFY_END_STREAMING, 0); | |
2829 | |
  base::win::ScopedComPtr<IMFMediaType> media_type;
  hr = MFCreateMediaType(media_type.Receive());
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "MFCreateMediaType failed",
                                  PLATFORM_FAILURE, false);

  hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set major input type",
                                  PLATFORM_FAILURE, false);

  hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12);
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set input subtype",
                                  PLATFORM_FAILURE, false);

  hr = MFSetAttributeSize(media_type.get(), MF_MT_FRAME_SIZE, width, height);
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set media type attributes",
                                  PLATFORM_FAILURE, false);

  hr = video_format_converter_mft_->SetInputType(0, media_type.get(), 0);
  if (FAILED(hr)) {
    base::debug::Alias(&hr);
    // TODO(ananta)
    // Remove this CHECK when the change to use DX11 for H/W decoding
    // stabilizes.
    CHECK(false);
  }
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set converter input type",
                                  PLATFORM_FAILURE, false);

  // It appears that we fail to set MFVideoFormat_ARGB32 as the output media
  // type in certain configurations. Try to fall back to MFVideoFormat_RGB32
  // in such cases. If both fail, then bail.
  bool media_type_set = SetTransformOutputType(
      video_format_converter_mft_.get(), MFVideoFormat_ARGB32, width, height);
  if (!media_type_set) {
    media_type_set = SetTransformOutputType(
        video_format_converter_mft_.get(), MFVideoFormat_RGB32, width, height);
  }

  if (!media_type_set) {
    LOG(ERROR) << "Failed to find a matching RGB output type in the converter";
    // Remove this CHECK once this stabilizes in the field.
    CHECK(false);
    return false;
  }

  dx11_video_format_converter_media_type_needs_init_ = false;
  return true;
}
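
// For reference, the input-side negotiation above reduces to the following
// sequence (a sketch with error handling elided; |converter|, |width| and
// |height| are placeholders for the member and arguments used above):
//
//   base::win::ScopedComPtr<IMFMediaType> type;
//   MFCreateMediaType(type.Receive());
//   type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
//   type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12);
//   MFSetAttributeSize(type.get(), MF_MT_FRAME_SIZE, width, height);
//   converter->SetInputType(0, type.get(), 0);
//
// The output side is negotiated by SetTransformOutputType() below, trying
// MFVideoFormat_ARGB32 first and falling back to MFVideoFormat_RGB32.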

bool DXVAVideoDecodeAccelerator::GetVideoFrameDimensions(
    IMFSample* sample,
    int* width,
    int* height) {
  base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
  HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from output sample", false);

  if (use_dx11_) {
    base::win::ScopedComPtr<IMFDXGIBuffer> dxgi_buffer;
    base::win::ScopedComPtr<ID3D11Texture2D> d3d11_texture;
    hr = dxgi_buffer.QueryFrom(output_buffer.get());
    RETURN_ON_HR_FAILURE(hr, "Failed to get DXGIBuffer from output sample",
                         false);
    hr = dxgi_buffer->GetResource(
        __uuidof(ID3D11Texture2D),
        reinterpret_cast<void**>(d3d11_texture.Receive()));
    RETURN_ON_HR_FAILURE(hr, "Failed to get D3D11Texture from output buffer",
                         false);
    D3D11_TEXTURE2D_DESC d3d11_texture_desc;
    d3d11_texture->GetDesc(&d3d11_texture_desc);
    *width = d3d11_texture_desc.Width;
    *height = d3d11_texture_desc.Height;
  } else {
    base::win::ScopedComPtr<IDirect3DSurface9> surface;
    hr = MFGetService(output_buffer.get(), MR_BUFFER_SERVICE,
                      IID_PPV_ARGS(surface.Receive()));
    RETURN_ON_HR_FAILURE(hr, "Failed to get D3D surface from output sample",
                         false);
    D3DSURFACE_DESC surface_desc;
    hr = surface->GetDesc(&surface_desc);
    RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false);
    *width = surface_desc.Width;
    *height = surface_desc.Height;
  }
  return true;
}
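
// The two branches above follow different interface chains to reach the
// frame dimensions; summarized (error checks elided, names as above):
//
//   DX11: IMFMediaBuffer -> IMFDXGIBuffer::GetResource ->
//         ID3D11Texture2D::GetDesc -> D3D11_TEXTURE2D_DESC.Width/.Height
//   DX9:  IMFMediaBuffer -> MFGetService(MR_BUFFER_SERVICE) ->
//         IDirect3DSurface9::GetDesc -> D3DSURFACE_DESC.Width/.Height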

bool DXVAVideoDecodeAccelerator::SetTransformOutputType(
    IMFTransform* transform,
    const GUID& output_type,
    int width,
    int height) {
  HRESULT hr = E_FAIL;
  base::win::ScopedComPtr<IMFMediaType> media_type;

  for (uint32_t i = 0;
       SUCCEEDED(
           transform->GetOutputAvailableType(0, i, media_type.Receive()));
       ++i) {
    GUID out_subtype = {0};
    hr = media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
    RETURN_ON_HR_FAILURE(hr, "Failed to get output subtype", false);

    if (out_subtype == output_type) {
      if (width && height) {
        hr = MFSetAttributeSize(media_type.get(), MF_MT_FRAME_SIZE, width,
                                height);
        RETURN_ON_HR_FAILURE(hr, "Failed to set media type attributes", false);
      }
      hr = transform->SetOutputType(0, media_type.get(), 0);  // No flags.
      RETURN_ON_HR_FAILURE(hr, "Failed to set output type", false);
      return true;
    }
    media_type.Release();
  }
  return false;
}
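
// Example usage of SetTransformOutputType(), generalized to an ordered
// preference list. This is a hypothetical sketch, not part of this file;
// InitializeDX11VideoFormatConverterMediaType() above hard-codes the
// two-entry ARGB32/RGB32 fallback.
//
//   const GUID kPreferredSubtypes[] = {MFVideoFormat_ARGB32,
//                                      MFVideoFormat_RGB32};
//   bool type_set = false;
//   for (const GUID& subtype : kPreferredSubtypes) {
//     if (SetTransformOutputType(transform, subtype, width, height)) {
//       type_set = true;
//       break;
//     }
//   }
//
// Passing width == 0 and height == 0 skips MFSetAttributeSize and accepts
// the frame size already advertised by the transform.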

HRESULT DXVAVideoDecodeAccelerator::CheckConfigChanged(IMFSample* sample,
                                                       bool* config_changed) {
  if (codec_ != media::kCodecH264)
    return S_FALSE;

  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
  HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from input sample", hr);

  MediaBufferScopedPointer scoped_media_buffer(buffer.get());

  if (!config_change_detector_->DetectConfig(
          scoped_media_buffer.get(), scoped_media_buffer.current_length())) {
    RETURN_ON_HR_FAILURE(E_FAIL, "Failed to detect H.264 stream config",
                         E_FAIL);
  }
  *config_changed = config_change_detector_->config_changed();
  return S_OK;
}
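
// Sketch of how a caller is expected to use CheckConfigChanged(): probe each
// input sample before decoding and restart the decoder on a mid-stream
// change. |sample| is a placeholder; the real call site (not shown here)
// drives ConfigChanged() below. Note that for non-H.264 streams the function
// returns S_FALSE without touching |*config_changed|, so initialize it to
// false as done here.
//
//   bool config_changed = false;
//   HRESULT hr = CheckConfigChanged(sample.get(), &config_changed);
//   if (SUCCEEDED(hr) && config_changed) {
//     main_thread_task_runner_->PostTask(
//         FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::ConfigChanged,
//                               weak_this_factory_.GetWeakPtr(), config_));
//   }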

void DXVAVideoDecodeAccelerator::ConfigChanged(const Config& config) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  SetState(kConfigChange);
  DismissStaleBuffers(true);
  Invalidate();
  Initialize(config_, client_);
  decoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
                 base::Unretained(this)));
}

}  // namespace content