Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(340)

Side by Side Diff: content/common/gpu/media/dxva_video_decode_accelerator_win.cc

Issue 1882373004: Migrate content/common/gpu/media code to media/gpu (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Fix prefix to content references in content_gpu.gypi Created 4 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/common/gpu/media/dxva_video_decode_accelerator_win.h"
6
7 #include <memory>
8
9 #if !defined(OS_WIN)
10 #error This file should only be built on Windows.
11 #endif // !defined(OS_WIN)
12
13 #include <codecapi.h>
14 #include <dxgi1_2.h>
15 #include <ks.h>
16 #include <mfapi.h>
17 #include <mferror.h>
18 #include <ntverp.h>
19 #include <stddef.h>
20 #include <string.h>
21 #include <wmcodecdsp.h>
22
23 #include "base/base_paths_win.h"
24 #include "base/bind.h"
25 #include "base/callback.h"
26 #include "base/debug/alias.h"
27 #include "base/file_version_info.h"
28 #include "base/files/file_path.h"
29 #include "base/logging.h"
30 #include "base/macros.h"
31 #include "base/memory/shared_memory.h"
32 #include "base/message_loop/message_loop.h"
33 #include "base/path_service.h"
34 #include "base/trace_event/trace_event.h"
35 #include "base/win/windows_version.h"
36 #include "build/build_config.h"
37 #include "media/base/win/mf_initializer.h"
38 #include "media/video/video_decode_accelerator.h"
39 #include "third_party/angle/include/EGL/egl.h"
40 #include "third_party/angle/include/EGL/eglext.h"
41 #include "ui/gl/gl_bindings.h"
42 #include "ui/gl/gl_context.h"
43 #include "ui/gl/gl_fence.h"
44 #include "ui/gl/gl_surface_egl.h"
45
46 namespace {
47
48 // Path is appended on to the PROGRAM_FILES base path.
49 const wchar_t kVPXDecoderDLLPath[] = L"Intel\\Media SDK\\";
50
// Architecture-specific file names of the Intel Media SDK VP8/VP9 decoder
// MFT DLLs, loaded from kVPXDecoderDLLPath.
51 const wchar_t kVP8DecoderDLLName[] =
52 #if defined(ARCH_CPU_X86)
53 L"mfx_mft_vp8vd_32.dll";
54 #elif defined(ARCH_CPU_X86_64)
55 L"mfx_mft_vp8vd_64.dll";
56 #else
57 #error Unsupported Windows CPU Architecture
58 #endif
59
60 const wchar_t kVP9DecoderDLLName[] =
61 #if defined(ARCH_CPU_X86)
62 L"mfx_mft_vp9vd_32.dll";
63 #elif defined(ARCH_CPU_X86_64)
64 L"mfx_mft_vp9vd_64.dll";
65 #else
66 #error Unsupported Windows CPU Architecture
67 #endif
68
// Class IDs used to instantiate the VP8/VP9 decoder transforms.
// NOTE(review): presumably these are the CLSIDs exported by the Intel Media
// SDK DLLs above — confirm against the DLL registration.
69 const CLSID CLSID_WebmMfVp8Dec = {
70 0x451e3cb7,
71 0x2622,
72 0x4ba5,
73 { 0x8e, 0x1d, 0x44, 0xb3, 0xc4, 0x1d, 0x09, 0x24 }
74 };
75
76 const CLSID CLSID_WebmMfVp9Dec = {
77 0x07ab4bd2,
78 0x1979,
79 0x4fcd,
80 { 0xa6, 0x97, 0xdf, 0x9a, 0xd1, 0x5b, 0x34, 0xfe }
81 };
82
// Media subtypes for VP8/VP9 bitstreams. The first DWORD is the FourCC:
// 0x30385056 is 'VP80' and 0x30395056 is 'VP90' (little endian); the rest is
// the standard FourCC GUID suffix.
83 const CLSID MEDIASUBTYPE_VP80 = {
84 0x30385056,
85 0x0000,
86 0x0010,
87 { 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 }
88 };
89
90 const CLSID MEDIASUBTYPE_VP90 = {
91 0x30395056,
92 0x0000,
93 0x0010,
94 { 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 }
95 };
96
97 // The CLSID of the video processor media foundation transform which we use for
98 // texture color conversion in DX11.
99 // Defined in mfidl.h in the Windows 10 SDK. ntverp.h provides VER_PRODUCTBUILD
100 // to detect which SDK we are compiling with.
// The guard avoids a duplicate-definition error when building with an SDK
// that already declares CLSID_VideoProcessorMFT.
101 #if VER_PRODUCTBUILD < 10011 // VER_PRODUCTBUILD for 10.0.10158.0 SDK.
102 DEFINE_GUID(CLSID_VideoProcessorMFT,
103 0x88753b26, 0x5b24, 0x49bd, 0xb2, 0xe7, 0xc, 0x44, 0x5c, 0x78,
104 0xc9, 0x82);
105 #endif
106
107 // MF_XVP_PLAYBACK_MODE
108 // Data type: UINT32 (treat as BOOL)
109 // If this attribute is TRUE, the video processor will run in playback mode
110 // where it allows callers to allocate output samples and allows last frame
111 // regeneration (repaint).
112 DEFINE_GUID(MF_XVP_PLAYBACK_MODE, 0x3c5d293f, 0xad67, 0x4e29, 0xaf, 0x12,
113 0xcf, 0x3e, 0x23, 0x8a, 0xcc, 0xe9);
114
115 // Defines the GUID for the Intel H264 DXVA device.
// NOTE(review): appears to be a vendor-specific DXVA2 decoder mode GUID (not
// part of the standard DXVA2 mode list) — confirm with Intel documentation.
116 static const GUID DXVA2_Intel_ModeH264_E = {
117 0x604F8E68, 0x4951, 0x4c54,{ 0x88, 0xFE, 0xAB, 0xD2, 0x5C, 0x15, 0xB3, 0xD6}
118 };
119
120 // R600, R700, Evergreen and Cayman AMD cards. These support DXVA via UVD3
121 // or earlier, and don't handle resolutions higher than 1920 x 1088 well.
// NOTE(review): entries appear to be PCI device IDs for the GPUs in
// question — confirm against the PCI ID database before editing.
122 static const DWORD g_AMDUVD3GPUList[] = {
123 0x9400, 0x9401, 0x9402, 0x9403, 0x9405, 0x940a, 0x940b, 0x940f, 0x94c0,
124 0x94c1, 0x94c3, 0x94c4, 0x94c5, 0x94c6, 0x94c7, 0x94c8, 0x94c9, 0x94cb,
125 0x94cc, 0x94cd, 0x9580, 0x9581, 0x9583, 0x9586, 0x9587, 0x9588, 0x9589,
126 0x958a, 0x958b, 0x958c, 0x958d, 0x958e, 0x958f, 0x9500, 0x9501, 0x9504,
127 0x9505, 0x9506, 0x9507, 0x9508, 0x9509, 0x950f, 0x9511, 0x9515, 0x9517,
128 0x9519, 0x95c0, 0x95c2, 0x95c4, 0x95c5, 0x95c6, 0x95c7, 0x95c9, 0x95cc,
129 0x95cd, 0x95ce, 0x95cf, 0x9590, 0x9591, 0x9593, 0x9595, 0x9596, 0x9597,
130 0x9598, 0x9599, 0x959b, 0x9610, 0x9611, 0x9612, 0x9613, 0x9614, 0x9615,
131 0x9616, 0x9710, 0x9711, 0x9712, 0x9713, 0x9714, 0x9715, 0x9440, 0x9441,
132 0x9442, 0x9443, 0x9444, 0x9446, 0x944a, 0x944b, 0x944c, 0x944e, 0x9450,
133 0x9452, 0x9456, 0x945a, 0x945b, 0x945e, 0x9460, 0x9462, 0x946a, 0x946b,
134 0x947a, 0x947b, 0x9480, 0x9487, 0x9488, 0x9489, 0x948a, 0x948f, 0x9490,
135 0x9491, 0x9495, 0x9498, 0x949c, 0x949e, 0x949f, 0x9540, 0x9541, 0x9542,
136 0x954e, 0x954f, 0x9552, 0x9553, 0x9555, 0x9557, 0x955f, 0x94a0, 0x94a1,
137 0x94a3, 0x94b1, 0x94b3, 0x94b4, 0x94b5, 0x94b9, 0x68e0, 0x68e1, 0x68e4,
138 0x68e5, 0x68e8, 0x68e9, 0x68f1, 0x68f2, 0x68f8, 0x68f9, 0x68fa, 0x68fe,
139 0x68c0, 0x68c1, 0x68c7, 0x68c8, 0x68c9, 0x68d8, 0x68d9, 0x68da, 0x68de,
140 0x68a0, 0x68a1, 0x68a8, 0x68a9, 0x68b0, 0x68b8, 0x68b9, 0x68ba, 0x68be,
141 0x68bf, 0x6880, 0x6888, 0x6889, 0x688a, 0x688c, 0x688d, 0x6898, 0x6899,
142 0x689b, 0x689e, 0x689c, 0x689d, 0x9802, 0x9803, 0x9804, 0x9805, 0x9806,
143 0x9807, 0x9808, 0x9809, 0x980a, 0x9640, 0x9641, 0x9647, 0x9648, 0x964a,
144 0x964b, 0x964c, 0x964e, 0x964f, 0x9642, 0x9643, 0x9644, 0x9645, 0x9649,
145 0x6720, 0x6721, 0x6722, 0x6723, 0x6724, 0x6725, 0x6726, 0x6727, 0x6728,
146 0x6729, 0x6738, 0x6739, 0x673e, 0x6740, 0x6741, 0x6742, 0x6743, 0x6744,
147 0x6745, 0x6746, 0x6747, 0x6748, 0x6749, 0x674a, 0x6750, 0x6751, 0x6758,
148 0x6759, 0x675b, 0x675d, 0x675f, 0x6840, 0x6841, 0x6842, 0x6843, 0x6849,
149 0x6850, 0x6858, 0x6859, 0x6760, 0x6761, 0x6762, 0x6763, 0x6764, 0x6765,
150 0x6766, 0x6767, 0x6768, 0x6770, 0x6771, 0x6772, 0x6778, 0x6779, 0x677b,
151 0x6700, 0x6701, 0x6702, 0x6703, 0x6704, 0x6705, 0x6706, 0x6707, 0x6708,
152 0x6709, 0x6718, 0x6719, 0x671c, 0x671d, 0x671f, 0x683D, 0x9900, 0x9901,
153 0x9903, 0x9904, 0x9905, 0x9906, 0x9907, 0x9908, 0x9909, 0x990a, 0x990b,
154 0x990c, 0x990d, 0x990e, 0x990f, 0x9910, 0x9913, 0x9917, 0x9918, 0x9919,
155 0x9990, 0x9991, 0x9992, 0x9993, 0x9994, 0x9995, 0x9996, 0x9997, 0x9998,
156 0x9999, 0x999a, 0x999b, 0x999c, 0x999d, 0x99a0, 0x99a2, 0x99a4,
157 };
158
159 // Legacy Intel GPUs (Second generation) which have trouble with resolutions
160 // higher than 1920 x 1088
161 static const DWORD g_IntelLegacyGPUList[] = {
162 0x102, 0x106, 0x116, 0x126,
163 };
164
165 // Provides scoped access to the underlying buffer in an IMFMediaBuffer
166 // instance.
167 class MediaBufferScopedPointer {
168 public:
169 MediaBufferScopedPointer(IMFMediaBuffer* media_buffer)
170 : media_buffer_(media_buffer),
171 buffer_(nullptr),
172 max_length_(0),
173 current_length_(0) {
174 HRESULT hr = media_buffer_->Lock(&buffer_, &max_length_, &current_length_);
175 CHECK(SUCCEEDED(hr));
176 }
177
178 ~MediaBufferScopedPointer() {
179 HRESULT hr = media_buffer_->Unlock();
180 CHECK(SUCCEEDED(hr));
181 }
182
183 uint8_t* get() {
184 return buffer_;
185 }
186
187 DWORD current_length() const {
188 return current_length_;
189 }
190
191 private:
192 base::win::ScopedComPtr<IMFMediaBuffer> media_buffer_;
193 uint8_t* buffer_;
194 DWORD max_length_;
195 DWORD current_length_;
196
197 DISALLOW_COPY_AND_ASSIGN(MediaBufferScopedPointer);
198 };
199
200 } // namespace
201
202 namespace content {
203
// Codec profiles this accelerator advertises; Initialize() rejects any
// Config::profile not present in this list.
204 static const media::VideoCodecProfile kSupportedProfiles[] = {
205 media::H264PROFILE_BASELINE,
206 media::H264PROFILE_MAIN,
207 media::H264PROFILE_HIGH,
208 media::VP8PROFILE_ANY,
209 media::VP9PROFILE_PROFILE0,
210 media::VP9PROFILE_PROFILE1,
211 media::VP9PROFILE_PROFILE2,
212 media::VP9PROFILE_PROFILE3
213 };
214
// Static function pointer for creating the DXGI device manager.
// NOTE(review): presumably resolved at runtime via GetProcAddress elsewhere
// in this file — confirm where it is assigned before use.
215 CreateDXGIDeviceManager DXVAVideoDecodeAccelerator::create_dxgi_device_manager_
216 = NULL;
217
// Logs |log| and returns |ret| from the enclosing function when |result| is
// falsy. Wrapped in do/while(0) so it behaves as a single statement.
218 #define RETURN_ON_FAILURE(result, log, ret) \
219 do { \
220 if (!(result)) { \
221 DLOG(ERROR) << log; \
222 return ret; \
223 } \
224 } while (0)
225
// HRESULT flavor of RETURN_ON_FAILURE.
// NOTE(review): |result| is expanded twice (once in SUCCEEDED() and once in
// the log expression), so callers must not pass expressions with side
// effects.
226 #define RETURN_ON_HR_FAILURE(result, log, ret) \
227 RETURN_ON_FAILURE(SUCCEEDED(result), \
228 log << ", HRESULT: 0x" << std::hex << result, \
229 ret);
230
// Like RETURN_ON_FAILURE but also reports |error_code| via StopOnError()
// before returning, so the client is notified of the failure.
231 #define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret) \
232 do { \
233 if (!(result)) { \
234 DVLOG(1) << log; \
235 StopOnError(error_code); \
236 return ret; \
237 } \
238 } while (0)
239
// HRESULT flavor of RETURN_AND_NOTIFY_ON_FAILURE; same double-expansion
// caveat as RETURN_ON_HR_FAILURE above.
240 #define RETURN_AND_NOTIFY_ON_HR_FAILURE(result, log, error_code, ret) \
241 RETURN_AND_NOTIFY_ON_FAILURE(SUCCEEDED(result), \
242 log << ", HRESULT: 0x" << std::hex << result, \
243 error_code, ret);
244
// Tuning constants for the decode pipeline, gathered in an anonymous enum so
// they can be used as integral constants.
245 enum {
246 // Maximum number of iterations we allow before aborting the attempt to flush
247 // the batched queries to the driver and allow torn/corrupt frames to be
248 // rendered.
// NOTE(review): the "Ms" suffix suggests this is a timeout in milliseconds,
// while the comment above describes an iteration count — confirm which
// meaning the usage sites rely on.
249 kFlushDecoderSurfaceTimeoutMs = 1,
250 // Maximum iterations where we try to flush the d3d device.
251 kMaxIterationsForD3DFlush = 4,
252 // Maximum iterations where we try to flush the ANGLE device before reusing
253 // the texture.
254 kMaxIterationsForANGLEReuseFlush = 16,
255 // We only request 5 picture buffers from the client which are used to hold
256 // the decoded samples. These buffers are then reused when the client tells
257 // us that it is done with the buffer.
258 kNumPictureBuffers = 5,
259 // The keyed mutex should always be released before the other thread
260 // attempts to acquire it, so AcquireSync should always return immediately.
261 kAcquireSyncWaitMs = 0,
262 };
263
264 static IMFSample* CreateEmptySample() {
265 base::win::ScopedComPtr<IMFSample> sample;
266 HRESULT hr = MFCreateSample(sample.Receive());
267 RETURN_ON_HR_FAILURE(hr, "MFCreateSample failed", NULL);
268 return sample.Detach();
269 }
270
271 // Creates a Media Foundation sample with one buffer of length |buffer_length|
272 // on a |align|-byte boundary. Alignment must be a perfect power of 2 or 0.
273 static IMFSample* CreateEmptySampleWithBuffer(uint32_t buffer_length,
274 int align) {
275 CHECK_GT(buffer_length, 0U);
276
277 base::win::ScopedComPtr<IMFSample> sample;
278 sample.Attach(CreateEmptySample());
279
280 base::win::ScopedComPtr<IMFMediaBuffer> buffer;
281 HRESULT hr = E_FAIL;
282 if (align == 0) {
283 // Note that MFCreateMemoryBuffer is same as MFCreateAlignedMemoryBuffer
284 // with the align argument being 0.
285 hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive());
286 } else {
287 hr = MFCreateAlignedMemoryBuffer(buffer_length,
288 align - 1,
289 buffer.Receive());
290 }
291 RETURN_ON_HR_FAILURE(hr, "Failed to create memory buffer for sample", NULL);
292
293 hr = sample->AddBuffer(buffer.get());
294 RETURN_ON_HR_FAILURE(hr, "Failed to add buffer to sample", NULL);
295
296 buffer->SetCurrentLength(0);
297 return sample.Detach();
298 }
299
300 // Creates a Media Foundation sample with one buffer containing a copy of the
301 // given Annex B stream data.
302 // If duration and sample time are not known, provide 0.
303 // |min_size| specifies the minimum size of the buffer (might be required by
304 // the decoder for input). If no alignment is required, provide 0.
305 static IMFSample* CreateInputSample(const uint8_t* stream,
306 uint32_t size,
307 uint32_t min_size,
308 int alignment) {
309 CHECK(stream);
310 CHECK_GT(size, 0U);
311 base::win::ScopedComPtr<IMFSample> sample;
312 sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size),
313 alignment));
314 RETURN_ON_FAILURE(sample.get(), "Failed to create empty sample", NULL);
315
316 base::win::ScopedComPtr<IMFMediaBuffer> buffer;
317 HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive());
318 RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from sample", NULL);
319
320 DWORD max_length = 0;
321 DWORD current_length = 0;
322 uint8_t* destination = NULL;
323 hr = buffer->Lock(&destination, &max_length, &current_length);
324 RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", NULL);
325
326 CHECK_EQ(current_length, 0u);
327 CHECK_GE(max_length, size);
328 memcpy(destination, stream, size);
329
330 hr = buffer->SetCurrentLength(size);
331 RETURN_ON_HR_FAILURE(hr, "Failed to set buffer length", NULL);
332
333 hr = buffer->Unlock();
334 RETURN_ON_HR_FAILURE(hr, "Failed to unlock buffer", NULL);
335
336 return sample.Detach();
337 }
338
339 // Helper function to create a COM object instance from a DLL. The alternative
340 // is to use the CoCreateInstance API which requires the COM apartment to be
341 // initialized which is not the case on the GPU main thread. We want to avoid
342 // initializing COM as it may have sideeffects.
343 HRESULT CreateCOMObjectFromDll(HMODULE dll, const CLSID& clsid, const IID& iid,
344 void** object) {
345 if (!dll || !object)
346 return E_INVALIDARG;
347
348 using GetClassObject = HRESULT (WINAPI*)(
349 const CLSID& clsid, const IID& iid, void** object);
350
351 GetClassObject get_class_object = reinterpret_cast<GetClassObject>(
352 GetProcAddress(dll, "DllGetClassObject"));
353 RETURN_ON_FAILURE(
354 get_class_object, "Failed to get DllGetClassObject pointer", E_FAIL);
355
356 base::win::ScopedComPtr<IClassFactory> factory;
357 HRESULT hr = get_class_object(
358 clsid,
359 __uuidof(IClassFactory),
360 factory.ReceiveVoid());
361 RETURN_ON_HR_FAILURE(hr, "DllGetClassObject failed", hr);
362
363 hr = factory->CreateInstance(NULL, iid, object);
364 return hr;
365 }
366
367 // Helper function to query the ANGLE device object. The template argument T
368 // identifies the device interface being queried. IDirect3DDevice9Ex for d3d9
369 // and ID3D11Device for dx11.
370 template<class T>
371 base::win::ScopedComPtr<T> QueryDeviceObjectFromANGLE(int object_type) {
372 base::win::ScopedComPtr<T> device_object;
373
374 EGLDisplay egl_display = nullptr;
375 intptr_t egl_device = 0;
376 intptr_t device = 0;
377
378 {
379 TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. GetHardwareDisplay");
380 egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
381 }
382
383 RETURN_ON_FAILURE(
384 gfx::GLSurfaceEGL::HasEGLExtension("EGL_EXT_device_query"),
385 "EGL_EXT_device_query missing",
386 device_object);
387
388 PFNEGLQUERYDISPLAYATTRIBEXTPROC QueryDisplayAttribEXT = nullptr;
389
390 {
391 TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. eglGetProcAddress");
392
393 QueryDisplayAttribEXT =
394 reinterpret_cast<PFNEGLQUERYDISPLAYATTRIBEXTPROC>(eglGetProcAddress(
395 "eglQueryDisplayAttribEXT"));
396
397 RETURN_ON_FAILURE(
398 QueryDisplayAttribEXT,
399 "Failed to get the eglQueryDisplayAttribEXT function from ANGLE",
400 device_object);
401 }
402
403 PFNEGLQUERYDEVICEATTRIBEXTPROC QueryDeviceAttribEXT = nullptr;
404
405 {
406 TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. eglGetProcAddress");
407
408 QueryDeviceAttribEXT =
409 reinterpret_cast<PFNEGLQUERYDEVICEATTRIBEXTPROC>(eglGetProcAddress(
410 "eglQueryDeviceAttribEXT"));
411
412 RETURN_ON_FAILURE(
413 QueryDeviceAttribEXT,
414 "Failed to get the eglQueryDeviceAttribEXT function from ANGLE",
415 device_object);
416 }
417
418 {
419 TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. QueryDisplayAttribEXT");
420
421 RETURN_ON_FAILURE(
422 QueryDisplayAttribEXT(egl_display, EGL_DEVICE_EXT, &egl_device),
423 "The eglQueryDisplayAttribEXT function failed to get the EGL device",
424 device_object);
425 }
426
427 RETURN_ON_FAILURE(
428 egl_device,
429 "Failed to get the EGL device",
430 device_object);
431
432 {
433 TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. QueryDisplayAttribEXT");
434
435 RETURN_ON_FAILURE(
436 QueryDeviceAttribEXT(
437 reinterpret_cast<EGLDeviceEXT>(egl_device), object_type, &device),
438 "The eglQueryDeviceAttribEXT function failed to get the device",
439 device_object);
440
441 RETURN_ON_FAILURE(device, "Failed to get the ANGLE device", device_object);
442 }
443
444 device_object = reinterpret_cast<T*>(device);
445 return device_object;
446 }
447
// All change-tracking state starts cleared; the H264Parser itself is created
// lazily on the first DetectConfig() call.
448 H264ConfigChangeDetector::H264ConfigChangeDetector()
449 : last_sps_id_(0),
450 last_pps_id_(0),
451 config_changed_(false),
452 pending_config_changed_(false) {
453 }
454
455 H264ConfigChangeDetector::~H264ConfigChangeDetector() {
456 }
457
// Scans |stream| (|size| bytes of Annex B H.264) for SPS/PPS changes.
// Sets |config_changed_| when a new SPS/PPS takes effect (i.e. after an IDR
// slice is seen); otherwise the change is deferred via
// |pending_config_changed_| until the next IDR. Returns false on parse
// errors.
458 bool H264ConfigChangeDetector::DetectConfig(const uint8_t* stream,
459 unsigned int size) {
460 std::vector<uint8_t> sps;
461 std::vector<uint8_t> pps;
462 media::H264NALU nalu;
463 bool idr_seen = false;
464
// Lazily construct the parser the first time we are called.
465 if (!parser_.get())
466 parser_.reset(new media::H264Parser);
467
468 parser_->SetStream(stream, size);
469 config_changed_ = false;
470
// Walk every NALU in the buffer, remembering the raw bytes of any SPS/PPS
// encountered so they can be compared against the previous ones below.
471 while (true) {
472 media::H264Parser::Result result = parser_->AdvanceToNextNALU(&nalu);
473
474 if (result == media::H264Parser::kEOStream)
475 break;
476
477 if (result == media::H264Parser::kUnsupportedStream) {
478 DLOG(ERROR) << "Unsupported H.264 stream";
479 return false;
480 }
481
482 if (result != media::H264Parser::kOk) {
483 DLOG(ERROR) << "Failed to parse H.264 stream";
484 return false;
485 }
486
487 switch (nalu.nal_unit_type) {
488 case media::H264NALU::kSPS:
489 result = parser_->ParseSPS(&last_sps_id_);
490 if (result == media::H264Parser::kUnsupportedStream) {
491 DLOG(ERROR) << "Unsupported SPS";
492 return false;
493 }
494
495 if (result != media::H264Parser::kOk) {
496 DLOG(ERROR) << "Could not parse SPS";
497 return false;
498 }
499
500 sps.assign(nalu.data, nalu.data + nalu.size);
501 break;
502
503 case media::H264NALU::kPPS:
504 result = parser_->ParsePPS(&last_pps_id_);
505 if (result == media::H264Parser::kUnsupportedStream) {
506 DLOG(ERROR) << "Unsupported PPS";
507 return false;
508 }
509 if (result != media::H264Parser::kOk) {
510 DLOG(ERROR) << "Could not parse PPS";
511 return false;
512 }
513 pps.assign(nalu.data, nalu.data + nalu.size);
514 break;
515
516 case media::H264NALU::kIDRSlice:
517 idr_seen = true;
518 // If we previously detected a configuration change, and see an IDR
519 // slice next time around, we need to flag a configuration change.
520 if (pending_config_changed_) {
521 config_changed_ = true;
522 pending_config_changed_ = false;
523 }
524 break;
525
526 default:
527 break;
528 }
529 }
530
// Compare the newly seen parameter sets against the previous ones. A change
// only takes effect at an IDR; otherwise it is recorded as pending.
531 if (!sps.empty() && sps != last_sps_) {
532 if (!last_sps_.empty()) {
533 // Flag configuration changes after we see an IDR slice.
534 if (idr_seen) {
535 config_changed_ = true;
536 } else {
537 pending_config_changed_ = true;
538 }
539 }
540 last_sps_.swap(sps);
541 }
542
543 if (!pps.empty() && pps != last_pps_) {
544 if (!last_pps_.empty()) {
545 // Flag configuration changes after we see an IDR slice.
546 if (idr_seen) {
547 config_changed_ = true;
548 } else {
549 pending_config_changed_ = true;
550 }
551 }
552 last_pps_.swap(pps);
553 }
554 return true;
555 }
556
557 // Maintains information about a DXVA picture buffer, i.e. whether it is
558 // available for rendering, the texture information, etc.
559 struct DXVAVideoDecodeAccelerator::DXVAPictureBuffer {
560 public:
// Factory: wraps |buffer| and binds it to an EGL pbuffer surface backed by
// a shareable D3D texture. Returns a null linked_ptr on failure.
561 static linked_ptr<DXVAPictureBuffer> Create(
562 const DXVAVideoDecodeAccelerator& decoder,
563 const media::PictureBuffer& buffer,
564 EGLConfig egl_config);
565 ~DXVAPictureBuffer();
566
// Creates the backing D3D9/DX11 texture and obtains its share handle.
567 bool InitializeTexture(const DXVAVideoDecodeAccelerator& decoder,
568 bool use_rgb);
569
// Releases surface/texture references and marks the buffer available again.
570 bool ReusePictureBuffer();
571 void ResetReuseFence();
572 // Copies the output sample data to the picture buffer provided by the
573 // client.
574 // The dest_surface parameter contains the decoded bits.
575 bool CopyOutputSampleDataToPictureBuffer(
576 DXVAVideoDecodeAccelerator* decoder,
577 IDirect3DSurface9* dest_surface,
578 ID3D11Texture2D* dx11_texture,
579 int input_buffer_id);
580
581 bool available() const {
582 return available_;
583 }
584
585 void set_available(bool available) {
586 available_ = available;
587 }
588
589 int id() const {
590 return picture_buffer_.id();
591 }
592
593 gfx::Size size() const {
594 return picture_buffer_.size();
595 }
596
597 bool waiting_to_reuse() const { return waiting_to_reuse_; }
598
599 gfx::GLFence* reuse_fence() { return reuse_fence_.get(); }
600
601 // Called when the source surface |src_surface| is copied to the destination
602 // |dest_surface|
603 bool CopySurfaceComplete(IDirect3DSurface9* src_surface,
604 IDirect3DSurface9* dest_surface);
605
606 private:
607 explicit DXVAPictureBuffer(const media::PictureBuffer& buffer);
608
609 bool available_;
610
611 // This is true if the decoder is currently waiting on the fence before
612 // reusing the buffer.
613 bool waiting_to_reuse_;
614 media::PictureBuffer picture_buffer_;
615 EGLSurface decoding_surface_;
616 std::unique_ptr<gfx::GLFence> reuse_fence_;
617
// Share handle for the backing texture. NOTE(review): not closed in the
// destructor — presumably owned by the D3D runtime; confirm.
618 HANDLE texture_share_handle_;
619 base::win::ScopedComPtr<IDirect3DTexture9> decoding_texture_;
620 base::win::ScopedComPtr<ID3D11Texture2D> dx11_decoding_texture_;
621
622 base::win::ScopedComPtr<IDXGIKeyedMutex> egl_keyed_mutex_;
623 base::win::ScopedComPtr<IDXGIKeyedMutex> dx11_keyed_mutex_;
624
625 // This is the last value that was used to release the keyed mutex.
626 uint64_t keyed_mutex_value_;
627
628 // The following |IDirect3DSurface9| interface pointers are used to hold
629 // references on the surfaces during the course of a StretchRect operation
630 // to copy the source surface to the target. The references are released
631 // when the StretchRect operation i.e. the copy completes.
632 base::win::ScopedComPtr<IDirect3DSurface9> decoder_surface_;
633 base::win::ScopedComPtr<IDirect3DSurface9> target_surface_;
634
635 // This ID3D11Texture2D interface pointer is used to hold a reference to the
636 // decoder texture during the course of a copy operation. This reference is
637 // released when the copy completes.
638 base::win::ScopedComPtr<ID3D11Texture2D> decoder_dx11_texture_;
639
640 // Set to true if RGB is supported by the texture.
641 // Defaults to true.
642 bool use_rgb_;
643
644 DISALLOW_COPY_AND_ASSIGN(DXVAPictureBuffer);
645 };
646
647 // static
648 linked_ptr<DXVAVideoDecodeAccelerator::DXVAPictureBuffer>
649 DXVAVideoDecodeAccelerator::DXVAPictureBuffer::Create(
650 const DXVAVideoDecodeAccelerator& decoder,
651 const media::PictureBuffer& buffer,
652 EGLConfig egl_config) {
653 linked_ptr<DXVAPictureBuffer> picture_buffer(new DXVAPictureBuffer(buffer));
654
655 EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
656
657 EGLint use_rgb = 1;
658 eglGetConfigAttrib(egl_display, egl_config, EGL_BIND_TO_TEXTURE_RGB,
659 &use_rgb);
660
661 if (!picture_buffer->InitializeTexture(decoder, !!use_rgb))
662 return linked_ptr<DXVAPictureBuffer>(nullptr);
663
664 EGLint attrib_list[] = {
665 EGL_WIDTH, buffer.size().width(),
666 EGL_HEIGHT, buffer.size().height(),
667 EGL_TEXTURE_FORMAT, use_rgb ? EGL_TEXTURE_RGB : EGL_TEXTURE_RGBA,
668 EGL_TEXTURE_TARGET, EGL_TEXTURE_2D,
669 EGL_NONE
670 };
671
672 picture_buffer->decoding_surface_ = eglCreatePbufferFromClientBuffer(
673 egl_display, EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE,
674 picture_buffer->texture_share_handle_, egl_config, attrib_list);
675 RETURN_ON_FAILURE(picture_buffer->decoding_surface_,
676 "Failed to create surface",
677 linked_ptr<DXVAPictureBuffer>(NULL));
678 if (decoder.d3d11_device_ && decoder.use_keyed_mutex_) {
679 void* keyed_mutex = nullptr;
680 EGLBoolean ret = eglQuerySurfacePointerANGLE(
681 egl_display, picture_buffer->decoding_surface_,
682 EGL_DXGI_KEYED_MUTEX_ANGLE, &keyed_mutex);
683 RETURN_ON_FAILURE(keyed_mutex && ret == EGL_TRUE,
684 "Failed to query ANGLE keyed mutex",
685 linked_ptr<DXVAPictureBuffer>(nullptr));
686 picture_buffer->egl_keyed_mutex_ = base::win::ScopedComPtr<IDXGIKeyedMutex>(
687 static_cast<IDXGIKeyedMutex*>(keyed_mutex));
688 }
689 picture_buffer->use_rgb_ = !!use_rgb;
690 return picture_buffer;
691 }
692
693 bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::InitializeTexture(
694 const DXVAVideoDecodeAccelerator& decoder,
695 bool use_rgb) {
696 DCHECK(!texture_share_handle_);
697 if (decoder.d3d11_device_) {
698 D3D11_TEXTURE2D_DESC desc;
699 desc.Width = picture_buffer_.size().width();
700 desc.Height = picture_buffer_.size().height();
701 desc.MipLevels = 1;
702 desc.ArraySize = 1;
703 desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
704 desc.SampleDesc.Count = 1;
705 desc.SampleDesc.Quality = 0;
706 desc.Usage = D3D11_USAGE_DEFAULT;
707 desc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
708 desc.CPUAccessFlags = 0;
709 desc.MiscFlags = decoder.use_keyed_mutex_
710 ? D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX
711 : D3D11_RESOURCE_MISC_SHARED;
712
713 HRESULT hr = decoder.d3d11_device_->CreateTexture2D(
714 &desc, nullptr, dx11_decoding_texture_.Receive());
715 RETURN_ON_HR_FAILURE(hr, "Failed to create texture", false);
716 if (decoder.use_keyed_mutex_) {
717 hr = dx11_keyed_mutex_.QueryFrom(dx11_decoding_texture_.get());
718 RETURN_ON_HR_FAILURE(hr, "Failed to get keyed mutex", false);
719 }
720
721 base::win::ScopedComPtr<IDXGIResource> resource;
722 hr = resource.QueryFrom(dx11_decoding_texture_.get());
723 DCHECK(SUCCEEDED(hr));
724 hr = resource->GetSharedHandle(&texture_share_handle_);
725 RETURN_ON_FAILURE(SUCCEEDED(hr) && texture_share_handle_,
726 "Failed to query shared handle", false);
727
728 } else {
729 HRESULT hr = E_FAIL;
730 hr = decoder.d3d9_device_ex_->CreateTexture(
731 picture_buffer_.size().width(), picture_buffer_.size().height(), 1,
732 D3DUSAGE_RENDERTARGET, use_rgb ? D3DFMT_X8R8G8B8 : D3DFMT_A8R8G8B8,
733 D3DPOOL_DEFAULT, decoding_texture_.Receive(), &texture_share_handle_);
734 RETURN_ON_HR_FAILURE(hr, "Failed to create texture", false);
735 RETURN_ON_FAILURE(texture_share_handle_, "Failed to query shared handle",
736 false);
737 }
738 return true;
739 }
740
741 DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer(
742 const media::PictureBuffer& buffer)
743 : available_(true),
744 waiting_to_reuse_(false),
745 picture_buffer_(buffer),
746 decoding_surface_(NULL),
747 texture_share_handle_(nullptr),
748 keyed_mutex_value_(0),
749 use_rgb_(true) {}
750
751 DXVAVideoDecodeAccelerator::DXVAPictureBuffer::~DXVAPictureBuffer() {
752 if (decoding_surface_) {
753 EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
754
755 eglReleaseTexImage(
756 egl_display,
757 decoding_surface_,
758 EGL_BACK_BUFFER);
759
760 eglDestroySurface(
761 egl_display,
762 decoding_surface_);
763 decoding_surface_ = NULL;
764 }
765 }
766
767 bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ReusePictureBuffer() {
768 DCHECK(decoding_surface_);
769 EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
770 eglReleaseTexImage(
771 egl_display,
772 decoding_surface_,
773 EGL_BACK_BUFFER);
774 decoder_surface_.Release();
775 target_surface_.Release();
776 decoder_dx11_texture_.Release();
777 waiting_to_reuse_ = false;
778 set_available(true);
779 if (egl_keyed_mutex_) {
780 HRESULT hr = egl_keyed_mutex_->ReleaseSync(++keyed_mutex_value_);
781 RETURN_ON_FAILURE(hr == S_OK, "Could not release sync mutex", false);
782 }
783 return true;
784 }
785
786 void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ResetReuseFence() {
787 if (!reuse_fence_ || !reuse_fence_->ResetSupported())
788 reuse_fence_.reset(gfx::GLFence::Create());
789 else
790 reuse_fence_->ResetState();
791 waiting_to_reuse_ = true;
792 }
793
794 bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::
795 CopyOutputSampleDataToPictureBuffer(
796 DXVAVideoDecodeAccelerator* decoder,
797 IDirect3DSurface9* dest_surface,
798 ID3D11Texture2D* dx11_texture,
799 int input_buffer_id) {
800 DCHECK(dest_surface || dx11_texture);
801 if (dx11_texture) {
802 // Grab a reference on the decoder texture. This reference will be released
803 // when we receive a notification that the copy was completed or when the
804 // DXVAPictureBuffer instance is destroyed.
805 decoder_dx11_texture_ = dx11_texture;
806 decoder->CopyTexture(dx11_texture, dx11_decoding_texture_.get(),
807 dx11_keyed_mutex_, keyed_mutex_value_, NULL, id(),
808 input_buffer_id);
809 return true;
810 }
811 D3DSURFACE_DESC surface_desc;
812 HRESULT hr = dest_surface->GetDesc(&surface_desc);
813 RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false);
814
815 D3DSURFACE_DESC texture_desc;
816 decoding_texture_->GetLevelDesc(0, &texture_desc);
817
818 if (texture_desc.Width != surface_desc.Width ||
819 texture_desc.Height != surface_desc.Height) {
820 NOTREACHED() << "Decode surface of different dimension than texture";
821 return false;
822 }
823
824 hr = decoder->d3d9_->CheckDeviceFormatConversion(
825 D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, surface_desc.Format,
826 use_rgb_ ? D3DFMT_X8R8G8B8 : D3DFMT_A8R8G8B8);
827 RETURN_ON_HR_FAILURE(hr, "Device does not support format converision", false);
828
829 // The same picture buffer can be reused for a different frame. Release the
830 // target surface and the decoder references here.
831 target_surface_.Release();
832 decoder_surface_.Release();
833
834 // Grab a reference on the decoder surface and the target surface. These
835 // references will be released when we receive a notification that the
836 // copy was completed or when the DXVAPictureBuffer instance is destroyed.
837 // We hold references here as it is easier to manage their lifetimes.
838 hr = decoding_texture_->GetSurfaceLevel(0, target_surface_.Receive());
839 RETURN_ON_HR_FAILURE(hr, "Failed to get surface from texture", false);
840
841 decoder_surface_ = dest_surface;
842
843 decoder->CopySurface(decoder_surface_.get(), target_surface_.get(), id(),
844 input_buffer_id);
845 return true;
846 }
847
// Completion callback for the asynchronous surface/texture copy started in
// CopyOutputSampleDataToPictureBuffer(). Drops the references held for the
// copy and (re)binds the EGL surface contents to the client's GL texture.
848 bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::CopySurfaceComplete(
849 IDirect3DSurface9* src_surface,
850 IDirect3DSurface9* dest_surface) {
851 DCHECK(!available());
852
// Save the currently bound texture so it can be restored before returning.
853 GLint current_texture = 0;
854 glGetIntegerv(GL_TEXTURE_BINDING_2D, &current_texture);
855
856 glBindTexture(GL_TEXTURE_2D, picture_buffer_.texture_ids()[0]);
857
858 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
859
// Non-null surfaces indicate the D3D9 path; otherwise this completes a DX11
// texture copy.
860 if (src_surface && dest_surface) {
861 DCHECK_EQ(src_surface, decoder_surface_.get());
862 DCHECK_EQ(dest_surface, target_surface_.get());
863 decoder_surface_.Release();
864 target_surface_.Release();
865 } else {
866 DCHECK(decoder_dx11_texture_.get());
867 decoder_dx11_texture_.Release();
868 }
// Acquire the keyed mutex before the texture contents are consumed via GL;
// kAcquireSyncWaitMs is 0 because the producer has already released it.
869 if (egl_keyed_mutex_) {
870 keyed_mutex_value_++;
871 HRESULT result =
872 egl_keyed_mutex_->AcquireSync(keyed_mutex_value_, kAcquireSyncWaitMs);
873 RETURN_ON_FAILURE(result == S_OK, "Could not acquire sync mutex", false);
874 }
875
876 EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
877 eglBindTexImage(
878 egl_display,
879 decoding_surface_,
880 EGL_BACK_BUFFER);
881
882 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
883 glBindTexture(GL_TEXTURE_2D, current_texture);
884 return true;
885 }
886
// Holds a decoded output sample together with the id of the bitstream buffer
// that produced it. |picture_buffer_id| starts at -1, i.e. not yet bound to a
// client picture buffer.
887 DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
888 int32_t buffer_id,
889 IMFSample* sample)
890 : input_buffer_id(buffer_id), picture_buffer_id(-1) {
// Attach() adopts the caller's reference without an extra AddRef.
891 output_sample.Attach(sample);
892 }
893
894 DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
895 const PendingSampleInfo& other) = default;
896
897 DXVAVideoDecodeAccelerator::PendingSampleInfo::~PendingSampleInfo() {}
898
// Initializes all state to "uninitialized/empty"; the actual decoder setup
// happens later in Initialize().
899 DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
900 const GetGLContextCallback& get_gl_context_cb,
901 const MakeGLContextCurrentCallback& make_context_current_cb,
902 bool enable_accelerated_vpx_decode)
903 : client_(NULL),
904 dev_manager_reset_token_(0),
905 dx11_dev_manager_reset_token_(0),
906 egl_config_(NULL),
907 state_(kUninitialized),
908 pictures_requested_(false),
909 inputs_before_decode_(0),
910 sent_drain_message_(false),
911 get_gl_context_cb_(get_gl_context_cb),
912 make_context_current_cb_(make_context_current_cb),
913 codec_(media::kUnknownVideoCodec),
914 decoder_thread_("DXVAVideoDecoderThread"),
915 pending_flush_(false),
916 use_dx11_(false),
917 use_keyed_mutex_(false),
918 dx11_video_format_converter_media_type_needs_init_(true),
919 using_angle_device_(false),
920 enable_accelerated_vpx_decode_(enable_accelerated_vpx_decode),
921 weak_this_factory_(this) {
// Cache a weak pointer up front for posting tasks back to this object.
922 weak_ptr_ = weak_this_factory_.GetWeakPtr();
923 memset(&input_stream_info_, 0, sizeof(input_stream_info_));
924 memset(&output_stream_info_, 0, sizeof(output_stream_info_));
925 }
926
927 DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() {
928 client_ = NULL;
929 }
930
// Initializes the accelerator for |config| on the main thread: validates the
// profile and runtime prerequisites (MF DLLs, required EGL/GL features),
// creates the decoder MFT, queries stream info, and starts streaming plus the
// decoder thread. Returns false (after notifying |client| via the
// RETURN_AND_NOTIFY_* macros where applicable) on any failure.
bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
                                            Client* client) {
  if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
    NOTREACHED() << "GL callbacks are required for this VDA";
    return false;
  }

  if (config.is_encrypted) {
    NOTREACHED() << "Encrypted streams are not supported for this VDA";
    return false;
  }

  client_ = client;

  main_thread_task_runner_ = base::MessageLoop::current()->task_runner();

  // Reject profiles outside the static whitelist before touching any system
  // resources.
  bool profile_supported = false;
  for (const auto& supported_profile : kSupportedProfiles) {
    if (config.profile == supported_profile) {
      profile_supported = true;
      break;
    }
  }
  if (!profile_supported) {
    RETURN_AND_NOTIFY_ON_FAILURE(false,
        "Unsupported h.264, vp8, or vp9 profile", PLATFORM_FAILURE, false);
  }

  // Not all versions of Windows 7 and later include Media Foundation DLLs.
  // Instead of crashing while delay loading the DLL when calling MFStartup()
  // below, probe whether we can successfully load the DLL now.
  // See http://crbug.com/339678 for details.
  HMODULE dxgi_manager_dll = ::GetModuleHandle(L"MFPlat.dll");
  RETURN_ON_FAILURE(dxgi_manager_dll, "MFPlat.dll is required for decoding",
                    false);

// On Windows 8+ mfplat.dll provides the MFCreateDXGIDeviceManager API.
// On Windows 7 mshtmlmedia.dll provides it.

// TODO(ananta)
// The code below works, as in we can create the DX11 device manager for
// Windows 7. However the IMFTransform we use for texture conversion and
// copy does not exist on Windows 7. Look into an alternate approach
// and enable the code below.
#if defined(ENABLE_DX11_FOR_WIN7)
  if (base::win::GetVersion() == base::win::VERSION_WIN7) {
    dxgi_manager_dll = ::GetModuleHandle(L"mshtmlmedia.dll");
    RETURN_ON_FAILURE(dxgi_manager_dll,
                      "mshtmlmedia.dll is required for decoding", false);
  }
#endif
  // If we don't find the MFCreateDXGIDeviceManager API we fallback to D3D9
  // decoding.
  if (dxgi_manager_dll && !create_dxgi_device_manager_) {
    create_dxgi_device_manager_ = reinterpret_cast<CreateDXGIDeviceManager>(
        ::GetProcAddress(dxgi_manager_dll, "MFCreateDXGIDeviceManager"));
  }

  // The share-handle extension is needed to hand decoded D3D surfaces to GL.
  RETURN_AND_NOTIFY_ON_FAILURE(
      gfx::g_driver_egl.ext.b_EGL_ANGLE_surface_d3d_texture_2d_share_handle,
      "EGL_ANGLE_surface_d3d_texture_2d_share_handle unavailable",
      PLATFORM_FAILURE,
      false);

  RETURN_AND_NOTIFY_ON_FAILURE(gfx::GLFence::IsSupported(),
                               "GL fences are unsupported", PLATFORM_FAILURE,
                               false);

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state == kUninitialized),
      "Initialize: invalid state: " << state, ILLEGAL_STATE, false);

  media::InitializeMediaFoundation();

  RETURN_AND_NOTIFY_ON_FAILURE(InitDecoder(config.profile),
      "Failed to initialize decoder", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(GetStreamsInfoAndBufferReqs(),
      "Failed to get input/output stream info.", PLATFORM_FAILURE, false);

  // Tell the MFT we are about to start feeding it data.
  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0),
      "Send MFT_MESSAGE_NOTIFY_BEGIN_STREAMING notification failed",
      PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0),
      "Send MFT_MESSAGE_NOTIFY_START_OF_STREAM notification failed",
      PLATFORM_FAILURE, false);

  config_ = config;

  config_change_detector_.reset(new H264ConfigChangeDetector);

  SetState(kNormal);

  StartDecoderThread();
  return true;
}
1030
// Creates the D3D9Ex device and DXVA device manager used for the D3D9
// decoding path. Reuses ANGLE's D3D9 device when one is available (setting
// |using_angle_device_|); otherwise creates a minimal off-screen device.
// Returns false on any D3D failure.
bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() {
  TRACE_EVENT0("gpu", "DXVAVideoDecodeAccelerator_CreateD3DDevManager");

  HRESULT hr = E_FAIL;

  hr = Direct3DCreate9Ex(D3D_SDK_VERSION, d3d9_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Direct3DCreate9Ex failed", false);

  // Prefer the device ANGLE already created, if it can be queried.
  base::win::ScopedComPtr<IDirect3DDevice9> angle_device =
      QueryDeviceObjectFromANGLE<IDirect3DDevice9>(EGL_D3D9_DEVICE_ANGLE);
  if (angle_device.get())
    using_angle_device_ = true;

  if (using_angle_device_) {
    hr = d3d9_device_ex_.QueryFrom(angle_device.get());
    RETURN_ON_HR_FAILURE(hr,
        "QueryInterface for IDirect3DDevice9Ex from angle device failed",
        false);
  } else {
    // 1x1 windowless back buffer: the device is used only for video
    // decoding/copying, never for presentation.
    D3DPRESENT_PARAMETERS present_params = {0};
    present_params.BackBufferWidth = 1;
    present_params.BackBufferHeight = 1;
    present_params.BackBufferFormat = D3DFMT_UNKNOWN;
    present_params.BackBufferCount = 1;
    present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
    present_params.hDeviceWindow = NULL;
    present_params.Windowed = TRUE;
    present_params.Flags = D3DPRESENTFLAG_VIDEO;
    present_params.FullScreen_RefreshRateInHz = 0;
    present_params.PresentationInterval = 0;

    // D3DCREATE_MULTITHREADED because the device is touched from more than
    // one thread (main and decoder threads).
    hr = d3d9_->CreateDeviceEx(D3DADAPTER_DEFAULT,
                               D3DDEVTYPE_HAL,
                               NULL,
                               D3DCREATE_FPU_PRESERVE |
                               D3DCREATE_HARDWARE_VERTEXPROCESSING |
                               D3DCREATE_DISABLE_PSGP_THREADING |
                               D3DCREATE_MULTITHREADED,
                               &present_params,
                               NULL,
                               d3d9_device_ex_.Receive());
    RETURN_ON_HR_FAILURE(hr, "Failed to create D3D device", false);
  }

  hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token_,
                                         device_manager_.Receive());
  RETURN_ON_HR_FAILURE(hr, "DXVA2CreateDirect3DDeviceManager9 failed", false);

  hr = device_manager_->ResetDevice(d3d9_device_ex_.get(),
                                    dev_manager_reset_token_);
  RETURN_ON_HR_FAILURE(hr, "Failed to reset device", false);

  hr = d3d9_device_ex_->CreateQuery(D3DQUERYTYPE_EVENT, query_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to create D3D device query", false);
  // Ensure query_ API works (to avoid an infinite loop later in
  // CopyOutputSampleDataToPictureBuffer).
  hr = query_->Issue(D3DISSUE_END);
  RETURN_ON_HR_FAILURE(hr, "Failed to issue END test query", false);
  return true;
}
1091
// Creates the DX11 device, device manager, event query and the video
// processor MFT used to convert/copy decoded textures on the DX11 path.
// Returns false on failure (and intentionally CHECKs if the video processor
// MFT cannot be created — see TODO below).
bool DXVAVideoDecodeAccelerator::CreateDX11DevManager() {
  HRESULT hr = create_dxgi_device_manager_(&dx11_dev_manager_reset_token_,
                                           d3d11_device_manager_.Receive());
  RETURN_ON_HR_FAILURE(hr, "MFCreateDXGIDeviceManager failed", false);

  // This array defines the set of DirectX hardware feature levels we support.
  // The ordering MUST be preserved. All applications are assumed to support
  // 9.1 unless otherwise stated by the application.
  D3D_FEATURE_LEVEL feature_levels[] = {
      D3D_FEATURE_LEVEL_11_1,
      D3D_FEATURE_LEVEL_11_0,
      D3D_FEATURE_LEVEL_10_1,
      D3D_FEATURE_LEVEL_10_0,
      D3D_FEATURE_LEVEL_9_3,
      D3D_FEATURE_LEVEL_9_2,
      D3D_FEATURE_LEVEL_9_1
  };

  // VIDEO_SUPPORT is required for the video APIs used below.
  UINT flags = D3D11_CREATE_DEVICE_VIDEO_SUPPORT;

#if defined _DEBUG
  flags |= D3D11_CREATE_DEVICE_DEBUG;
#endif

  D3D_FEATURE_LEVEL feature_level_out = D3D_FEATURE_LEVEL_11_0;
  hr = D3D11CreateDevice(NULL,
                         D3D_DRIVER_TYPE_HARDWARE,
                         NULL,
                         flags,
                         feature_levels,
                         arraysize(feature_levels),
                         D3D11_SDK_VERSION,
                         d3d11_device_.Receive(),
                         &feature_level_out,
                         d3d11_device_context_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to create DX11 device", false);

  // Enable multithreaded mode on the device. This ensures that accesses to
  // context are synchronized across threads. We have multiple threads
  // accessing the context, the media foundation decoder threads and the
  // decoder thread via the video format conversion transform.
  hr = multi_threaded_.QueryFrom(d3d11_device_.get());
  RETURN_ON_HR_FAILURE(hr, "Failed to query ID3D10Multithread", false);
  multi_threaded_->SetMultithreadProtected(TRUE);

  hr = d3d11_device_manager_->ResetDevice(d3d11_device_.get(),
                                          dx11_dev_manager_reset_token_);
  RETURN_ON_HR_FAILURE(hr, "Failed to reset device", false);

  // Event query used to wait for GPU copy completion.
  D3D11_QUERY_DESC query_desc;
  query_desc.Query = D3D11_QUERY_EVENT;
  query_desc.MiscFlags = 0;
  hr = d3d11_device_->CreateQuery(
      &query_desc,
      d3d11_query_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to create DX11 device query", false);

  HMODULE video_processor_dll = ::GetModuleHandle(L"msvproc.dll");
  RETURN_ON_FAILURE(video_processor_dll, "Failed to load video processor",
                    false);

  hr = CreateCOMObjectFromDll(
      video_processor_dll,
      CLSID_VideoProcessorMFT,
      __uuidof(IMFTransform),
      video_format_converter_mft_.ReceiveVoid());
  if (FAILED(hr)) {
    // Alias |hr| so it is visible in crash dumps.
    base::debug::Alias(&hr);
    // TODO(ananta)
    // Remove this CHECK when the change to use DX11 for H/W decoding
    // stablizes.
    CHECK(false);
  }

  RETURN_ON_HR_FAILURE(hr, "Failed to create video format converter", false);

  base::win::ScopedComPtr<IMFAttributes> converter_attributes;
  hr = video_format_converter_mft_->GetAttributes(
      converter_attributes.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get converter attributes", false);

  hr = converter_attributes->SetUINT32(MF_XVP_PLAYBACK_MODE, TRUE);
  RETURN_ON_HR_FAILURE(
      hr,
      "Failed to set MF_XVP_PLAYBACK_MODE attribute on converter",
      false);

  hr = converter_attributes->SetUINT32(MF_LOW_LATENCY, FALSE);
  RETURN_ON_HR_FAILURE(
      hr,
      "Failed to set MF_LOW_LATENCY attribute on converter",
      false);
  return true;
}
1186
// Client entry point (main thread): wraps one bitstream buffer in an
// IMFSample and hands it off to the decoder thread. Errors are reported to
// the client via the RETURN_AND_NOTIFY_* macros.
void DXVAVideoDecodeAccelerator::Decode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  // SharedMemory will take over the ownership of handle.
  base::SharedMemory shm(bitstream_buffer.handle(), true);

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state == kNormal || state == kStopped ||
                                state == kFlushing),
      "Invalid state: " << state, ILLEGAL_STATE,);
  if (bitstream_buffer.id() < 0) {
    RETURN_AND_NOTIFY_ON_FAILURE(
        false, "Invalid bitstream_buffer, id: " << bitstream_buffer.id(),
        INVALID_ARGUMENT, );
  }

  base::win::ScopedComPtr<IMFSample> sample;
  RETURN_AND_NOTIFY_ON_FAILURE(shm.Map(bitstream_buffer.size()),
                               "Failed in base::SharedMemory::Map",
                               PLATFORM_FAILURE, );

  // Copy the bitstream data out of shared memory into an MF sample; the
  // shared memory segment is only valid for the duration of this call.
  sample.Attach(CreateInputSample(
      reinterpret_cast<const uint8_t*>(shm.memory()), bitstream_buffer.size(),
      std::min<uint32_t>(bitstream_buffer.size(), input_stream_info_.cbSize),
      input_stream_info_.cbAlignment));
  RETURN_AND_NOTIFY_ON_FAILURE(sample.get(), "Failed to create input sample",
                               PLATFORM_FAILURE, );

  // The sample timestamp is (ab)used to carry the bitstream buffer id through
  // the decoder so outputs can be matched back to inputs.
  RETURN_AND_NOTIFY_ON_HR_FAILURE(sample->SetSampleTime(bitstream_buffer.id()),
      "Failed to associate input buffer id with sample", PLATFORM_FAILURE,);

  // NOTE(review): base::Unretained is presumably safe because the decoder
  // thread is stopped before |this| is destroyed (see Reset()) — confirm
  // against Invalidate(), which is outside this view.
  decoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::DecodeInternal,
                 base::Unretained(this), sample));
}
1224
// Client entry point (main thread): receives the picture buffers allocated in
// response to ProvidePictureBuffers, wraps each in a DXVAPictureBuffer, then
// drains any samples that were waiting for buffers.
void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
    const std::vector<media::PictureBuffer>& buffers) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state != kUninitialized),
      "Invalid state: " << state, ILLEGAL_STATE,);
  RETURN_AND_NOTIFY_ON_FAILURE((kNumPictureBuffers >= buffers.size()),
      "Failed to provide requested picture buffers. (Got " << buffers.size() <<
      ", requested " << kNumPictureBuffers << ")", INVALID_ARGUMENT,);

  // Copy the picture buffers provided by the client to the available list,
  // and mark these buffers as available for use.
  for (size_t buffer_index = 0; buffer_index < buffers.size();
       ++buffer_index) {
    DCHECK_LE(1u, buffers[buffer_index].texture_ids().size());
    linked_ptr<DXVAPictureBuffer> picture_buffer =
        DXVAPictureBuffer::Create(*this, buffers[buffer_index], egl_config_);
    RETURN_AND_NOTIFY_ON_FAILURE(picture_buffer.get(),
        "Failed to allocate picture buffer", PLATFORM_FAILURE,);

    bool inserted = output_picture_buffers_.insert(std::make_pair(
        buffers[buffer_index].id(), picture_buffer)).second;
    DCHECK(inserted);  // Buffer ids from the client must be unique.
  }

  ProcessPendingSamples();
  // A flush may have been blocked waiting for picture buffers; resume it.
  if (pending_flush_) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                   base::Unretained(this)));
  }
}
1259
// Client entry point (main thread): the client is done displaying
// |picture_buffer_id| and returns it for reuse. Stale buffers (from before a
// resolution change) are dismissed instead of reused. On the keyed-mutex /
// ANGLE-device paths the buffer is recycled immediately; otherwise a GL fence
// is armed and completion is polled via WaitForOutputBuffer().
void DXVAVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state != kUninitialized),
      "Invalid state: " << state, ILLEGAL_STATE,);

  if (output_picture_buffers_.empty() && stale_output_picture_buffers_.empty())
    return;

  OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
  // If we didn't find the picture id in the |output_picture_buffers_| map we
  // try the |stale_output_picture_buffers_| map, as this may have been an
  // output picture buffer from before a resolution change, that at resolution
  // change time had yet to be displayed. The client is calling us back to tell
  // us that we can now recycle this picture buffer, so if we were waiting to
  // dispose of it we now can.
  if (it == output_picture_buffers_.end()) {
    if (!stale_output_picture_buffers_.empty()) {
      it = stale_output_picture_buffers_.find(picture_buffer_id);
      RETURN_AND_NOTIFY_ON_FAILURE(it != stale_output_picture_buffers_.end(),
          "Invalid picture id: " << picture_buffer_id, INVALID_ARGUMENT,);
      main_thread_task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&DXVAVideoDecodeAccelerator::DeferredDismissStaleBuffer,
                     weak_this_factory_.GetWeakPtr(), picture_buffer_id));
    }
    return;
  }

  // Ignore duplicate reuse notifications.
  if (it->second->available() || it->second->waiting_to_reuse())
    return;

  if (use_keyed_mutex_ || using_angle_device_) {
    RETURN_AND_NOTIFY_ON_FAILURE(it->second->ReusePictureBuffer(),
                                 "Failed to reuse picture buffer",
                                 PLATFORM_FAILURE, );

    ProcessPendingSamples();
    if (pending_flush_) {
      decoder_thread_task_runner_->PostTask(
          FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                                base::Unretained(this)));
    }
  } else {
    RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
                                 "Failed to make context current",
                                 PLATFORM_FAILURE, );
    // Arm a fence and poll for its completion before actually recycling.
    it->second->ResetReuseFence();

    WaitForOutputBuffer(picture_buffer_id, 0);
  }
}
1313
// Polls the reuse fence of |picture_buffer_id| on the main thread. If the
// fence is not yet signaled (and we have not exceeded
// kMaxIterationsForANGLEReuseFlush attempts), re-posts itself with a delay;
// otherwise recycles the buffer and resumes pending samples / a pending
// flush. |count| is the number of polls performed so far.
void DXVAVideoDecodeAccelerator::WaitForOutputBuffer(int32_t picture_buffer_id,
                                                     int count) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
  OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
  // The buffer may have been dismissed (e.g. resolution change) meanwhile.
  if (it == output_picture_buffers_.end())
    return;

  DXVAPictureBuffer* picture_buffer = it->second.get();

  DCHECK(!picture_buffer->available());
  DCHECK(picture_buffer->waiting_to_reuse());

  gfx::GLFence* fence = picture_buffer->reuse_fence();
  RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
                               "Failed to make context current",
                               PLATFORM_FAILURE, );
  if (count <= kMaxIterationsForANGLEReuseFlush && !fence->HasCompleted()) {
    main_thread_task_runner_->PostDelayedTask(
        FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::WaitForOutputBuffer,
                              weak_this_factory_.GetWeakPtr(),
                              picture_buffer_id, count + 1),
        base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
    return;
  }
  // Either the fence completed or we gave up waiting; recycle regardless.
  RETURN_AND_NOTIFY_ON_FAILURE(picture_buffer->ReusePictureBuffer(),
                               "Failed to reuse picture buffer",
                               PLATFORM_FAILURE, );

  ProcessPendingSamples();
  if (pending_flush_) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                   base::Unretained(this)));
  }
}
1350
// Client entry point (main thread): requests that all pending inputs be
// decoded and all outputs delivered. Transitions to kFlushing and delegates
// the actual drain to FlushInternal on the decoder thread; |pending_flush_|
// lets other paths re-post FlushInternal if the flush has to wait.
void DXVAVideoDecodeAccelerator::Flush() {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  DVLOG(1) << "DXVAVideoDecodeAccelerator::Flush";

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state == kNormal || state == kStopped),
      "Unexpected decoder state: " << state, ILLEGAL_STATE,);

  SetState(kFlushing);

  pending_flush_ = true;

  decoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                 base::Unretained(this)));
}
1369
// Client entry point (main thread): drops all pending work and returns the
// decoder to kNormal. Stops the decoder thread first so no decode tasks run
// while pending output samples are being discarded, then flushes the MFT and
// restarts the thread.
void DXVAVideoDecodeAccelerator::Reset() {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  DVLOG(1) << "DXVAVideoDecodeAccelerator::Reset";

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state == kNormal || state == kStopped),
      "Reset: invalid state: " << state, ILLEGAL_STATE,);

  // Blocks until the decoder thread has finished its queued tasks.
  decoder_thread_.Stop();

  SetState(kResetting);

  // If we have pending output frames waiting for display then we drop those
  // frames and set the corresponding picture buffer as available.
  PendingOutputSamples::iterator index;
  for (index = pending_output_samples_.begin();
       index != pending_output_samples_.end();
       ++index) {
    if (index->picture_buffer_id != -1) {
      OutputBuffers::iterator it = output_picture_buffers_.find(
          index->picture_buffer_id);
      if (it != output_picture_buffers_.end()) {
        DXVAPictureBuffer* picture_buffer = it->second.get();
        picture_buffer->ReusePictureBuffer();
      }
    }
  }

  pending_output_samples_.clear();

  NotifyInputBuffersDropped();

  // Ask the MFT to discard any internally buffered data as well.
  RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0),
      "Reset: Failed to send message.", PLATFORM_FAILURE,);

  main_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::NotifyResetDone,
                 weak_this_factory_.GetWeakPtr()));

  StartDecoderThread();
  SetState(kNormal);
}
1414
// Client entry point (main thread): tears down the decoder and deletes
// |this|, per the VideoDecodeAccelerator::Destroy() contract. Callers must
// not touch the object afterwards.
void DXVAVideoDecodeAccelerator::Destroy() {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
  Invalidate();
  delete this;
}
1420
// This VDA does not support running Decode() calls on a separate client
// thread; always declines the request.
bool DXVAVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
    const base::WeakPtr<Client>& decode_client,
    const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
  return false;
}
1426
// Decoded frames are exposed to GL as BGRA textures.
GLenum DXVAVideoDecodeAccelerator::GetSurfaceInternalFormat() const {
  return GL_BGRA_EXT;
}
1430
1431 // static
1432 media::VideoDecodeAccelerator::SupportedProfiles
1433 DXVAVideoDecodeAccelerator::GetSupportedProfiles() {
1434 TRACE_EVENT0("gpu,startup",
1435 "DXVAVideoDecodeAccelerator::GetSupportedProfiles");
1436
1437 // TODO(henryhsu): Need to ensure the profiles are actually supported.
1438 SupportedProfiles profiles;
1439 for (const auto& supported_profile : kSupportedProfiles) {
1440 std::pair<int, int> min_resolution = GetMinResolution(supported_profile);
1441 std::pair<int, int> max_resolution = GetMaxResolution(supported_profile);
1442
1443 SupportedProfile profile;
1444 profile.profile = supported_profile;
1445 profile.min_resolution.SetSize(min_resolution.first, min_resolution.second);
1446 profile.max_resolution.SetSize(max_resolution.first, max_resolution.second);
1447 profiles.push_back(profile);
1448 }
1449 return profiles;
1450 }
1451
1452 // static
1453 void DXVAVideoDecodeAccelerator::PreSandboxInitialization() {
1454 ::LoadLibrary(L"MFPlat.dll");
1455 ::LoadLibrary(L"msmpeg2vdec.dll");
1456 ::LoadLibrary(L"mf.dll");
1457 ::LoadLibrary(L"dxva2.dll");
1458
1459 if (base::win::GetVersion() > base::win::VERSION_WIN7) {
1460 LoadLibrary(L"msvproc.dll");
1461 } else {
1462 #if defined(ENABLE_DX11_FOR_WIN7)
1463 LoadLibrary(L"mshtmlmedia.dll");
1464 #endif
1465 }
1466 }
1467
1468 // static
1469 std::pair<int, int> DXVAVideoDecodeAccelerator::GetMinResolution(
1470 media::VideoCodecProfile profile) {
1471 TRACE_EVENT0("gpu,startup",
1472 "DXVAVideoDecodeAccelerator::GetMinResolution");
1473 std::pair<int, int> min_resolution;
1474 if (profile >= media::H264PROFILE_BASELINE &&
1475 profile <= media::H264PROFILE_HIGH) {
1476 // Windows Media Foundation H.264 decoding does not support decoding videos
1477 // with any dimension smaller than 48 pixels:
1478 // http://msdn.microsoft.com/en-us/library/windows/desktop/dd797815
1479 min_resolution = std::make_pair(48, 48);
1480 } else {
1481 // TODO(ananta)
1482 // Detect this properly for VP8/VP9 profiles.
1483 min_resolution = std::make_pair(16, 16);
1484 }
1485 return min_resolution;
1486 }
1487
1488 // static
1489 std::pair<int, int> DXVAVideoDecodeAccelerator::GetMaxResolution(
1490 const media::VideoCodecProfile profile) {
1491 TRACE_EVENT0("gpu,startup",
1492 "DXVAVideoDecodeAccelerator::GetMaxResolution");
1493 std::pair<int, int> max_resolution;
1494 if (profile >= media::H264PROFILE_BASELINE &&
1495 profile <= media::H264PROFILE_HIGH) {
1496 max_resolution = GetMaxH264Resolution();
1497 } else {
1498 // TODO(ananta)
1499 // Detect this properly for VP8/VP9 profiles.
1500 max_resolution = std::make_pair(4096, 2160);
1501 }
1502 return max_resolution;
1503 }
1504
// Probes the driver for the largest H.264 resolution it can decode, by
// attempting to create a DXVA video decoder at successively larger sizes.
// The result is cached in function-local statics so the (expensive) probe
// runs only once per process.
// NOTE(review): the static caching is not synchronized — presumably this is
// only ever called from a single (GPU startup) thread; confirm with callers.
std::pair<int, int> DXVAVideoDecodeAccelerator::GetMaxH264Resolution() {
  TRACE_EVENT0("gpu,startup",
               "DXVAVideoDecodeAccelerator::GetMaxH264Resolution");
  // The H.264 resolution detection operation is expensive. This static flag
  // allows us to run the detection once.
  static bool resolution_detected = false;
  // Use 1088 to account for 16x16 macroblocks.
  static std::pair<int, int> max_resolution = std::make_pair(1920, 1088);
  if (resolution_detected)
    return max_resolution;

  resolution_detected = true;

  // On Windows 7 the maximum resolution supported by media foundation is
  // 1920 x 1088.
  if (base::win::GetVersion() == base::win::VERSION_WIN7)
    return max_resolution;

  // To detect if a driver supports the desired resolutions, we try and create
  // a DXVA decoder instance for that resolution and profile. If that succeeds
  // we assume that the driver supports H/W H.264 decoding for that resolution.
  HRESULT hr = E_FAIL;
  base::win::ScopedComPtr<ID3D11Device> device;

  {
    TRACE_EVENT0("gpu,startup",
                 "GetMaxH264Resolution. QueryDeviceObjectFromANGLE");

    device = QueryDeviceObjectFromANGLE<ID3D11Device>(EGL_D3D11_DEVICE_ANGLE);
    if (!device.get())
      return max_resolution;
  }

  base::win::ScopedComPtr<ID3D11VideoDevice> video_device;
  hr = device.QueryInterface(IID_ID3D11VideoDevice,
                             video_device.ReceiveVoid());
  if (FAILED(hr))
    return max_resolution;

  GUID decoder_guid = {};

  {
    TRACE_EVENT0("gpu,startup",
                 "GetMaxH264Resolution. H.264 guid search begin");
    // Enumerate supported video profiles and look for the H264 profile.
    bool found = false;
    UINT profile_count = video_device->GetVideoDecoderProfileCount();
    for (UINT profile_idx = 0; profile_idx < profile_count; profile_idx++) {
      GUID profile_id = {};
      hr = video_device->GetVideoDecoderProfile(profile_idx, &profile_id);
      if (SUCCEEDED(hr) &&
          (profile_id == DXVA2_ModeH264_E ||
           profile_id == DXVA2_Intel_ModeH264_E)) {
        decoder_guid = profile_id;
        found = true;
        break;
      }
    }
    if (!found)
      return max_resolution;
  }

  // Legacy AMD drivers with UVD3 or earlier and some Intel GPU's crash while
  // creating surfaces larger than 1920 x 1088.
  if (IsLegacyGPU(device.get()))
    return max_resolution;

  // We look for the following resolutions in the driver.
  // TODO(ananta)
  // Look into whether this list needs to be expanded.
  static std::pair<int, int> resolution_array[] = {
      // Use 1088 to account for 16x16 macroblocks.
      std::make_pair(1920, 1088),
      std::make_pair(2560, 1440),
      std::make_pair(3840, 2160),
      std::make_pair(4096, 2160),
      std::make_pair(4096, 2304),
  };

  {
    TRACE_EVENT0("gpu,startup",
                 "GetMaxH264Resolution. Resolution search begin");

    // Try each candidate in increasing order; on the first failure keep the
    // largest resolution that worked so far.
    for (size_t res_idx = 0; res_idx < arraysize(resolution_array);
         res_idx++) {
      D3D11_VIDEO_DECODER_DESC desc = {};
      desc.Guid = decoder_guid;
      desc.SampleWidth = resolution_array[res_idx].first;
      desc.SampleHeight = resolution_array[res_idx].second;
      desc.OutputFormat = DXGI_FORMAT_NV12;
      UINT config_count = 0;
      hr = video_device->GetVideoDecoderConfigCount(&desc, &config_count);
      if (FAILED(hr) || config_count == 0)
        return max_resolution;

      D3D11_VIDEO_DECODER_CONFIG config = {};
      hr = video_device->GetVideoDecoderConfig(&desc, 0, &config);
      if (FAILED(hr))
        return max_resolution;

      base::win::ScopedComPtr<ID3D11VideoDecoder> video_decoder;
      hr = video_device->CreateVideoDecoder(&desc, &config,
                                            video_decoder.Receive());
      if (!video_decoder.get())
        return max_resolution;

      max_resolution = resolution_array[res_idx];
    }
  }
  return max_resolution;
}
1616
// static
// Returns true if |device| belongs to a known-problematic (legacy) AMD or
// Intel GPU, or if the GPU type cannot be determined (the conservative
// default). The answer is cached in function-local statics so the DXGI
// queries run only once per process.
bool DXVAVideoDecodeAccelerator::IsLegacyGPU(ID3D11Device* device) {
  static const int kAMDGPUId1 = 0x1002;
  static const int kAMDGPUId2 = 0x1022;
  static const int kIntelGPU = 0x8086;

  // Default to legacy: every early-exit below (failed query) returns this
  // conservative answer.
  static bool legacy_gpu = true;
  // This flag ensures that we determine the GPU type once.
  static bool legacy_gpu_determined = false;

  if (legacy_gpu_determined)
    return legacy_gpu;

  legacy_gpu_determined = true;

  base::win::ScopedComPtr<IDXGIDevice> dxgi_device;
  HRESULT hr = dxgi_device.QueryFrom(device);
  if (FAILED(hr))
    return legacy_gpu;

  base::win::ScopedComPtr<IDXGIAdapter> adapter;
  hr = dxgi_device->GetAdapter(adapter.Receive());
  if (FAILED(hr))
    return legacy_gpu;

  DXGI_ADAPTER_DESC adapter_desc = {};
  hr = adapter->GetDesc(&adapter_desc);
  if (FAILED(hr))
    return legacy_gpu;

  // We check if the device is an Intel or an AMD device and whether it is in
  // the global list defined by the g_AMDUVD3GPUList and g_IntelLegacyGPUList
  // arrays above. If yes then the device is treated as a legacy device.
  if ((adapter_desc.VendorId == kAMDGPUId1) ||
      adapter_desc.VendorId == kAMDGPUId2) {
    {
      TRACE_EVENT0("gpu,startup",
                   "DXVAVideoDecodeAccelerator::IsLegacyGPU. AMD check");
      for (size_t i = 0; i < arraysize(g_AMDUVD3GPUList); i++) {
        if (adapter_desc.DeviceId == g_AMDUVD3GPUList[i])
          return legacy_gpu;
      }
    }
  } else if (adapter_desc.VendorId == kIntelGPU) {
    {
      TRACE_EVENT0("gpu,startup",
                   "DXVAVideoDecodeAccelerator::IsLegacyGPU. Intel check");
      for (size_t i = 0; i < arraysize(g_IntelLegacyGPUList); i++) {
        if (adapter_desc.DeviceId == g_IntelLegacyGPUList[i])
          return legacy_gpu;
      }
    }
  }
  // Device identified and not on either blacklist: not a legacy GPU.
  legacy_gpu = false;
  return legacy_gpu;
}
1673
// Instantiates the decoder MFT for |profile| (H.264 system decoder, or the
// WebM Project VP8/VP9 decoders when accelerated VPx decode is enabled),
// verifies DXVA support, creates the D3D9 or DX11 device manager, hands the
// device manager to the MFT, picks an EGL config, and finally negotiates the
// decoder media types. Sets |codec_| and |use_dx11_| as side effects.
bool DXVAVideoDecodeAccelerator::InitDecoder(media::VideoCodecProfile profile) {
  HMODULE decoder_dll = NULL;

  CLSID clsid = {};

  // Profile must fall within the valid range for one of the supported codecs.
  if (profile >= media::H264PROFILE_MIN && profile <= media::H264PROFILE_MAX) {
    // We mimic the steps CoCreateInstance uses to instantiate the object. This
    // was previously done because it failed inside the sandbox, and now is done
    // as a more minimal approach to avoid other side-effects CCI might have (as
    // we are still in a reduced sandbox).
    decoder_dll = ::GetModuleHandle(L"msmpeg2vdec.dll");
    RETURN_ON_FAILURE(decoder_dll,
                      "msmpeg2vdec.dll required for decoding is not loaded",
                      false);

    // Check version of DLL, version 6.1.7140 is blacklisted due to high crash
    // rates in browsers loading that DLL. If that is the version installed we
    // fall back to software decoding. See crbug/403440.
    std::unique_ptr<FileVersionInfo> version_info(
        FileVersionInfo::CreateFileVersionInfoForModule(decoder_dll));
    RETURN_ON_FAILURE(version_info,
                      "unable to get version of msmpeg2vdec.dll",
                      false);
    base::string16 file_version = version_info->file_version();
    RETURN_ON_FAILURE(file_version.find(L"6.1.7140") == base::string16::npos,
                      "blacklisted version of msmpeg2vdec.dll 6.1.7140",
                      false);
    codec_ = media::kCodecH264;
    clsid = __uuidof(CMSH264DecoderMFT);
  } else if (enable_accelerated_vpx_decode_ &&
             (profile == media::VP8PROFILE_ANY ||
              profile == media::VP9PROFILE_PROFILE0 ||
              profile == media::VP9PROFILE_PROFILE1 ||
              profile == media::VP9PROFILE_PROFILE2 ||
              profile == media::VP9PROFILE_PROFILE3)) {
    // The VPx decoder DLLs live under Program Files; on WOW64 use the native
    // (64-bit) Program Files directory.
    int program_files_key = base::DIR_PROGRAM_FILES;
    if (base::win::OSInfo::GetInstance()->wow64_status() ==
        base::win::OSInfo::WOW64_ENABLED) {
      program_files_key = base::DIR_PROGRAM_FILES6432;
    }

    base::FilePath dll_path;
    RETURN_ON_FAILURE(PathService::Get(program_files_key, &dll_path),
                      "failed to get path for Program Files", false);

    dll_path = dll_path.Append(kVPXDecoderDLLPath);
    if (profile == media::VP8PROFILE_ANY) {
      codec_ = media::kCodecVP8;
      dll_path = dll_path.Append(kVP8DecoderDLLName);
      clsid = CLSID_WebmMfVp8Dec;
    } else {
      codec_ = media::kCodecVP9;
      dll_path = dll_path.Append(kVP9DecoderDLLName);
      clsid = CLSID_WebmMfVp9Dec;
    }
    decoder_dll = ::LoadLibraryEx(dll_path.value().data(), NULL,
                                  LOAD_WITH_ALTERED_SEARCH_PATH);
    RETURN_ON_FAILURE(decoder_dll, "vpx decoder dll is not loaded", false);
  } else {
    RETURN_ON_FAILURE(false, "Unsupported codec.", false);
  }

  HRESULT hr = CreateCOMObjectFromDll(decoder_dll,
                                      clsid,
                                      __uuidof(IMFTransform),
                                      decoder_.ReceiveVoid());
  RETURN_ON_HR_FAILURE(hr, "Failed to create decoder instance", false);

  // Also decides between the DX11 and D3D9 paths (sets |use_dx11_|).
  RETURN_ON_FAILURE(CheckDecoderDxvaSupport(),
                    "Failed to check decoder DXVA support", false);

  ULONG_PTR device_manager_to_use = NULL;
  if (use_dx11_) {
    CHECK(create_dxgi_device_manager_);
    RETURN_AND_NOTIFY_ON_FAILURE(CreateDX11DevManager(),
                                 "Failed to initialize DX11 device and manager",
                                 PLATFORM_FAILURE,
                                 false);
    device_manager_to_use = reinterpret_cast<ULONG_PTR>(
        d3d11_device_manager_.get());
  } else {
    RETURN_AND_NOTIFY_ON_FAILURE(CreateD3DDevManager(),
                                 "Failed to initialize D3D device and manager",
                                 PLATFORM_FAILURE,
                                 false);
    device_manager_to_use = reinterpret_cast<ULONG_PTR>(device_manager_.get());
  }

  // Hand the device manager to the decoder MFT so it can allocate DXVA
  // surfaces on our device.
  hr = decoder_->ProcessMessage(
      MFT_MESSAGE_SET_D3D_MANAGER,
      device_manager_to_use);
  if (use_dx11_) {
    RETURN_ON_HR_FAILURE(hr, "Failed to pass DX11 manager to decoder", false);
  } else {
    RETURN_ON_HR_FAILURE(hr, "Failed to pass D3D manager to decoder", false);
  }

  EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();

  // 32-bit RGBA pbuffer-capable config for the decoding surfaces.
  EGLint config_attribs[] = {
    EGL_BUFFER_SIZE, 32,
    EGL_RED_SIZE, 8,
    EGL_GREEN_SIZE, 8,
    EGL_BLUE_SIZE, 8,
    EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
    EGL_ALPHA_SIZE, 0,
    EGL_NONE
  };

  EGLint num_configs;

  if (!eglChooseConfig(
      egl_display,
      config_attribs,
      &egl_config_,
      1,
      &num_configs))
    return false;

  return SetDecoderMediaTypes();
}
1796
// Verifies that the instantiated MFT decoder supports hardware (DXVA)
// acceleration and selects the device path (DX11 vs. D3D9) to use.
// Sets |use_dx11_| and |use_keyed_mutex_| as side effects. Returns false if
// the decoder's attribute store cannot be queried or D3D awareness cannot be
// determined.
bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
  base::win::ScopedComPtr<IMFAttributes> attributes;
  HRESULT hr = decoder_->GetAttributes(attributes.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get decoder attributes", false);

  // NOTE(review): only the success of the query is checked here; the returned
  // |dxva| value itself is not examined.
  UINT32 dxva = 0;
  hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
  RETURN_ON_HR_FAILURE(hr, "Failed to check if decoder supports DXVA", false);

  if (codec_ == media::kCodecH264) {
    hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE);
    RETURN_ON_HR_FAILURE(hr, "Failed to enable DXVA H/W decoding", false);
  }

  // Low latency mode is best effort: failure to enable it is logged but does
  // not fail initialization.
  hr = attributes->SetUINT32(CODECAPI_AVLowLatencyMode, TRUE);
  if (SUCCEEDED(hr)) {
    DVLOG(1) << "Successfully set Low latency mode on decoder.";
  } else {
    DVLOG(1) << "Failed to set Low latency mode on decoder. Error: " << hr;
  }

  auto gl_context = get_gl_context_cb_.Run();
  RETURN_ON_FAILURE(gl_context, "Couldn't get GL context", false);

  // The decoder should use DX11 iff
  // 1. The underlying H/W decoder supports it.
  // 2. We have a pointer to the MFCreateDXGIDeviceManager function needed for
  //    this. This should always be true for Windows 8+.
  // 3. ANGLE is using DX11 (detected via the renderer string).
  if (create_dxgi_device_manager_ &&
      (gl_context->GetGLRenderer().find("Direct3D11") != std::string::npos)) {
    UINT32 dx11_aware = 0;
    attributes->GetUINT32(MF_SA_D3D11_AWARE, &dx11_aware);
    use_dx11_ = !!dx11_aware;
  }

  // Keyed mutex synchronization is only usable on the DX11 path and only when
  // ANGLE exposes the extension.
  use_keyed_mutex_ =
      use_dx11_ && gfx::GLSurfaceEGL::HasEGLExtension("EGL_ANGLE_keyed_mutex");

  return true;
}
1838
// Configures the decoder's input media type (derived from |codec_|) and then
// requests NV12 output. Returns false if either media type is rejected.
bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() {
  RETURN_ON_FAILURE(SetDecoderInputMediaType(),
                    "Failed to set decoder input media type", false);
  return SetDecoderOutputMediaType(MFVideoFormat_NV12);
}
1844
// Builds an IMFMediaType describing the compressed input stream (H.264, VP8
// or VP9 depending on |codec_|) and hands it to the MFT decoder via
// SetInputType. Returns false on any Media Foundation failure or if |codec_|
// is not one of the supported values.
bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() {
  base::win::ScopedComPtr<IMFMediaType> media_type;
  HRESULT hr = MFCreateMediaType(media_type.Receive());
  RETURN_ON_HR_FAILURE(hr, "MFCreateMediaType failed", false);

  hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
  RETURN_ON_HR_FAILURE(hr, "Failed to set major input type", false);

  // Map our codec enum to the corresponding Media Foundation subtype GUID.
  if (codec_ == media::kCodecH264) {
    hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
  } else if (codec_ == media::kCodecVP8) {
    hr = media_type->SetGUID(MF_MT_SUBTYPE, MEDIASUBTYPE_VP80);
  } else if (codec_ == media::kCodecVP9) {
    hr = media_type->SetGUID(MF_MT_SUBTYPE, MEDIASUBTYPE_VP90);
  } else {
    NOTREACHED();
    RETURN_ON_FAILURE(false, "Unsupported codec on input media type.", false);
  }
  RETURN_ON_HR_FAILURE(hr, "Failed to set subtype", false);

  // Not sure about this. msdn recommends setting this value on the input
  // media type.
  hr = media_type->SetUINT32(MF_MT_INTERLACE_MODE,
                             MFVideoInterlace_MixedInterlaceOrProgressive);
  RETURN_ON_HR_FAILURE(hr, "Failed to set interlace mode", false);

  hr = decoder_->SetInputType(0, media_type.get(), 0);  // No flags
  RETURN_ON_HR_FAILURE(hr, "Failed to set decoder input type", false);
  return true;
}
1875
// Requests the given uncompressed output |subtype| (e.g. NV12) on the MFT
// decoder's output stream. Thin wrapper over SetTransformOutputType with
// width/height unconstrained (0, 0).
bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType(
    const GUID& subtype) {
  return SetTransformOutputType(decoder_.get(), subtype, 0, 0);
}
1880
1881 bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg,
1882 int32_t param) {
1883 HRESULT hr = decoder_->ProcessMessage(msg, param);
1884 return SUCCEEDED(hr);
1885 }
1886
// Gets the minimum buffer sizes for input and output samples. The MFT will not
// allocate buffer for input nor output, so we have to do it ourselves and make
// sure they're the correct size. We only provide decoding if DXVA is enabled.
//
// Fills |input_stream_info_| and |output_stream_info_| and logs their fields.
// For H.264 the stream flags are CHECKed against the exact values this code
// was written for; other codecs skip that validation.
bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() {
  HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
  RETURN_ON_HR_FAILURE(hr, "Failed to get input stream info", false);

  hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
  RETURN_ON_HR_FAILURE(hr, "Failed to get decoder output stream info", false);

  DVLOG(1) << "Input stream info: ";
  DVLOG(1) << "Max latency: " << input_stream_info_.hnsMaxLatency;
  if (codec_ == media::kCodecH264) {
    // There should be three flags, one for requiring a whole frame be in a
    // single sample, one for requiring there be one buffer only in a single
    // sample, and one that specifies a fixed sample size. (as in cbSize)
    CHECK_EQ(input_stream_info_.dwFlags, 0x7u);
  }

  DVLOG(1) << "Min buffer size: " << input_stream_info_.cbSize;
  DVLOG(1) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
  DVLOG(1) << "Alignment: " << input_stream_info_.cbAlignment;

  DVLOG(1) << "Output stream info: ";
  // The flags here should be the same and mean the same thing, except when
  // DXVA is enabled, there is an extra 0x100 flag meaning decoder will
  // allocate its own sample.
  DVLOG(1) << "Flags: "
           << std::hex << std::showbase << output_stream_info_.dwFlags;
  if (codec_ == media::kCodecH264) {
    CHECK_EQ(output_stream_info_.dwFlags, 0x107u);
  }
  DVLOG(1) << "Min buffer size: " << output_stream_info_.cbSize;
  DVLOG(1) << "Alignment: " << output_stream_info_.cbAlignment;
  return true;
}
1923
1924 void DXVAVideoDecodeAccelerator::DoDecode() {
1925 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
1926 // This function is also called from FlushInternal in a loop which could
1927 // result in the state transitioning to kStopped due to no decoded output.
1928 State state = GetState();
1929 RETURN_AND_NOTIFY_ON_FAILURE(
1930 (state == kNormal || state == kFlushing || state == kStopped),
1931 "DoDecode: not in normal/flushing/stopped state", ILLEGAL_STATE,);
1932
1933 MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0};
1934 DWORD status = 0;
1935
1936 HRESULT hr = decoder_->ProcessOutput(0, // No flags
1937 1, // # of out streams to pull from
1938 &output_data_buffer,
1939 &status);
1940 IMFCollection* events = output_data_buffer.pEvents;
1941 if (events != NULL) {
1942 DVLOG(1) << "Got events from ProcessOuput, but discarding";
1943 events->Release();
1944 }
1945 if (FAILED(hr)) {
1946 // A stream change needs further ProcessInput calls to get back decoder
1947 // output which is why we need to set the state to stopped.
1948 if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
1949 if (!SetDecoderOutputMediaType(MFVideoFormat_NV12)) {
1950 // Decoder didn't let us set NV12 output format. Not sure as to why
1951 // this can happen. Give up in disgust.
1952 NOTREACHED() << "Failed to set decoder output media type to NV12";
1953 SetState(kStopped);
1954 } else {
1955 DVLOG(1) << "Received output format change from the decoder."
1956 " Recursively invoking DoDecode";
1957 DoDecode();
1958 }
1959 return;
1960 } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
1961 // No more output from the decoder. Stop playback.
1962 SetState(kStopped);
1963 return;
1964 } else {
1965 NOTREACHED() << "Unhandled error in DoDecode()";
1966 return;
1967 }
1968 }
1969 TRACE_EVENT_ASYNC_END0("gpu", "DXVAVideoDecodeAccelerator.Decoding", this);
1970
1971 TRACE_COUNTER1("DXVA Decoding", "TotalPacketsBeforeDecode",
1972 inputs_before_decode_);
1973
1974 inputs_before_decode_ = 0;
1975
1976 RETURN_AND_NOTIFY_ON_FAILURE(ProcessOutputSample(output_data_buffer.pSample),
1977 "Failed to process output sample.", PLATFORM_FAILURE,);
1978 }
1979
// Queues a decoded output |sample| for delivery to the client. The originating
// input buffer id is recovered from the sample's timestamp (it was stashed
// there via SetSampleTime on the input path). On the very first sample this
// also asks the client for picture buffers; afterwards it just schedules
// ProcessPendingSamples on the main thread.
bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) {
  RETURN_ON_FAILURE(sample, "Decode succeeded with NULL output sample", false);

  LONGLONG input_buffer_id = 0;
  RETURN_ON_HR_FAILURE(sample->GetSampleTime(&input_buffer_id),
                       "Failed to get input buffer id associated with sample",
                       false);

  {
    base::AutoLock lock(decoder_lock_);
    // Only one output sample may be pending at a time (see DecodeInternal).
    DCHECK(pending_output_samples_.empty());
    pending_output_samples_.push_back(
        PendingSampleInfo(input_buffer_id, sample));
  }

  if (pictures_requested_) {
    DVLOG(1) << "Waiting for picture slots from the client.";
    main_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::ProcessPendingSamples,
                   weak_this_factory_.GetWeakPtr()));
    return true;
  }

  // First output sample: derive the frame dimensions so the client can
  // allocate appropriately sized picture buffers.
  int width = 0;
  int height = 0;
  if (!GetVideoFrameDimensions(sample, &width, &height)) {
    RETURN_ON_FAILURE(false, "Failed to get D3D surface from output sample",
                      false);
  }

  // Go ahead and request picture buffers.
  main_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::RequestPictureBuffers,
                 weak_this_factory_.GetWeakPtr(),
                 width,
                 height));

  pictures_requested_ = true;
  return true;
}
2022
// Matches pending decoded samples against available client picture buffers
// and kicks off the copy of each sample's surface/texture into its picture
// buffer. Runs on the main thread. Bails out early (via
// HandleResolutionChanged) if a sample's dimensions no longer match the
// allocated picture buffers.
void DXVAVideoDecodeAccelerator::ProcessPendingSamples() {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  // Nothing to do until the client has provided picture buffers.
  if (!output_picture_buffers_.size())
    return;

  RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
                               "Failed to make context current",
                               PLATFORM_FAILURE, );

  OutputBuffers::iterator index;

  for (index = output_picture_buffers_.begin();
       index != output_picture_buffers_.end() &&
       OutputSamplesPresent();
       ++index) {
    if (index->second->available()) {
      PendingSampleInfo* pending_sample = NULL;
      {
        base::AutoLock lock(decoder_lock_);
        PendingSampleInfo& sample_info = pending_output_samples_.front();
        // A sample already bound to a picture buffer is in flight; skip it.
        if (sample_info.picture_buffer_id != -1)
          continue;
        pending_sample = &sample_info;
      }
      // NOTE(review): |pending_sample| points into pending_output_samples_
      // after the lock is dropped; this relies on the front element not being
      // removed concurrently — confirm against the pop in
      // CopySurfaceComplete.

      int width = 0;
      int height = 0;
      if (!GetVideoFrameDimensions(pending_sample->output_sample.get(),
          &width, &height)) {
        RETURN_AND_NOTIFY_ON_FAILURE(false,
            "Failed to get D3D surface from output sample", PLATFORM_FAILURE,);
      }

      // Resolution changed relative to the allocated picture buffers:
      // dismiss and re-request buffers instead of copying.
      if (width != index->second->size().width() ||
          height != index->second->size().height()) {
        HandleResolutionChanged(width, height);
        return;
      }

      base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
      HRESULT hr = pending_sample->output_sample->GetBufferByIndex(
          0, output_buffer.Receive());
      RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
          "Failed to get buffer from output sample", PLATFORM_FAILURE,);

      base::win::ScopedComPtr<IDirect3DSurface9> surface;
      base::win::ScopedComPtr<ID3D11Texture2D> d3d11_texture;

      // Extract the underlying surface: a D3D11 texture on the DX11 path, an
      // IDirect3DSurface9 on the D3D9 path.
      if (use_dx11_) {
        base::win::ScopedComPtr<IMFDXGIBuffer> dxgi_buffer;
        hr = dxgi_buffer.QueryFrom(output_buffer.get());
        RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
            "Failed to get DXGIBuffer from output sample", PLATFORM_FAILURE,);
        hr = dxgi_buffer->GetResource(
            __uuidof(ID3D11Texture2D),
            reinterpret_cast<void**>(d3d11_texture.Receive()));
      } else {
        hr = MFGetService(output_buffer.get(), MR_BUFFER_SERVICE,
                          IID_PPV_ARGS(surface.Receive()));
      }
      RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
          "Failed to get surface from output sample", PLATFORM_FAILURE,);

      pending_sample->picture_buffer_id = index->second->id();

      RETURN_AND_NOTIFY_ON_FAILURE(
          index->second->CopyOutputSampleDataToPictureBuffer(
              this,
              surface.get(),
              d3d11_texture.get(),
              pending_sample->input_buffer_id),
          "Failed to copy output sample", PLATFORM_FAILURE,);

      index->second->set_available(false);
    }
  }
}
2101
// Reports |error| to the client and tears the decoder down. May be called
// from any thread; hops to the main thread before touching |client_| or
// invalidating state. After this runs, |client_| is cleared so no further
// notifications are delivered.
void DXVAVideoDecodeAccelerator::StopOnError(
    media::VideoDecodeAccelerator::Error error) {
  if (!main_thread_task_runner_->BelongsToCurrentThread()) {
    main_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::StopOnError,
                   weak_this_factory_.GetWeakPtr(),
                   error));
    return;
  }

  if (client_)
    client_->NotifyError(error);
  client_ = NULL;

  if (GetState() != kUninitialized) {
    Invalidate();
  }
}
2121
// Tears down the decoder: stops the decoder thread, drops queued buffers and
// samples, releases the MFT and all D3D9/DX11 resources, and transitions to
// kUninitialized. No-op if already uninitialized. Pending input is preserved
// across a config change (kConfigChange) so decoding can resume after
// reinitialization.
void DXVAVideoDecodeAccelerator::Invalidate() {
  if (GetState() == kUninitialized)
    return;

  // Best effort to make the GL context current.
  make_context_current_cb_.Run();

  // Stop the decoder thread first so no task races the teardown below.
  decoder_thread_.Stop();
  weak_this_factory_.InvalidateWeakPtrs();
  output_picture_buffers_.clear();
  stale_output_picture_buffers_.clear();
  pending_output_samples_.clear();
  // We want to continue processing pending input after detecting a config
  // change.
  if (GetState() != kConfigChange)
    pending_input_buffers_.clear();
  decoder_.Release();
  pictures_requested_ = false;

  config_change_detector_.reset();

  // Release the device-path specific resources (DX11 vs. D3D9).
  if (use_dx11_) {
    if (video_format_converter_mft_.get()) {
      // Tell the converter MFT streaming has ended before releasing it.
      video_format_converter_mft_->ProcessMessage(
          MFT_MESSAGE_NOTIFY_END_STREAMING, 0);
      video_format_converter_mft_.Release();
    }
    d3d11_device_context_.Release();
    d3d11_device_.Release();
    d3d11_device_manager_.Release();
    d3d11_query_.Release();
    dx11_video_format_converter_media_type_needs_init_ = true;
    multi_threaded_.Release();
  } else {
    d3d9_.Release();
    d3d9_device_ex_.Release();
    device_manager_.Release();
    query_.Release();
  }

  SetState(kUninitialized);
}
2164
2165 void DXVAVideoDecodeAccelerator::NotifyInputBufferRead(int input_buffer_id) {
2166 DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
2167 if (client_)
2168 client_->NotifyEndOfBitstreamBuffer(input_buffer_id);
2169 }
2170
2171 void DXVAVideoDecodeAccelerator::NotifyFlushDone() {
2172 DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
2173 if (client_ && pending_flush_) {
2174 pending_flush_ = false;
2175 {
2176 base::AutoLock lock(decoder_lock_);
2177 sent_drain_message_ = false;
2178 }
2179
2180 client_->NotifyFlushDone();
2181 }
2182 }
2183
2184 void DXVAVideoDecodeAccelerator::NotifyResetDone() {
2185 DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
2186 if (client_)
2187 client_->NotifyResetDone();
2188 }
2189
2190 void DXVAVideoDecodeAccelerator::RequestPictureBuffers(int width, int height) {
2191 DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
2192 // This task could execute after the decoder has been torn down.
2193 if (GetState() != kUninitialized && client_) {
2194 client_->ProvidePictureBuffers(kNumPictureBuffers, 1,
2195 gfx::Size(width, height), GL_TEXTURE_2D);
2196 }
2197 }
2198
2199 void DXVAVideoDecodeAccelerator::NotifyPictureReady(
2200 int picture_buffer_id,
2201 int input_buffer_id) {
2202 DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
2203 // This task could execute after the decoder has been torn down.
2204 if (GetState() != kUninitialized && client_) {
2205 // TODO(henryhsu): Use correct visible size instead of (0, 0). We can't use
2206 // coded size here so use (0, 0) intentionally to have the client choose.
2207 media::Picture picture(picture_buffer_id, input_buffer_id,
2208 gfx::Rect(0, 0), false);
2209 client_->PictureReady(picture);
2210 }
2211 }
2212
2213 void DXVAVideoDecodeAccelerator::NotifyInputBuffersDropped() {
2214 DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
2215 if (!client_)
2216 return;
2217
2218 for (PendingInputs::iterator it = pending_input_buffers_.begin();
2219 it != pending_input_buffers_.end(); ++it) {
2220 LONGLONG input_buffer_id = 0;
2221 RETURN_ON_HR_FAILURE((*it)->GetSampleTime(&input_buffer_id),
2222 "Failed to get buffer id associated with sample",);
2223 client_->NotifyEndOfBitstreamBuffer(input_buffer_id);
2224 }
2225 pending_input_buffers_.clear();
2226 }
2227
2228 void DXVAVideoDecodeAccelerator::DecodePendingInputBuffers() {
2229 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
2230 State state = GetState();
2231 RETURN_AND_NOTIFY_ON_FAILURE((state != kUninitialized),
2232 "Invalid state: " << state, ILLEGAL_STATE,);
2233
2234 if (pending_input_buffers_.empty() || OutputSamplesPresent())
2235 return;
2236
2237 PendingInputs pending_input_buffers_copy;
2238 std::swap(pending_input_buffers_, pending_input_buffers_copy);
2239
2240 for (PendingInputs::iterator it = pending_input_buffers_copy.begin();
2241 it != pending_input_buffers_copy.end(); ++it) {
2242 DecodeInternal(*it);
2243 }
2244 }
2245
// Decoder-thread half of Flush: first drains queued input, then sends the
// MFT drain command (once), then repeatedly pulls output via DoDecode. The
// function re-posts itself until no output samples remain, at which point it
// notifies the client on the main thread.
void DXVAVideoDecodeAccelerator::FlushInternal() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  // We allow only one output frame to be present at any given time. If we have
  // an output frame, then we cannot complete the flush at this time.
  if (OutputSamplesPresent())
    return;

  // First drain the pending input because once the drain message is sent below,
  // the decoder will ignore further input until it's drained.
  if (!pending_input_buffers_.empty()) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
                   base::Unretained(this)));
    // Re-post ourselves to run after the pending input has been decoded.
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                   base::Unretained(this)));
    return;
  }

  {
    base::AutoLock lock(decoder_lock_);
    // Send the drain command only once per flush; the flag is reset in
    // NotifyFlushDone.
    if (!sent_drain_message_) {
      RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0),
                                   "Failed to send drain message",
                                   PLATFORM_FAILURE,);
      sent_drain_message_ = true;
    }
  }

  // Attempt to retrieve an output frame from the decoder. If we have one,
  // return and proceed when the output frame is processed. If we don't have a
  // frame then we are done.
  DoDecode();
  if (OutputSamplesPresent())
    return;

  SetState(kFlushing);

  main_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::NotifyFlushDone,
                 weak_this_factory_.GetWeakPtr()));

  SetState(kNormal);
}
2294
// Decoder-thread workhorse for a single input sample: defers it if output is
// pending, reinitializes on a detected stream-config change, otherwise feeds
// it to the MFT with the MF_E_NOTACCEPTING retry protocol, pulls output, and
// finally tells the client the input buffer was consumed.
void DXVAVideoDecodeAccelerator::DecodeInternal(
    const base::win::ScopedComPtr<IMFSample>& sample) {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (GetState() == kUninitialized)
    return;

  // Only one output sample may be outstanding, and queued input must stay
  // ordered, so defer this sample if either condition holds.
  if (OutputSamplesPresent() || !pending_input_buffers_.empty()) {
    pending_input_buffers_.push_back(sample);
    return;
  }

  // Check if the resolution, bit rate, etc changed in the stream. If yes we
  // reinitialize the decoder to ensure that the stream decodes correctly.
  bool config_changed = false;

  HRESULT hr = CheckConfigChanged(sample.get(), &config_changed);
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to check video stream config",
      PLATFORM_FAILURE,);

  if (config_changed) {
    // Requeue the sample; it will be decoded after the decoder is
    // reinitialized via ConfigChanged on the main thread.
    pending_input_buffers_.push_back(sample);
    main_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::ConfigChanged,
                   weak_this_factory_.GetWeakPtr(),
                   config_));
    return;
  }

  if (!inputs_before_decode_) {
    TRACE_EVENT_ASYNC_BEGIN0("gpu", "DXVAVideoDecodeAccelerator.Decoding",
        this);
  }
  inputs_before_decode_++;

  hr = decoder_->ProcessInput(0, sample.get(), 0);
  // As per msdn if the decoder returns MF_E_NOTACCEPTING then it means that it
  // has enough data to produce one or more output samples. In this case the
  // recommended options are to
  // 1. Generate new output by calling IMFTransform::ProcessOutput until it
  //    returns MF_E_TRANSFORM_NEED_MORE_INPUT.
  // 2. Flush the input data
  // We implement the first option, i.e to retrieve the output sample and then
  // process the input again. Failure in either of these steps is treated as a
  // decoder failure.
  if (hr == MF_E_NOTACCEPTING) {
    DoDecode();
    // If the DoDecode call resulted in an output frame then we should not
    // process any more input until that frame is copied to the target surface.
    if (!OutputSamplesPresent()) {
      State state = GetState();
      RETURN_AND_NOTIFY_ON_FAILURE((state == kStopped || state == kNormal ||
                                    state == kFlushing),
          "Failed to process output. Unexpected decoder state: " << state,
          PLATFORM_FAILURE,);
      hr = decoder_->ProcessInput(0, sample.get(), 0);
    }
    // If we continue to get the MF_E_NOTACCEPTING error we do the following:-
    // 1. Add the input sample to the pending queue.
    // 2. If we don't have any output samples we post the
    //    DecodePendingInputBuffers task to process the pending input samples.
    //    If we have an output sample then the above task is posted when the
    //    output samples are sent to the client.
    // This is because we only support 1 pending output sample at any
    // given time due to the limitation with the Microsoft media foundation
    // decoder where it recycles the output Decoder surfaces.
    if (hr == MF_E_NOTACCEPTING) {
      pending_input_buffers_.push_back(sample);
      decoder_thread_task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
                     base::Unretained(this)));
      return;
    }
  }
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to process input sample",
      PLATFORM_FAILURE,);

  DoDecode();

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state == kStopped || state == kNormal ||
                                state == kFlushing),
      "Failed to process output. Unexpected decoder state: " << state,
      ILLEGAL_STATE,);

  LONGLONG input_buffer_id = 0;
  RETURN_ON_HR_FAILURE(sample->GetSampleTime(&input_buffer_id),
                       "Failed to get input buffer id associated with sample",);
  // The Microsoft Media foundation decoder internally buffers up to 30 frames
  // before returning a decoded frame. We need to inform the client that this
  // input buffer is processed as it may stop sending us further input.
  // Note: This may break clients which expect every input buffer to be
  // associated with a decoded output buffer.
  // TODO(ananta)
  // Do some more investigation into whether it is possible to get the MFT
  // decoder to emit an output packet for every input packet.
  // http://code.google.com/p/chromium/issues/detail?id=108121
  // http://code.google.com/p/chromium/issues/detail?id=150925
  main_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::NotifyInputBufferRead,
                 weak_this_factory_.GetWeakPtr(),
                 input_buffer_id));
}
2401
// Reacts to a mid-stream resolution change: marks the DX11 format-converter
// media type for reinitialization, then (on the main thread) dismisses the
// now mis-sized picture buffers and requests new ones of |width| x |height|.
void DXVAVideoDecodeAccelerator::HandleResolutionChanged(int width,
                                                         int height) {
  dx11_video_format_converter_media_type_needs_init_ = true;

  // DismissStaleBuffers(false): only currently-available buffers are
  // dismissed immediately; in-use ones are deferred.
  main_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::DismissStaleBuffers,
                 weak_this_factory_.GetWeakPtr(), false));

  main_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::RequestPictureBuffers,
                 weak_this_factory_.GetWeakPtr(),
                 width,
                 height));
}
2418
2419 void DXVAVideoDecodeAccelerator::DismissStaleBuffers(bool force) {
2420 RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
2421 "Failed to make context current",
2422 PLATFORM_FAILURE, );
2423
2424 OutputBuffers::iterator index;
2425
2426 for (index = output_picture_buffers_.begin();
2427 index != output_picture_buffers_.end();
2428 ++index) {
2429 if (force || index->second->available()) {
2430 DVLOG(1) << "Dismissing picture id: " << index->second->id();
2431 client_->DismissPictureBuffer(index->second->id());
2432 } else {
2433 // Move to |stale_output_picture_buffers_| for deferred deletion.
2434 stale_output_picture_buffers_.insert(
2435 std::make_pair(index->first, index->second));
2436 }
2437 }
2438
2439 output_picture_buffers_.clear();
2440 }
2441
// Dismisses a picture buffer that was still in use when DismissStaleBuffers
// ran and was therefore parked in |stale_output_picture_buffers_|.
void DXVAVideoDecodeAccelerator::DeferredDismissStaleBuffer(
    int32_t picture_buffer_id) {
  RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
                               "Failed to make context current",
                               PLATFORM_FAILURE, );

  OutputBuffers::iterator it = stale_output_picture_buffers_.find(
      picture_buffer_id);
  DCHECK(it != stale_output_picture_buffers_.end());
  DVLOG(1) << "Dismissing picture id: " << it->second->id();
  client_->DismissPictureBuffer(it->second->id());
  stale_output_picture_buffers_.erase(it);
}
2455
// Returns the current decoder state. |state_| is shared between the main and
// decoder threads, so it is read atomically (InterlockedAdd of 0 is an
// atomic load).
DXVAVideoDecodeAccelerator::State
DXVAVideoDecodeAccelerator::GetState() {
  static_assert(sizeof(State) == sizeof(long), "mismatched type sizes");
  State state = static_cast<State>(
      InterlockedAdd(reinterpret_cast<volatile long*>(&state_), 0));
  return state;
}
2463
// Atomically updates the decoder state. Always takes effect on the main
// thread: calls from other threads are re-posted there, so the transition is
// asynchronous in that case.
void DXVAVideoDecodeAccelerator::SetState(State new_state) {
  if (!main_thread_task_runner_->BelongsToCurrentThread()) {
    main_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::SetState,
                   weak_this_factory_.GetWeakPtr(),
                   new_state));
    return;
  }

  static_assert(sizeof(State) == sizeof(long), "mismatched type sizes");
  ::InterlockedExchange(reinterpret_cast<volatile long*>(&state_),
                        new_state);
  DCHECK_EQ(state_, new_state);
}
2479
// Starts the dedicated decoder thread and caches its task runner. COM is
// initialized on that thread (with MTA disabled, per the |false| argument).
void DXVAVideoDecodeAccelerator::StartDecoderThread() {
  decoder_thread_.init_com_with_mta(false);
  decoder_thread_.Start();
  decoder_thread_task_runner_ = decoder_thread_.task_runner();
}
2485
// Returns true if a decoded output sample is awaiting delivery to the client.
// Thread-safe: guarded by |decoder_lock_|.
bool DXVAVideoDecodeAccelerator::OutputSamplesPresent() {
  base::AutoLock lock(decoder_lock_);
  return !pending_output_samples_.empty();
}
2490
// D3D9 path: copies (and color-converts) the decoded |src_surface| into the
// client-visible |dest_surface| via StretchRect on the decoder thread, then
// arranges for CopySurfaceComplete to run once the GPU work is done — either
// immediately (shared ANGLE device) or by polling the D3D event query via
// FlushDecoder.
void DXVAVideoDecodeAccelerator::CopySurface(IDirect3DSurface9* src_surface,
                                             IDirect3DSurface9* dest_surface,
                                             int picture_buffer_id,
                                             int input_buffer_id) {
  // Hop to the decoder thread if called from elsewhere.
  if (!decoder_thread_task_runner_->BelongsToCurrentThread()) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::CopySurface,
                   base::Unretained(this),
                   src_surface,
                   dest_surface,
                   picture_buffer_id,
                   input_buffer_id));
    return;
  }

  HRESULT hr = d3d9_device_ex_->StretchRect(src_surface, NULL, dest_surface,
                                            NULL, D3DTEXF_NONE);
  RETURN_ON_HR_FAILURE(hr, "Colorspace conversion via StretchRect failed",);

  // Ideally, this should be done immediately before the draw call that uses
  // the texture. Flush it once here though.
  hr = query_->Issue(D3DISSUE_END);
  RETURN_ON_HR_FAILURE(hr, "Failed to issue END",);

  // If we are sharing the ANGLE device we don't need to wait for the Flush to
  // complete.
  if (using_angle_device_) {
    main_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::CopySurfaceComplete,
                   weak_this_factory_.GetWeakPtr(),
                   src_surface,
                   dest_surface,
                   picture_buffer_id,
                   input_buffer_id));
    return;
  }

  // Flush the decoder device to ensure that the decoded frame is copied to the
  // target surface.
  decoder_thread_task_runner_->PostDelayedTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder,
                 base::Unretained(this), 0, src_surface, dest_surface,
                 picture_buffer_id, input_buffer_id),
      base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
}
2539
// Finishes a surface copy on the main thread: completes the picture buffer's
// copy, notifies the client the picture is ready, pops the pending output
// sample, and resumes either the flush or pending-input processing on the
// decoder thread.
void DXVAVideoDecodeAccelerator::CopySurfaceComplete(
    IDirect3DSurface9* src_surface,
    IDirect3DSurface9* dest_surface,
    int picture_buffer_id,
    int input_buffer_id) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  // The output buffers may have changed in the following scenarios:-
  // 1. A resolution change.
  // 2. Decoder instance was destroyed.
  // Ignore copy surface notifications for such buffers.
  OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
  if (it == output_picture_buffers_.end())
    return;

  // If the picture buffer is marked as available it probably means that there
  // was a Reset operation which dropped the output frame.
  DXVAPictureBuffer* picture_buffer = it->second.get();
  if (picture_buffer->available())
    return;

  RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
                               "Failed to make context current",
                               PLATFORM_FAILURE, );

  DCHECK(!output_picture_buffers_.empty());

  bool result = picture_buffer->CopySurfaceComplete(src_surface, dest_surface);
  RETURN_AND_NOTIFY_ON_FAILURE(result, "Failed to complete copying surface",
                               PLATFORM_FAILURE, );

  NotifyPictureReady(picture_buffer->id(), input_buffer_id);

  {
    base::AutoLock lock(decoder_lock_);
    if (!pending_output_samples_.empty())
      pending_output_samples_.pop_front();
  }

  // Resume the flush if one is in progress; otherwise continue decoding any
  // queued input on the decoder thread.
  if (pending_flush_) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                   base::Unretained(this)));
    return;
  }
  decoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
                 base::Unretained(this)));
}
2592
2593 void DXVAVideoDecodeAccelerator::CopyTexture(
2594 ID3D11Texture2D* src_texture,
2595 ID3D11Texture2D* dest_texture,
2596 base::win::ScopedComPtr<IDXGIKeyedMutex> dest_keyed_mutex,
2597 uint64_t keyed_mutex_value,
2598 IMFSample* video_frame,
2599 int picture_buffer_id,
2600 int input_buffer_id) {
2601 HRESULT hr = E_FAIL;
2602
2603 DCHECK(use_dx11_);
2604
2605 if (!decoder_thread_task_runner_->BelongsToCurrentThread()) {
2606 // The media foundation H.264 decoder outputs YUV12 textures which we
2607 // cannot copy into ANGLE as they expect ARGB textures. In D3D land
2608 // the StretchRect API in the IDirect3DDevice9Ex interface did the color
2609 // space conversion for us. Sadly in DX11 land the API does not provide
2610 // a straightforward way to do this.
2611 // We use the video processor MFT.
2612 // https://msdn.microsoft.com/en-us/library/hh162913(v=vs.85).aspx
2613 // This object implements a media foundation transform (IMFTransform)
2614 // which follows the same contract as the decoder. The color space
2615 // conversion as per msdn is done in the GPU.
2616
2617 D3D11_TEXTURE2D_DESC source_desc;
2618 src_texture->GetDesc(&source_desc);
2619
2620 // Set up the input and output types for the video processor MFT.
2621 if (!InitializeDX11VideoFormatConverterMediaType(source_desc.Width,
2622 source_desc.Height)) {
2623 RETURN_AND_NOTIFY_ON_FAILURE(
2624 false, "Failed to initialize media types for convesion.",
2625 PLATFORM_FAILURE,);
2626 }
2627
2628 // The input to the video processor is the output sample.
2629 base::win::ScopedComPtr<IMFSample> input_sample_for_conversion;
2630 {
2631 base::AutoLock lock(decoder_lock_);
2632 PendingSampleInfo& sample_info = pending_output_samples_.front();
2633 input_sample_for_conversion = sample_info.output_sample;
2634 }
2635
2636 decoder_thread_task_runner_->PostTask(
2637 FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::CopyTexture,
2638 base::Unretained(this), src_texture, dest_texture,
2639 dest_keyed_mutex, keyed_mutex_value,
2640 input_sample_for_conversion.Detach(),
2641 picture_buffer_id, input_buffer_id));
2642 return;
2643 }
2644
2645 DCHECK(video_frame);
2646
2647 base::win::ScopedComPtr<IMFSample> input_sample;
2648 input_sample.Attach(video_frame);
2649
2650 DCHECK(video_format_converter_mft_.get());
2651
2652 if (dest_keyed_mutex) {
2653 HRESULT hr =
2654 dest_keyed_mutex->AcquireSync(keyed_mutex_value, kAcquireSyncWaitMs);
2655 RETURN_AND_NOTIFY_ON_FAILURE(
2656 hr == S_OK, "D3D11 failed to acquire keyed mutex for texture.",
2657 PLATFORM_FAILURE, );
2658 }
2659 // The video processor MFT requires output samples to be allocated by the
2660 // caller. We create a sample with a buffer backed with the ID3D11Texture2D
2661 // interface exposed by ANGLE. This works nicely as this ensures that the
2662 // video processor coverts the color space of the output frame and copies
2663 // the result into the ANGLE texture.
2664 base::win::ScopedComPtr<IMFSample> output_sample;
2665 hr = MFCreateSample(output_sample.Receive());
2666 if (FAILED(hr)) {
2667 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
2668 "Failed to create output sample.", PLATFORM_FAILURE,);
2669 }
2670
2671 base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
2672 hr = MFCreateDXGISurfaceBuffer(
2673 __uuidof(ID3D11Texture2D), dest_texture, 0, FALSE,
2674 output_buffer.Receive());
2675 if (FAILED(hr)) {
2676 base::debug::Alias(&hr);
2677 // TODO(ananta)
2678 // Remove this CHECK when the change to use DX11 for H/W decoding
2679 // stablizes.
2680 CHECK(false);
2681 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
2682 "Failed to create output sample.", PLATFORM_FAILURE,);
2683 }
2684
2685 output_sample->AddBuffer(output_buffer.get());
2686
2687 hr = video_format_converter_mft_->ProcessInput(0, video_frame, 0);
2688 if (FAILED(hr)) {
2689 DCHECK(false);
2690 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
2691 "Failed to convert output sample format.", PLATFORM_FAILURE,);
2692 }
2693
2694 DWORD status = 0;
2695 MFT_OUTPUT_DATA_BUFFER format_converter_output = {};
2696 format_converter_output.pSample = output_sample.get();
2697 hr = video_format_converter_mft_->ProcessOutput(
2698 0, // No flags
2699 1, // # of out streams to pull from
2700 &format_converter_output,
2701 &status);
2702
2703 if (FAILED(hr)) {
2704 base::debug::Alias(&hr);
2705 // TODO(ananta)
2706 // Remove this CHECK when the change to use DX11 for H/W decoding
2707 // stablizes.
2708 CHECK(false);
2709 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
2710 "Failed to convert output sample format.", PLATFORM_FAILURE,);
2711 }
2712
2713 if (dest_keyed_mutex) {
2714 HRESULT hr = dest_keyed_mutex->ReleaseSync(keyed_mutex_value + 1);
2715 RETURN_AND_NOTIFY_ON_FAILURE(hr == S_OK, "Failed to release keyed mutex.",
2716 PLATFORM_FAILURE, );
2717
2718 main_thread_task_runner_->PostTask(
2719 FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::CopySurfaceComplete,
2720 weak_this_factory_.GetWeakPtr(), nullptr, nullptr,
2721 picture_buffer_id, input_buffer_id));
2722 } else {
2723 d3d11_device_context_->Flush();
2724 d3d11_device_context_->End(d3d11_query_.get());
2725
2726 decoder_thread_task_runner_->PostDelayedTask(
2727 FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder,
2728 base::Unretained(this), 0,
2729 reinterpret_cast<IDirect3DSurface9*>(NULL),
2730 reinterpret_cast<IDirect3DSurface9*>(NULL),
2731 picture_buffer_id, input_buffer_id),
2732 base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
2733 }
2734 }
2735
2736 void DXVAVideoDecodeAccelerator::FlushDecoder(
2737 int iterations,
2738 IDirect3DSurface9* src_surface,
2739 IDirect3DSurface9* dest_surface,
2740 int picture_buffer_id,
2741 int input_buffer_id) {
2742 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());
2743
2744 // The DXVA decoder has its own device which it uses for decoding. ANGLE
2745 // has its own device which we don't have access to.
2746 // The above code attempts to copy the decoded picture into a surface
2747 // which is owned by ANGLE. As there are multiple devices involved in
2748 // this, the StretchRect call above is not synchronous.
2749 // We attempt to flush the batched operations to ensure that the picture is
2750 // copied to the surface owned by ANGLE.
2751 // We need to do this in a loop and call flush multiple times.
2752 // We have seen the GetData call for flushing the command buffer fail to
2753 // return success occassionally on multi core machines, leading to an
2754 // infinite loop.
2755 // Workaround is to have an upper limit of 4 on the number of iterations to
2756 // wait for the Flush to finish.
2757
2758 HRESULT hr = E_FAIL;
2759 if (use_dx11_) {
2760 BOOL query_data = 0;
2761 hr = d3d11_device_context_->GetData(d3d11_query_.get(), &query_data,
2762 sizeof(BOOL), 0);
2763 if (FAILED(hr)) {
2764 base::debug::Alias(&hr);
2765 // TODO(ananta)
2766 // Remove this CHECK when the change to use DX11 for H/W decoding
2767 // stablizes.
2768 CHECK(false);
2769 }
2770 } else {
2771 hr = query_->GetData(NULL, 0, D3DGETDATA_FLUSH);
2772 }
2773
2774 if ((hr == S_FALSE) && (++iterations < kMaxIterationsForD3DFlush)) {
2775 decoder_thread_task_runner_->PostDelayedTask(
2776 FROM_HERE,
2777 base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder,
2778 base::Unretained(this), iterations, src_surface,
2779 dest_surface, picture_buffer_id, input_buffer_id),
2780 base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
2781 return;
2782 }
2783
2784 main_thread_task_runner_->PostTask(
2785 FROM_HERE,
2786 base::Bind(&DXVAVideoDecodeAccelerator::CopySurfaceComplete,
2787 weak_this_factory_.GetWeakPtr(),
2788 src_surface,
2789 dest_surface,
2790 picture_buffer_id,
2791 input_buffer_id));
2792 }
2793
2794 bool DXVAVideoDecodeAccelerator::InitializeDX11VideoFormatConverterMediaType(
2795 int width, int height) {
2796 if (!dx11_video_format_converter_media_type_needs_init_)
2797 return true;
2798
2799 CHECK(video_format_converter_mft_.get());
2800
2801 HRESULT hr = video_format_converter_mft_->ProcessMessage(
2802 MFT_MESSAGE_SET_D3D_MANAGER,
2803 reinterpret_cast<ULONG_PTR>(
2804 d3d11_device_manager_.get()));
2805
2806 if (FAILED(hr)) {
2807 base::debug::Alias(&hr);
2808 // TODO(ananta)
2809 // Remove this CHECK when the change to use DX11 for H/W decoding
2810 // stablizes.
2811 CHECK(false);
2812 }
2813 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
2814 "Failed to initialize video format converter", PLATFORM_FAILURE, false);
2815
2816 video_format_converter_mft_->ProcessMessage(
2817 MFT_MESSAGE_NOTIFY_END_STREAMING, 0);
2818
2819 base::win::ScopedComPtr<IMFMediaType> media_type;
2820 hr = MFCreateMediaType(media_type.Receive());
2821 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "MFCreateMediaType failed",
2822 PLATFORM_FAILURE, false);
2823
2824 hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
2825 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set major input type",
2826 PLATFORM_FAILURE, false);
2827
2828 hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12);
2829 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set input sub type",
2830 PLATFORM_FAILURE, false);
2831
2832 hr = MFSetAttributeSize(media_type.get(), MF_MT_FRAME_SIZE, width, height);
2833 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set media type attributes",
2834 PLATFORM_FAILURE, false);
2835
2836 hr = video_format_converter_mft_->SetInputType(0, media_type.get(), 0);
2837 if (FAILED(hr)) {
2838 base::debug::Alias(&hr);
2839 // TODO(ananta)
2840 // Remove this CHECK when the change to use DX11 for H/W decoding
2841 // stablizes.
2842 CHECK(false);
2843 }
2844 RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set converter input type",
2845 PLATFORM_FAILURE, false);
2846
2847 // It appears that we fail to set MFVideoFormat_ARGB32 as the output media
2848 // type in certain configurations. Try to fallback to MFVideoFormat_RGB32
2849 // in such cases. If both fail, then bail.
2850 bool media_type_set =
2851 SetTransformOutputType(video_format_converter_mft_.get(),
2852 MFVideoFormat_ARGB32,
2853 width,
2854 height);
2855 if (!media_type_set) {
2856 media_type_set =
2857 SetTransformOutputType(video_format_converter_mft_.get(),
2858 MFVideoFormat_RGB32,
2859 width,
2860 height);
2861 }
2862
2863 if (!media_type_set) {
2864 // Remove this once this stabilizes in the field.
2865 CHECK(false);
2866 LOG(ERROR) << "Failed to find a matching RGB output type in the converter";
2867 return false;
2868 }
2869
2870 dx11_video_format_converter_media_type_needs_init_ = false;
2871 return true;
2872 }
2873
2874 bool DXVAVideoDecodeAccelerator::GetVideoFrameDimensions(
2875 IMFSample* sample,
2876 int* width,
2877 int* height) {
2878 base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
2879 HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive());
2880 RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from output sample", false);
2881
2882 if (use_dx11_) {
2883 base::win::ScopedComPtr<IMFDXGIBuffer> dxgi_buffer;
2884 base::win::ScopedComPtr<ID3D11Texture2D> d3d11_texture;
2885 hr = dxgi_buffer.QueryFrom(output_buffer.get());
2886 RETURN_ON_HR_FAILURE(hr, "Failed to get DXGIBuffer from output sample",
2887 false);
2888 hr = dxgi_buffer->GetResource(
2889 __uuidof(ID3D11Texture2D),
2890 reinterpret_cast<void**>(d3d11_texture.Receive()));
2891 RETURN_ON_HR_FAILURE(hr, "Failed to get D3D11Texture from output buffer",
2892 false);
2893 D3D11_TEXTURE2D_DESC d3d11_texture_desc;
2894 d3d11_texture->GetDesc(&d3d11_texture_desc);
2895 *width = d3d11_texture_desc.Width;
2896 *height = d3d11_texture_desc.Height;
2897 } else {
2898 base::win::ScopedComPtr<IDirect3DSurface9> surface;
2899 hr = MFGetService(output_buffer.get(), MR_BUFFER_SERVICE,
2900 IID_PPV_ARGS(surface.Receive()));
2901 RETURN_ON_HR_FAILURE(hr, "Failed to get D3D surface from output sample",
2902 false);
2903 D3DSURFACE_DESC surface_desc;
2904 hr = surface->GetDesc(&surface_desc);
2905 RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false);
2906 *width = surface_desc.Width;
2907 *height = surface_desc.Height;
2908 }
2909 return true;
2910 }
2911
2912 bool DXVAVideoDecodeAccelerator::SetTransformOutputType(
2913 IMFTransform* transform,
2914 const GUID& output_type,
2915 int width,
2916 int height) {
2917 HRESULT hr = E_FAIL;
2918 base::win::ScopedComPtr<IMFMediaType> media_type;
2919
2920 for (uint32_t i = 0;
2921 SUCCEEDED(transform->GetOutputAvailableType(
2922 0, i, media_type.Receive()));
2923 ++i) {
2924 GUID out_subtype = {0};
2925 hr = media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype);
2926 RETURN_ON_HR_FAILURE(hr, "Failed to get output major type", false);
2927
2928 if (out_subtype == output_type) {
2929 if (width && height) {
2930 hr = MFSetAttributeSize(media_type.get(), MF_MT_FRAME_SIZE, width,
2931 height);
2932 RETURN_ON_HR_FAILURE(hr, "Failed to set media type attributes", false);
2933 }
2934 hr = transform->SetOutputType(0, media_type.get(), 0); // No flags
2935 RETURN_ON_HR_FAILURE(hr, "Failed to set output type", false);
2936 return true;
2937 }
2938 media_type.Release();
2939 }
2940 return false;
2941 }
2942
2943 HRESULT DXVAVideoDecodeAccelerator::CheckConfigChanged(
2944 IMFSample* sample, bool* config_changed) {
2945 if (codec_ != media::kCodecH264)
2946 return S_FALSE;
2947
2948 base::win::ScopedComPtr<IMFMediaBuffer> buffer;
2949 HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive());
2950 RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from input sample", hr);
2951
2952 MediaBufferScopedPointer scoped_media_buffer(buffer.get());
2953
2954 if (!config_change_detector_->DetectConfig(
2955 scoped_media_buffer.get(),
2956 scoped_media_buffer.current_length())) {
2957 RETURN_ON_HR_FAILURE(E_FAIL, "Failed to detect H.264 stream config",
2958 E_FAIL);
2959 }
2960 *config_changed = config_change_detector_->config_changed();
2961 return S_OK;
2962 }
2963
// Handles a mid-stream configuration change detected by CheckConfigChanged:
// tears the decoder down, reinitializes it, and resumes decoding of any
// queued input buffers on the decoder thread. Must run on the main thread.
void DXVAVideoDecodeAccelerator::ConfigChanged(
    const Config& config) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  SetState(kConfigChange);
  // Presumably drops output buffers tied to the old configuration — see
  // DismissStaleBuffers for the meaning of the |true| argument.
  DismissStaleBuffers(true);
  // Tear down decoder state before reinitializing below.
  Invalidate();
  // NOTE(review): reinitializes from the |config_| member, not the |config|
  // parameter, which is otherwise unused here — confirm this is intentional.
  Initialize(config_, client_);
  decoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
                 base::Unretained(this)));
}
2977
2978 } // namespace content
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698