| OLD | NEW |
| (Empty) |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "content/common/gpu/media/dxva_video_decode_accelerator_win.h" | |
| 6 | |
| 7 #include <memory> | |
| 8 | |
| 9 #if !defined(OS_WIN) | |
| 10 #error This file should only be built on Windows. | |
| 11 #endif // !defined(OS_WIN) | |
| 12 | |
| 13 #include <codecapi.h> | |
| 14 #include <dxgi1_2.h> | |
| 15 #include <ks.h> | |
| 16 #include <mfapi.h> | |
| 17 #include <mferror.h> | |
| 18 #include <ntverp.h> | |
| 19 #include <stddef.h> | |
| 20 #include <string.h> | |
| 21 #include <wmcodecdsp.h> | |
| 22 | |
| 23 #include "base/base_paths_win.h" | |
| 24 #include "base/bind.h" | |
| 25 #include "base/callback.h" | |
| 26 #include "base/debug/alias.h" | |
| 27 #include "base/file_version_info.h" | |
| 28 #include "base/files/file_path.h" | |
| 29 #include "base/logging.h" | |
| 30 #include "base/macros.h" | |
| 31 #include "base/memory/shared_memory.h" | |
| 32 #include "base/message_loop/message_loop.h" | |
| 33 #include "base/path_service.h" | |
| 34 #include "base/trace_event/trace_event.h" | |
| 35 #include "base/win/windows_version.h" | |
| 36 #include "build/build_config.h" | |
| 37 #include "media/base/win/mf_initializer.h" | |
| 38 #include "media/video/video_decode_accelerator.h" | |
| 39 #include "third_party/angle/include/EGL/egl.h" | |
| 40 #include "third_party/angle/include/EGL/eglext.h" | |
| 41 #include "ui/gl/gl_bindings.h" | |
| 42 #include "ui/gl/gl_context.h" | |
| 43 #include "ui/gl/gl_fence.h" | |
| 44 #include "ui/gl/gl_surface_egl.h" | |
| 45 | |
| 46 namespace { | |
| 47 | |
// Path is appended on to the PROGRAM_FILES base path.
const wchar_t kVPXDecoderDLLPath[] = L"Intel\\Media SDK\\";

// File name of the Intel Media SDK VP8 decoder MFT DLL. The 32-bit or 64-bit
// variant is selected to match the architecture this binary is built for.
const wchar_t kVP8DecoderDLLName[] =
#if defined(ARCH_CPU_X86)
    L"mfx_mft_vp8vd_32.dll";
#elif defined(ARCH_CPU_X86_64)
    L"mfx_mft_vp8vd_64.dll";
#else
#error Unsupported Windows CPU Architecture
#endif

// File name of the Intel Media SDK VP9 decoder MFT DLL, selected the same way
// as the VP8 DLL name above.
const wchar_t kVP9DecoderDLLName[] =
#if defined(ARCH_CPU_X86)
    L"mfx_mft_vp9vd_32.dll";
#elif defined(ARCH_CPU_X86_64)
    L"mfx_mft_vp9vd_64.dll";
#else
#error Unsupported Windows CPU Architecture
#endif
| 68 | |
// Class ID used to instantiate the VP8 decoder MFT.
const CLSID CLSID_WebmMfVp8Dec = {
  0x451e3cb7,
  0x2622,
  0x4ba5,
  { 0x8e, 0x1d, 0x44, 0xb3, 0xc4, 0x1d, 0x09, 0x24 }
};

// Class ID used to instantiate the VP9 decoder MFT.
const CLSID CLSID_WebmMfVp9Dec = {
  0x07ab4bd2,
  0x1979,
  0x4fcd,
  { 0xa6, 0x97, 0xdf, 0x9a, 0xd1, 0x5b, 0x34, 0xfe }
};

// Media subtype GUID for VP8 bitstreams: the FOURCC 'VP80' (0x30385056,
// little-endian) combined with the standard media-subtype GUID suffix.
const CLSID MEDIASUBTYPE_VP80 = {
  0x30385056,
  0x0000,
  0x0010,
  { 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 }
};

// Media subtype GUID for VP9 bitstreams (FOURCC 'VP90').
const CLSID MEDIASUBTYPE_VP90 = {
  0x30395056,
  0x0000,
  0x0010,
  { 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 }
};
| 96 | |
// The CLSID of the video processor media foundation transform which we use for
// texture color conversion in DX11.
// Defined in mfidl.h in the Windows 10 SDK. ntverp.h provides VER_PRODUCTBUILD
// to detect which SDK we are compiling with; on older SDKs the GUID must be
// defined here.
#if VER_PRODUCTBUILD < 10011  // VER_PRODUCTBUILD for 10.0.10158.0 SDK.
DEFINE_GUID(CLSID_VideoProcessorMFT,
            0x88753b26, 0x5b24, 0x49bd, 0xb2, 0xe7, 0xc, 0x44, 0x5c, 0x78,
            0xc9, 0x82);
#endif

// MF_XVP_PLAYBACK_MODE
// Data type: UINT32 (treat as BOOL)
// If this attribute is TRUE, the video processor will run in playback mode
// where it allows callers to allocate output samples and allows last frame
// regeneration (repaint).
DEFINE_GUID(MF_XVP_PLAYBACK_MODE, 0x3c5d293f, 0xad67, 0x4e29, 0xaf, 0x12,
            0xcf, 0x3e, 0x23, 0x8a, 0xcc, 0xe9);
| 114 | |
// Defines the GUID for the Intel H264 DXVA device.
static const GUID DXVA2_Intel_ModeH264_E = {
  0x604F8E68, 0x4951, 0x4c54,{ 0x88, 0xFE, 0xAB, 0xD2, 0x5C, 0x15, 0xB3, 0xD6}
};
| 119 | |
// R600, R700, Evergreen and Cayman AMD cards. These support DXVA via UVD3
// or earlier, and don't handle resolutions higher than 1920 x 1088 well.
// NOTE(review): the entries appear to be PCI device ids — confirm against the
// code that consults this table.
static const DWORD g_AMDUVD3GPUList[] = {
    0x9400, 0x9401, 0x9402, 0x9403, 0x9405, 0x940a, 0x940b, 0x940f, 0x94c0,
    0x94c1, 0x94c3, 0x94c4, 0x94c5, 0x94c6, 0x94c7, 0x94c8, 0x94c9, 0x94cb,
    0x94cc, 0x94cd, 0x9580, 0x9581, 0x9583, 0x9586, 0x9587, 0x9588, 0x9589,
    0x958a, 0x958b, 0x958c, 0x958d, 0x958e, 0x958f, 0x9500, 0x9501, 0x9504,
    0x9505, 0x9506, 0x9507, 0x9508, 0x9509, 0x950f, 0x9511, 0x9515, 0x9517,
    0x9519, 0x95c0, 0x95c2, 0x95c4, 0x95c5, 0x95c6, 0x95c7, 0x95c9, 0x95cc,
    0x95cd, 0x95ce, 0x95cf, 0x9590, 0x9591, 0x9593, 0x9595, 0x9596, 0x9597,
    0x9598, 0x9599, 0x959b, 0x9610, 0x9611, 0x9612, 0x9613, 0x9614, 0x9615,
    0x9616, 0x9710, 0x9711, 0x9712, 0x9713, 0x9714, 0x9715, 0x9440, 0x9441,
    0x9442, 0x9443, 0x9444, 0x9446, 0x944a, 0x944b, 0x944c, 0x944e, 0x9450,
    0x9452, 0x9456, 0x945a, 0x945b, 0x945e, 0x9460, 0x9462, 0x946a, 0x946b,
    0x947a, 0x947b, 0x9480, 0x9487, 0x9488, 0x9489, 0x948a, 0x948f, 0x9490,
    0x9491, 0x9495, 0x9498, 0x949c, 0x949e, 0x949f, 0x9540, 0x9541, 0x9542,
    0x954e, 0x954f, 0x9552, 0x9553, 0x9555, 0x9557, 0x955f, 0x94a0, 0x94a1,
    0x94a3, 0x94b1, 0x94b3, 0x94b4, 0x94b5, 0x94b9, 0x68e0, 0x68e1, 0x68e4,
    0x68e5, 0x68e8, 0x68e9, 0x68f1, 0x68f2, 0x68f8, 0x68f9, 0x68fa, 0x68fe,
    0x68c0, 0x68c1, 0x68c7, 0x68c8, 0x68c9, 0x68d8, 0x68d9, 0x68da, 0x68de,
    0x68a0, 0x68a1, 0x68a8, 0x68a9, 0x68b0, 0x68b8, 0x68b9, 0x68ba, 0x68be,
    0x68bf, 0x6880, 0x6888, 0x6889, 0x688a, 0x688c, 0x688d, 0x6898, 0x6899,
    0x689b, 0x689e, 0x689c, 0x689d, 0x9802, 0x9803, 0x9804, 0x9805, 0x9806,
    0x9807, 0x9808, 0x9809, 0x980a, 0x9640, 0x9641, 0x9647, 0x9648, 0x964a,
    0x964b, 0x964c, 0x964e, 0x964f, 0x9642, 0x9643, 0x9644, 0x9645, 0x9649,
    0x6720, 0x6721, 0x6722, 0x6723, 0x6724, 0x6725, 0x6726, 0x6727, 0x6728,
    0x6729, 0x6738, 0x6739, 0x673e, 0x6740, 0x6741, 0x6742, 0x6743, 0x6744,
    0x6745, 0x6746, 0x6747, 0x6748, 0x6749, 0x674a, 0x6750, 0x6751, 0x6758,
    0x6759, 0x675b, 0x675d, 0x675f, 0x6840, 0x6841, 0x6842, 0x6843, 0x6849,
    0x6850, 0x6858, 0x6859, 0x6760, 0x6761, 0x6762, 0x6763, 0x6764, 0x6765,
    0x6766, 0x6767, 0x6768, 0x6770, 0x6771, 0x6772, 0x6778, 0x6779, 0x677b,
    0x6700, 0x6701, 0x6702, 0x6703, 0x6704, 0x6705, 0x6706, 0x6707, 0x6708,
    0x6709, 0x6718, 0x6719, 0x671c, 0x671d, 0x671f, 0x683D, 0x9900, 0x9901,
    0x9903, 0x9904, 0x9905, 0x9906, 0x9907, 0x9908, 0x9909, 0x990a, 0x990b,
    0x990c, 0x990d, 0x990e, 0x990f, 0x9910, 0x9913, 0x9917, 0x9918, 0x9919,
    0x9990, 0x9991, 0x9992, 0x9993, 0x9994, 0x9995, 0x9996, 0x9997, 0x9998,
    0x9999, 0x999a, 0x999b, 0x999c, 0x999d, 0x99a0, 0x99a2, 0x99a4,
};

// Legacy Intel GPUs (Second generation) which have trouble with resolutions
// higher than 1920 x 1088.
static const DWORD g_IntelLegacyGPUList[] = {
    0x102, 0x106, 0x116, 0x126,
};
| 164 | |
| 165 // Provides scoped access to the underlying buffer in an IMFMediaBuffer | |
| 166 // instance. | |
| 167 class MediaBufferScopedPointer { | |
| 168 public: | |
| 169 MediaBufferScopedPointer(IMFMediaBuffer* media_buffer) | |
| 170 : media_buffer_(media_buffer), | |
| 171 buffer_(nullptr), | |
| 172 max_length_(0), | |
| 173 current_length_(0) { | |
| 174 HRESULT hr = media_buffer_->Lock(&buffer_, &max_length_, ¤t_length_); | |
| 175 CHECK(SUCCEEDED(hr)); | |
| 176 } | |
| 177 | |
| 178 ~MediaBufferScopedPointer() { | |
| 179 HRESULT hr = media_buffer_->Unlock(); | |
| 180 CHECK(SUCCEEDED(hr)); | |
| 181 } | |
| 182 | |
| 183 uint8_t* get() { | |
| 184 return buffer_; | |
| 185 } | |
| 186 | |
| 187 DWORD current_length() const { | |
| 188 return current_length_; | |
| 189 } | |
| 190 | |
| 191 private: | |
| 192 base::win::ScopedComPtr<IMFMediaBuffer> media_buffer_; | |
| 193 uint8_t* buffer_; | |
| 194 DWORD max_length_; | |
| 195 DWORD current_length_; | |
| 196 | |
| 197 DISALLOW_COPY_AND_ASSIGN(MediaBufferScopedPointer); | |
| 198 }; | |
| 199 | |
| 200 } // namespace | |
| 201 | |
| 202 namespace content { | |
| 203 | |
// Codec profiles this accelerator can handle: H.264 (baseline/main/high),
// VP8, and all four VP9 profiles.
static const media::VideoCodecProfile kSupportedProfiles[] = {
  media::H264PROFILE_BASELINE,
  media::H264PROFILE_MAIN,
  media::H264PROFILE_HIGH,
  media::VP8PROFILE_ANY,
  media::VP9PROFILE_PROFILE0,
  media::VP9PROFILE_PROFILE1,
  media::VP9PROFILE_PROFILE2,
  media::VP9PROFILE_PROFILE3
};
| 214 | |
// Function pointer of type CreateDXGIDeviceManager (declared in the header),
// shared by all decoder instances. NULL until initialized — presumably
// resolved dynamically at runtime elsewhere in this file; confirm against the
// initialization site.
CreateDXGIDeviceManager DXVAVideoDecodeAccelerator::create_dxgi_device_manager_
    = NULL;
| 217 | |
// Evaluates |result|; on failure logs |log| and returns |ret| from the
// enclosing function.
#define RETURN_ON_FAILURE(result, log, ret)  \
  do {                                       \
    if (!(result)) {                         \
      DLOG(ERROR) << log;                    \
      return ret;                            \
    }                                        \
  } while (0)

// Like RETURN_ON_FAILURE but for HRESULTs: fails when |result| is a failure
// HRESULT, and appends the hex HRESULT value to the log message.
#define RETURN_ON_HR_FAILURE(result, log, ret)  \
  RETURN_ON_FAILURE(SUCCEEDED(result),          \
                    log << ", HRESULT: 0x" << std::hex << result, \
                    ret);

// Like RETURN_ON_FAILURE, but also reports |error_code| to the client via
// StopOnError() before returning.
#define RETURN_AND_NOTIFY_ON_FAILURE(result, log, error_code, ret)  \
  do {                                                              \
    if (!(result)) {                                                \
      DVLOG(1) << log;                                              \
      StopOnError(error_code);                                      \
      return ret;                                                   \
    }                                                               \
  } while (0)

// HRESULT variant of RETURN_AND_NOTIFY_ON_FAILURE.
#define RETURN_AND_NOTIFY_ON_HR_FAILURE(result, log, error_code, ret)  \
  RETURN_AND_NOTIFY_ON_FAILURE(SUCCEEDED(result),                      \
                               log << ", HRESULT: 0x" << std::hex << result, \
                               error_code, ret);
| 244 | |
// Miscellaneous tuning constants.
enum {
  // Maximum number of iterations we allow before aborting the attempt to flush
  // the batched queries to the driver and allow torn/corrupt frames to be
  // rendered.
  kFlushDecoderSurfaceTimeoutMs = 1,
  // Maximum iterations where we try to flush the d3d device.
  kMaxIterationsForD3DFlush = 4,
  // Maximum iterations where we try to flush the ANGLE device before reusing
  // the texture.
  kMaxIterationsForANGLEReuseFlush = 16,
  // We only request 5 picture buffers from the client which are used to hold
  // the decoded samples. These buffers are then reused when the client tells
  // us that it is done with the buffer.
  kNumPictureBuffers = 5,
  // The keyed mutex should always be released before the other thread
  // attempts to acquire it, so AcquireSync should always return immediately.
  kAcquireSyncWaitMs = 0,
};
| 263 | |
| 264 static IMFSample* CreateEmptySample() { | |
| 265 base::win::ScopedComPtr<IMFSample> sample; | |
| 266 HRESULT hr = MFCreateSample(sample.Receive()); | |
| 267 RETURN_ON_HR_FAILURE(hr, "MFCreateSample failed", NULL); | |
| 268 return sample.Detach(); | |
| 269 } | |
| 270 | |
| 271 // Creates a Media Foundation sample with one buffer of length |buffer_length| | |
| 272 // on a |align|-byte boundary. Alignment must be a perfect power of 2 or 0. | |
| 273 static IMFSample* CreateEmptySampleWithBuffer(uint32_t buffer_length, | |
| 274 int align) { | |
| 275 CHECK_GT(buffer_length, 0U); | |
| 276 | |
| 277 base::win::ScopedComPtr<IMFSample> sample; | |
| 278 sample.Attach(CreateEmptySample()); | |
| 279 | |
| 280 base::win::ScopedComPtr<IMFMediaBuffer> buffer; | |
| 281 HRESULT hr = E_FAIL; | |
| 282 if (align == 0) { | |
| 283 // Note that MFCreateMemoryBuffer is same as MFCreateAlignedMemoryBuffer | |
| 284 // with the align argument being 0. | |
| 285 hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive()); | |
| 286 } else { | |
| 287 hr = MFCreateAlignedMemoryBuffer(buffer_length, | |
| 288 align - 1, | |
| 289 buffer.Receive()); | |
| 290 } | |
| 291 RETURN_ON_HR_FAILURE(hr, "Failed to create memory buffer for sample", NULL); | |
| 292 | |
| 293 hr = sample->AddBuffer(buffer.get()); | |
| 294 RETURN_ON_HR_FAILURE(hr, "Failed to add buffer to sample", NULL); | |
| 295 | |
| 296 buffer->SetCurrentLength(0); | |
| 297 return sample.Detach(); | |
| 298 } | |
| 299 | |
| 300 // Creates a Media Foundation sample with one buffer containing a copy of the | |
| 301 // given Annex B stream data. | |
| 302 // If duration and sample time are not known, provide 0. | |
| 303 // |min_size| specifies the minimum size of the buffer (might be required by | |
| 304 // the decoder for input). If no alignment is required, provide 0. | |
| 305 static IMFSample* CreateInputSample(const uint8_t* stream, | |
| 306 uint32_t size, | |
| 307 uint32_t min_size, | |
| 308 int alignment) { | |
| 309 CHECK(stream); | |
| 310 CHECK_GT(size, 0U); | |
| 311 base::win::ScopedComPtr<IMFSample> sample; | |
| 312 sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size), | |
| 313 alignment)); | |
| 314 RETURN_ON_FAILURE(sample.get(), "Failed to create empty sample", NULL); | |
| 315 | |
| 316 base::win::ScopedComPtr<IMFMediaBuffer> buffer; | |
| 317 HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive()); | |
| 318 RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from sample", NULL); | |
| 319 | |
| 320 DWORD max_length = 0; | |
| 321 DWORD current_length = 0; | |
| 322 uint8_t* destination = NULL; | |
| 323 hr = buffer->Lock(&destination, &max_length, ¤t_length); | |
| 324 RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", NULL); | |
| 325 | |
| 326 CHECK_EQ(current_length, 0u); | |
| 327 CHECK_GE(max_length, size); | |
| 328 memcpy(destination, stream, size); | |
| 329 | |
| 330 hr = buffer->SetCurrentLength(size); | |
| 331 RETURN_ON_HR_FAILURE(hr, "Failed to set buffer length", NULL); | |
| 332 | |
| 333 hr = buffer->Unlock(); | |
| 334 RETURN_ON_HR_FAILURE(hr, "Failed to unlock buffer", NULL); | |
| 335 | |
| 336 return sample.Detach(); | |
| 337 } | |
| 338 | |
| 339 // Helper function to create a COM object instance from a DLL. The alternative | |
| 340 // is to use the CoCreateInstance API which requires the COM apartment to be | |
| 341 // initialized which is not the case on the GPU main thread. We want to avoid | |
| 342 // initializing COM as it may have sideeffects. | |
| 343 HRESULT CreateCOMObjectFromDll(HMODULE dll, const CLSID& clsid, const IID& iid, | |
| 344 void** object) { | |
| 345 if (!dll || !object) | |
| 346 return E_INVALIDARG; | |
| 347 | |
| 348 using GetClassObject = HRESULT (WINAPI*)( | |
| 349 const CLSID& clsid, const IID& iid, void** object); | |
| 350 | |
| 351 GetClassObject get_class_object = reinterpret_cast<GetClassObject>( | |
| 352 GetProcAddress(dll, "DllGetClassObject")); | |
| 353 RETURN_ON_FAILURE( | |
| 354 get_class_object, "Failed to get DllGetClassObject pointer", E_FAIL); | |
| 355 | |
| 356 base::win::ScopedComPtr<IClassFactory> factory; | |
| 357 HRESULT hr = get_class_object( | |
| 358 clsid, | |
| 359 __uuidof(IClassFactory), | |
| 360 factory.ReceiveVoid()); | |
| 361 RETURN_ON_HR_FAILURE(hr, "DllGetClassObject failed", hr); | |
| 362 | |
| 363 hr = factory->CreateInstance(NULL, iid, object); | |
| 364 return hr; | |
| 365 } | |
| 366 | |
| 367 // Helper function to query the ANGLE device object. The template argument T | |
| 368 // identifies the device interface being queried. IDirect3DDevice9Ex for d3d9 | |
| 369 // and ID3D11Device for dx11. | |
| 370 template<class T> | |
| 371 base::win::ScopedComPtr<T> QueryDeviceObjectFromANGLE(int object_type) { | |
| 372 base::win::ScopedComPtr<T> device_object; | |
| 373 | |
| 374 EGLDisplay egl_display = nullptr; | |
| 375 intptr_t egl_device = 0; | |
| 376 intptr_t device = 0; | |
| 377 | |
| 378 { | |
| 379 TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. GetHardwareDisplay"); | |
| 380 egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay(); | |
| 381 } | |
| 382 | |
| 383 RETURN_ON_FAILURE( | |
| 384 gfx::GLSurfaceEGL::HasEGLExtension("EGL_EXT_device_query"), | |
| 385 "EGL_EXT_device_query missing", | |
| 386 device_object); | |
| 387 | |
| 388 PFNEGLQUERYDISPLAYATTRIBEXTPROC QueryDisplayAttribEXT = nullptr; | |
| 389 | |
| 390 { | |
| 391 TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. eglGetProcAddress"); | |
| 392 | |
| 393 QueryDisplayAttribEXT = | |
| 394 reinterpret_cast<PFNEGLQUERYDISPLAYATTRIBEXTPROC>(eglGetProcAddress( | |
| 395 "eglQueryDisplayAttribEXT")); | |
| 396 | |
| 397 RETURN_ON_FAILURE( | |
| 398 QueryDisplayAttribEXT, | |
| 399 "Failed to get the eglQueryDisplayAttribEXT function from ANGLE", | |
| 400 device_object); | |
| 401 } | |
| 402 | |
| 403 PFNEGLQUERYDEVICEATTRIBEXTPROC QueryDeviceAttribEXT = nullptr; | |
| 404 | |
| 405 { | |
| 406 TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. eglGetProcAddress"); | |
| 407 | |
| 408 QueryDeviceAttribEXT = | |
| 409 reinterpret_cast<PFNEGLQUERYDEVICEATTRIBEXTPROC>(eglGetProcAddress( | |
| 410 "eglQueryDeviceAttribEXT")); | |
| 411 | |
| 412 RETURN_ON_FAILURE( | |
| 413 QueryDeviceAttribEXT, | |
| 414 "Failed to get the eglQueryDeviceAttribEXT function from ANGLE", | |
| 415 device_object); | |
| 416 } | |
| 417 | |
| 418 { | |
| 419 TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. QueryDisplayAttribEXT"); | |
| 420 | |
| 421 RETURN_ON_FAILURE( | |
| 422 QueryDisplayAttribEXT(egl_display, EGL_DEVICE_EXT, &egl_device), | |
| 423 "The eglQueryDisplayAttribEXT function failed to get the EGL device", | |
| 424 device_object); | |
| 425 } | |
| 426 | |
| 427 RETURN_ON_FAILURE( | |
| 428 egl_device, | |
| 429 "Failed to get the EGL device", | |
| 430 device_object); | |
| 431 | |
| 432 { | |
| 433 TRACE_EVENT0("gpu", "QueryDeviceObjectFromANGLE. QueryDisplayAttribEXT"); | |
| 434 | |
| 435 RETURN_ON_FAILURE( | |
| 436 QueryDeviceAttribEXT( | |
| 437 reinterpret_cast<EGLDeviceEXT>(egl_device), object_type, &device), | |
| 438 "The eglQueryDeviceAttribEXT function failed to get the device", | |
| 439 device_object); | |
| 440 | |
| 441 RETURN_ON_FAILURE(device, "Failed to get the ANGLE device", device_object); | |
| 442 } | |
| 443 | |
| 444 device_object = reinterpret_cast<T*>(device); | |
| 445 return device_object; | |
| 446 } | |
| 447 | |
// All change-detection state starts out cleared. The H.264 parser itself is
// created lazily on the first DetectConfig() call.
H264ConfigChangeDetector::H264ConfigChangeDetector()
    : last_sps_id_(0),
      last_pps_id_(0),
      config_changed_(false),
      pending_config_changed_(false) {
}
| 454 | |
| 455 H264ConfigChangeDetector::~H264ConfigChangeDetector() { | |
| 456 } | |
| 457 | |
| 458 bool H264ConfigChangeDetector::DetectConfig(const uint8_t* stream, | |
| 459 unsigned int size) { | |
| 460 std::vector<uint8_t> sps; | |
| 461 std::vector<uint8_t> pps; | |
| 462 media::H264NALU nalu; | |
| 463 bool idr_seen = false; | |
| 464 | |
| 465 if (!parser_.get()) | |
| 466 parser_.reset(new media::H264Parser); | |
| 467 | |
| 468 parser_->SetStream(stream, size); | |
| 469 config_changed_ = false; | |
| 470 | |
| 471 while (true) { | |
| 472 media::H264Parser::Result result = parser_->AdvanceToNextNALU(&nalu); | |
| 473 | |
| 474 if (result == media::H264Parser::kEOStream) | |
| 475 break; | |
| 476 | |
| 477 if (result == media::H264Parser::kUnsupportedStream) { | |
| 478 DLOG(ERROR) << "Unsupported H.264 stream"; | |
| 479 return false; | |
| 480 } | |
| 481 | |
| 482 if (result != media::H264Parser::kOk) { | |
| 483 DLOG(ERROR) << "Failed to parse H.264 stream"; | |
| 484 return false; | |
| 485 } | |
| 486 | |
| 487 switch (nalu.nal_unit_type) { | |
| 488 case media::H264NALU::kSPS: | |
| 489 result = parser_->ParseSPS(&last_sps_id_); | |
| 490 if (result == media::H264Parser::kUnsupportedStream) { | |
| 491 DLOG(ERROR) << "Unsupported SPS"; | |
| 492 return false; | |
| 493 } | |
| 494 | |
| 495 if (result != media::H264Parser::kOk) { | |
| 496 DLOG(ERROR) << "Could not parse SPS"; | |
| 497 return false; | |
| 498 } | |
| 499 | |
| 500 sps.assign(nalu.data, nalu.data + nalu.size); | |
| 501 break; | |
| 502 | |
| 503 case media::H264NALU::kPPS: | |
| 504 result = parser_->ParsePPS(&last_pps_id_); | |
| 505 if (result == media::H264Parser::kUnsupportedStream) { | |
| 506 DLOG(ERROR) << "Unsupported PPS"; | |
| 507 return false; | |
| 508 } | |
| 509 if (result != media::H264Parser::kOk) { | |
| 510 DLOG(ERROR) << "Could not parse PPS"; | |
| 511 return false; | |
| 512 } | |
| 513 pps.assign(nalu.data, nalu.data + nalu.size); | |
| 514 break; | |
| 515 | |
| 516 case media::H264NALU::kIDRSlice: | |
| 517 idr_seen = true; | |
| 518 // If we previously detected a configuration change, and see an IDR | |
| 519 // slice next time around, we need to flag a configuration change. | |
| 520 if (pending_config_changed_) { | |
| 521 config_changed_ = true; | |
| 522 pending_config_changed_ = false; | |
| 523 } | |
| 524 break; | |
| 525 | |
| 526 default: | |
| 527 break; | |
| 528 } | |
| 529 } | |
| 530 | |
| 531 if (!sps.empty() && sps != last_sps_) { | |
| 532 if (!last_sps_.empty()) { | |
| 533 // Flag configuration changes after we see an IDR slice. | |
| 534 if (idr_seen) { | |
| 535 config_changed_ = true; | |
| 536 } else { | |
| 537 pending_config_changed_ = true; | |
| 538 } | |
| 539 } | |
| 540 last_sps_.swap(sps); | |
| 541 } | |
| 542 | |
| 543 if (!pps.empty() && pps != last_pps_) { | |
| 544 if (!last_pps_.empty()) { | |
| 545 // Flag configuration changes after we see an IDR slice. | |
| 546 if (idr_seen) { | |
| 547 config_changed_ = true; | |
| 548 } else { | |
| 549 pending_config_changed_ = true; | |
| 550 } | |
| 551 } | |
| 552 last_pps_.swap(pps); | |
| 553 } | |
| 554 return true; | |
| 555 } | |
| 556 | |
// Maintains information about a DXVA picture buffer, i.e. whether it is
// available for rendering, the texture information, etc.
struct DXVAVideoDecodeAccelerator::DXVAPictureBuffer {
 public:
  // Creates a DXVAPictureBuffer wrapping |buffer|, allocating the backing
  // shared texture and EGL surface. Returns a null linked_ptr on failure.
  static linked_ptr<DXVAPictureBuffer> Create(
      const DXVAVideoDecodeAccelerator& decoder,
      const media::PictureBuffer& buffer,
      EGLConfig egl_config);
  ~DXVAPictureBuffer();

  // Allocates the shared D3D9 or D3D11 texture backing this buffer.
  bool InitializeTexture(const DXVAVideoDecodeAccelerator& decoder,
                         bool use_rgb);

  // Releases references held for the last copy and marks the buffer
  // available again.
  bool ReusePictureBuffer();
  void ResetReuseFence();
  // Copies the output sample data to the picture buffer provided by the
  // client.
  // The dest_surface parameter contains the decoded bits.
  bool CopyOutputSampleDataToPictureBuffer(
      DXVAVideoDecodeAccelerator* decoder,
      IDirect3DSurface9* dest_surface,
      ID3D11Texture2D* dx11_texture,
      int input_buffer_id);

  bool available() const {
    return available_;
  }

  void set_available(bool available) {
    available_ = available;
  }

  int id() const {
    return picture_buffer_.id();
  }

  gfx::Size size() const {
    return picture_buffer_.size();
  }

  bool waiting_to_reuse() const { return waiting_to_reuse_; }

  gfx::GLFence* reuse_fence() { return reuse_fence_.get(); }

  // Called when the source surface |src_surface| is copied to the destination
  // |dest_surface|
  bool CopySurfaceComplete(IDirect3DSurface9* src_surface,
                           IDirect3DSurface9* dest_surface);

 private:
  explicit DXVAPictureBuffer(const media::PictureBuffer& buffer);

  bool available_;

  // This is true if the decoder is currently waiting on the fence before
  // reusing the buffer.
  bool waiting_to_reuse_;
  media::PictureBuffer picture_buffer_;
  EGLSurface decoding_surface_;
  std::unique_ptr<gfx::GLFence> reuse_fence_;

  // Share handle for the decoding texture, filled in by InitializeTexture().
  HANDLE texture_share_handle_;
  base::win::ScopedComPtr<IDirect3DTexture9> decoding_texture_;
  base::win::ScopedComPtr<ID3D11Texture2D> dx11_decoding_texture_;

  base::win::ScopedComPtr<IDXGIKeyedMutex> egl_keyed_mutex_;
  base::win::ScopedComPtr<IDXGIKeyedMutex> dx11_keyed_mutex_;

  // This is the last value that was used to release the keyed mutex.
  uint64_t keyed_mutex_value_;

  // The following |IDirect3DSurface9| interface pointers are used to hold
  // references on the surfaces during the course of a StretchRect operation
  // to copy the source surface to the target. The references are released
  // when the StretchRect operation i.e. the copy completes.
  base::win::ScopedComPtr<IDirect3DSurface9> decoder_surface_;
  base::win::ScopedComPtr<IDirect3DSurface9> target_surface_;

  // This ID3D11Texture2D interface pointer is used to hold a reference to the
  // decoder texture during the course of a copy operation. This reference is
  // released when the copy completes.
  base::win::ScopedComPtr<ID3D11Texture2D> decoder_dx11_texture_;

  // Set to true if RGB is supported by the texture.
  // Defaults to true.
  bool use_rgb_;

  DISALLOW_COPY_AND_ASSIGN(DXVAPictureBuffer);
};
| 646 | |
// static
linked_ptr<DXVAVideoDecodeAccelerator::DXVAPictureBuffer>
DXVAVideoDecodeAccelerator::DXVAPictureBuffer::Create(
    const DXVAVideoDecodeAccelerator& decoder,
    const media::PictureBuffer& buffer,
    EGLConfig egl_config) {
  linked_ptr<DXVAPictureBuffer> picture_buffer(new DXVAPictureBuffer(buffer));

  EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();

  // Determine whether the EGL config binds as RGB; this selects both the
  // pbuffer texture format below and the D3D9 texture format chosen in
  // InitializeTexture().
  EGLint use_rgb = 1;
  eglGetConfigAttrib(egl_display, egl_config, EGL_BIND_TO_TEXTURE_RGB,
                     &use_rgb);

  if (!picture_buffer->InitializeTexture(decoder, !!use_rgb))
    return linked_ptr<DXVAPictureBuffer>(nullptr);

  EGLint attrib_list[] = {
    EGL_WIDTH, buffer.size().width(),
    EGL_HEIGHT, buffer.size().height(),
    EGL_TEXTURE_FORMAT, use_rgb ? EGL_TEXTURE_RGB : EGL_TEXTURE_RGBA,
    EGL_TEXTURE_TARGET, EGL_TEXTURE_2D,
    EGL_NONE
  };

  // Wrap the shared texture handle in an EGL pbuffer surface so ANGLE can
  // bind it as a texture image.
  picture_buffer->decoding_surface_ = eglCreatePbufferFromClientBuffer(
      egl_display, EGL_D3D_TEXTURE_2D_SHARE_HANDLE_ANGLE,
      picture_buffer->texture_share_handle_, egl_config, attrib_list);
  RETURN_ON_FAILURE(picture_buffer->decoding_surface_,
                    "Failed to create surface",
                    linked_ptr<DXVAPictureBuffer>(NULL));
  if (decoder.d3d11_device_ && decoder.use_keyed_mutex_) {
    // Grab ANGLE's side of the keyed mutex protecting the shared texture.
    void* keyed_mutex = nullptr;
    EGLBoolean ret = eglQuerySurfacePointerANGLE(
        egl_display, picture_buffer->decoding_surface_,
        EGL_DXGI_KEYED_MUTEX_ANGLE, &keyed_mutex);
    RETURN_ON_FAILURE(keyed_mutex && ret == EGL_TRUE,
                      "Failed to query ANGLE keyed mutex",
                      linked_ptr<DXVAPictureBuffer>(nullptr));
    picture_buffer->egl_keyed_mutex_ = base::win::ScopedComPtr<IDXGIKeyedMutex>(
        static_cast<IDXGIKeyedMutex*>(keyed_mutex));
  }
  picture_buffer->use_rgb_ = !!use_rgb;
  return picture_buffer;
}
| 692 | |
| 693 bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::InitializeTexture( | |
| 694 const DXVAVideoDecodeAccelerator& decoder, | |
| 695 bool use_rgb) { | |
| 696 DCHECK(!texture_share_handle_); | |
| 697 if (decoder.d3d11_device_) { | |
| 698 D3D11_TEXTURE2D_DESC desc; | |
| 699 desc.Width = picture_buffer_.size().width(); | |
| 700 desc.Height = picture_buffer_.size().height(); | |
| 701 desc.MipLevels = 1; | |
| 702 desc.ArraySize = 1; | |
| 703 desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM; | |
| 704 desc.SampleDesc.Count = 1; | |
| 705 desc.SampleDesc.Quality = 0; | |
| 706 desc.Usage = D3D11_USAGE_DEFAULT; | |
| 707 desc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET; | |
| 708 desc.CPUAccessFlags = 0; | |
| 709 desc.MiscFlags = decoder.use_keyed_mutex_ | |
| 710 ? D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX | |
| 711 : D3D11_RESOURCE_MISC_SHARED; | |
| 712 | |
| 713 HRESULT hr = decoder.d3d11_device_->CreateTexture2D( | |
| 714 &desc, nullptr, dx11_decoding_texture_.Receive()); | |
| 715 RETURN_ON_HR_FAILURE(hr, "Failed to create texture", false); | |
| 716 if (decoder.use_keyed_mutex_) { | |
| 717 hr = dx11_keyed_mutex_.QueryFrom(dx11_decoding_texture_.get()); | |
| 718 RETURN_ON_HR_FAILURE(hr, "Failed to get keyed mutex", false); | |
| 719 } | |
| 720 | |
| 721 base::win::ScopedComPtr<IDXGIResource> resource; | |
| 722 hr = resource.QueryFrom(dx11_decoding_texture_.get()); | |
| 723 DCHECK(SUCCEEDED(hr)); | |
| 724 hr = resource->GetSharedHandle(&texture_share_handle_); | |
| 725 RETURN_ON_FAILURE(SUCCEEDED(hr) && texture_share_handle_, | |
| 726 "Failed to query shared handle", false); | |
| 727 | |
| 728 } else { | |
| 729 HRESULT hr = E_FAIL; | |
| 730 hr = decoder.d3d9_device_ex_->CreateTexture( | |
| 731 picture_buffer_.size().width(), picture_buffer_.size().height(), 1, | |
| 732 D3DUSAGE_RENDERTARGET, use_rgb ? D3DFMT_X8R8G8B8 : D3DFMT_A8R8G8B8, | |
| 733 D3DPOOL_DEFAULT, decoding_texture_.Receive(), &texture_share_handle_); | |
| 734 RETURN_ON_HR_FAILURE(hr, "Failed to create texture", false); | |
| 735 RETURN_ON_FAILURE(texture_share_handle_, "Failed to query shared handle", | |
| 736 false); | |
| 737 } | |
| 738 return true; | |
| 739 } | |
| 740 | |
// Buffers start out available; the texture, surface, and keyed mutexes are
// allocated later by Create()/InitializeTexture().
DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer(
    const media::PictureBuffer& buffer)
    : available_(true),
      waiting_to_reuse_(false),
      picture_buffer_(buffer),
      decoding_surface_(NULL),
      texture_share_handle_(nullptr),
      keyed_mutex_value_(0),
      use_rgb_(true) {}
| 750 | |
| 751 DXVAVideoDecodeAccelerator::DXVAPictureBuffer::~DXVAPictureBuffer() { | |
| 752 if (decoding_surface_) { | |
| 753 EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay(); | |
| 754 | |
| 755 eglReleaseTexImage( | |
| 756 egl_display, | |
| 757 decoding_surface_, | |
| 758 EGL_BACK_BUFFER); | |
| 759 | |
| 760 eglDestroySurface( | |
| 761 egl_display, | |
| 762 decoding_surface_); | |
| 763 decoding_surface_ = NULL; | |
| 764 } | |
| 765 } | |
| 766 | |
| 767 bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ReusePictureBuffer() { | |
| 768 DCHECK(decoding_surface_); | |
| 769 EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay(); | |
| 770 eglReleaseTexImage( | |
| 771 egl_display, | |
| 772 decoding_surface_, | |
| 773 EGL_BACK_BUFFER); | |
| 774 decoder_surface_.Release(); | |
| 775 target_surface_.Release(); | |
| 776 decoder_dx11_texture_.Release(); | |
| 777 waiting_to_reuse_ = false; | |
| 778 set_available(true); | |
| 779 if (egl_keyed_mutex_) { | |
| 780 HRESULT hr = egl_keyed_mutex_->ReleaseSync(++keyed_mutex_value_); | |
| 781 RETURN_ON_FAILURE(hr == S_OK, "Could not release sync mutex", false); | |
| 782 } | |
| 783 return true; | |
| 784 } | |
| 785 | |
| 786 void DXVAVideoDecodeAccelerator::DXVAPictureBuffer::ResetReuseFence() { | |
| 787 if (!reuse_fence_ || !reuse_fence_->ResetSupported()) | |
| 788 reuse_fence_.reset(gfx::GLFence::Create()); | |
| 789 else | |
| 790 reuse_fence_->ResetState(); | |
| 791 waiting_to_reuse_ = true; | |
| 792 } | |
| 793 | |
| 794 bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer:: | |
| 795 CopyOutputSampleDataToPictureBuffer( | |
| 796 DXVAVideoDecodeAccelerator* decoder, | |
| 797 IDirect3DSurface9* dest_surface, | |
| 798 ID3D11Texture2D* dx11_texture, | |
| 799 int input_buffer_id) { | |
| 800 DCHECK(dest_surface || dx11_texture); | |
| 801 if (dx11_texture) { | |
| 802 // Grab a reference on the decoder texture. This reference will be released | |
| 803 // when we receive a notification that the copy was completed or when the | |
| 804 // DXVAPictureBuffer instance is destroyed. | |
| 805 decoder_dx11_texture_ = dx11_texture; | |
| 806 decoder->CopyTexture(dx11_texture, dx11_decoding_texture_.get(), | |
| 807 dx11_keyed_mutex_, keyed_mutex_value_, NULL, id(), | |
| 808 input_buffer_id); | |
| 809 return true; | |
| 810 } | |
| 811 D3DSURFACE_DESC surface_desc; | |
| 812 HRESULT hr = dest_surface->GetDesc(&surface_desc); | |
| 813 RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false); | |
| 814 | |
| 815 D3DSURFACE_DESC texture_desc; | |
| 816 decoding_texture_->GetLevelDesc(0, &texture_desc); | |
| 817 | |
| 818 if (texture_desc.Width != surface_desc.Width || | |
| 819 texture_desc.Height != surface_desc.Height) { | |
| 820 NOTREACHED() << "Decode surface of different dimension than texture"; | |
| 821 return false; | |
| 822 } | |
| 823 | |
| 824 hr = decoder->d3d9_->CheckDeviceFormatConversion( | |
| 825 D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, surface_desc.Format, | |
| 826 use_rgb_ ? D3DFMT_X8R8G8B8 : D3DFMT_A8R8G8B8); | |
| 827 RETURN_ON_HR_FAILURE(hr, "Device does not support format converision", false); | |
| 828 | |
| 829 // The same picture buffer can be reused for a different frame. Release the | |
| 830 // target surface and the decoder references here. | |
| 831 target_surface_.Release(); | |
| 832 decoder_surface_.Release(); | |
| 833 | |
| 834 // Grab a reference on the decoder surface and the target surface. These | |
| 835 // references will be released when we receive a notification that the | |
| 836 // copy was completed or when the DXVAPictureBuffer instance is destroyed. | |
| 837 // We hold references here as it is easier to manage their lifetimes. | |
| 838 hr = decoding_texture_->GetSurfaceLevel(0, target_surface_.Receive()); | |
| 839 RETURN_ON_HR_FAILURE(hr, "Failed to get surface from texture", false); | |
| 840 | |
| 841 decoder_surface_ = dest_surface; | |
| 842 | |
| 843 decoder->CopySurface(decoder_surface_.get(), target_surface_.get(), id(), | |
| 844 input_buffer_id); | |
| 845 return true; | |
| 846 } | |
| 847 | |
// Completion callback for the asynchronous frame copy started in
// CopyOutputSampleDataToPictureBuffer. Drops the references held for the
// copy, acquires the keyed mutex (if used) and binds the copied image to
// the picture buffer's GL texture. Returns false if the mutex acquire
// fails or times out.
bool DXVAVideoDecodeAccelerator::DXVAPictureBuffer::CopySurfaceComplete(
    IDirect3DSurface9* src_surface,
    IDirect3DSurface9* dest_surface) {
  DCHECK(!available());

  // Save the caller's 2D texture binding so it can be restored below.
  GLint current_texture = 0;
  glGetIntegerv(GL_TEXTURE_BINDING_2D, &current_texture);

  glBindTexture(GL_TEXTURE_2D, picture_buffer_.texture_ids()[0]);

  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);

  // The two parameters distinguish the D3D9 path (both non-null) from the
  // DX11 path (both null); they must match the references taken when the
  // copy was issued.
  if (src_surface && dest_surface) {
    DCHECK_EQ(src_surface, decoder_surface_.get());
    DCHECK_EQ(dest_surface, target_surface_.get());
    decoder_surface_.Release();
    target_surface_.Release();
  } else {
    DCHECK(decoder_dx11_texture_.get());
    decoder_dx11_texture_.Release();
  }
  if (egl_keyed_mutex_) {
    // Acquire with the incremented value the producer released with; bounded
    // wait so a wedged producer cannot hang us forever.
    keyed_mutex_value_++;
    HRESULT result =
        egl_keyed_mutex_->AcquireSync(keyed_mutex_value_, kAcquireSyncWaitMs);
    RETURN_ON_FAILURE(result == S_OK, "Could not acquire sync mutex", false);
  }

  EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();
  eglBindTexImage(
      egl_display,
      decoding_surface_,
      EGL_BACK_BUFFER);

  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
  // Restore the texture binding saved on entry.
  glBindTexture(GL_TEXTURE_2D, current_texture);
  return true;
}
| 886 | |
// Tracks a decoded output sample that has not yet been copied into a
// picture buffer. |picture_buffer_id| stays -1 until a buffer is assigned.
DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
    int32_t buffer_id,
    IMFSample* sample)
    : input_buffer_id(buffer_id), picture_buffer_id(-1) {
  // Attach() adopts the caller's reference on |sample| without AddRef'ing.
  output_sample.Attach(sample);
}

DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo(
    const PendingSampleInfo& other) = default;

DXVAVideoDecodeAccelerator::PendingSampleInfo::~PendingSampleInfo() {}
| 898 | |
// Constructs the accelerator in the kUninitialized state. All real setup
// (device managers, MFT decoder, decoder thread) happens in Initialize().
DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator(
    const GetGLContextCallback& get_gl_context_cb,
    const MakeGLContextCurrentCallback& make_context_current_cb,
    bool enable_accelerated_vpx_decode)
    : client_(NULL),
      dev_manager_reset_token_(0),
      dx11_dev_manager_reset_token_(0),
      egl_config_(NULL),
      state_(kUninitialized),
      pictures_requested_(false),
      inputs_before_decode_(0),
      sent_drain_message_(false),
      get_gl_context_cb_(get_gl_context_cb),
      make_context_current_cb_(make_context_current_cb),
      codec_(media::kUnknownVideoCodec),
      decoder_thread_("DXVAVideoDecoderThread"),
      pending_flush_(false),
      use_dx11_(false),
      use_keyed_mutex_(false),
      dx11_video_format_converter_media_type_needs_init_(true),
      using_angle_device_(false),
      enable_accelerated_vpx_decode_(enable_accelerated_vpx_decode),
      weak_this_factory_(this) {
  // Cache a weak pointer on the constructing (main) thread so other threads
  // can post back to it without touching the factory.
  weak_ptr_ = weak_this_factory_.GetWeakPtr();
  // The MFT stream info structs are plain C structs filled in later by
  // GetStreamsInfoAndBufferReqs(); zero them until then.
  memset(&input_stream_info_, 0, sizeof(input_stream_info_));
  memset(&output_stream_info_, 0, sizeof(output_stream_info_));
}
| 926 | |
DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() {
  // |client_| is not owned by this class; just drop the pointer.
  client_ = NULL;
}
| 930 | |
// Validates the configuration, probes required system DLLs, creates and
// configures the MFT decoder and starts the decoder thread. On failure,
// the RETURN_AND_NOTIFY_* paths notify |client| of the error; returns
// true only when the decoder is fully ready (state -> kNormal).
bool DXVAVideoDecodeAccelerator::Initialize(const Config& config,
                                            Client* client) {
  if (get_gl_context_cb_.is_null() || make_context_current_cb_.is_null()) {
    NOTREACHED() << "GL callbacks are required for this VDA";
    return false;
  }

  if (config.is_encrypted) {
    NOTREACHED() << "Encrypted streams are not supported for this VDA";
    return false;
  }

  client_ = client;

  main_thread_task_runner_ = base::MessageLoop::current()->task_runner();

  bool profile_supported = false;
  for (const auto& supported_profile : kSupportedProfiles) {
    if (config.profile == supported_profile) {
      profile_supported = true;
      break;
    }
  }
  if (!profile_supported) {
    RETURN_AND_NOTIFY_ON_FAILURE(false,
        "Unsupported h.264, vp8, or vp9 profile", PLATFORM_FAILURE, false);
  }

  // Not all versions of Windows 7 and later include Media Foundation DLLs.
  // Instead of crashing while delay loading the DLL when calling MFStartup()
  // below, probe whether we can successfully load the DLL now.
  // See http://crbug.com/339678 for details.
  // Note: GetModuleHandle succeeds here because PreSandboxInitialization()
  // already LoadLibrary'd MFPlat.dll on supported systems.
  HMODULE dxgi_manager_dll = ::GetModuleHandle(L"MFPlat.dll");
  RETURN_ON_FAILURE(dxgi_manager_dll, "MFPlat.dll is required for decoding",
                    false);

  // On Windows 8+ mfplat.dll provides the MFCreateDXGIDeviceManager API.
  // On Windows 7 mshtmlmedia.dll provides it.

  // TODO(ananta)
  // The code below works, as in we can create the DX11 device manager for
  // Windows 7. However the IMFTransform we use for texture conversion and
  // copy does not exist on Windows 7. Look into an alternate approach
  // and enable the code below.
#if defined(ENABLE_DX11_FOR_WIN7)
  if (base::win::GetVersion() == base::win::VERSION_WIN7) {
    dxgi_manager_dll = ::GetModuleHandle(L"mshtmlmedia.dll");
    RETURN_ON_FAILURE(dxgi_manager_dll,
                      "mshtmlmedia.dll is required for decoding", false);
  }
#endif
  // If we don't find the MFCreateDXGIDeviceManager API we fallback to D3D9
  // decoding.
  if (dxgi_manager_dll && !create_dxgi_device_manager_) {
    create_dxgi_device_manager_ = reinterpret_cast<CreateDXGIDeviceManager>(
        ::GetProcAddress(dxgi_manager_dll, "MFCreateDXGIDeviceManager"));
  }

  // The D3D texture share handle extension is how decoded frames reach GL.
  RETURN_AND_NOTIFY_ON_FAILURE(
      gfx::g_driver_egl.ext.b_EGL_ANGLE_surface_d3d_texture_2d_share_handle,
      "EGL_ANGLE_surface_d3d_texture_2d_share_handle unavailable",
      PLATFORM_FAILURE,
      false);

  // Fences gate picture-buffer reuse (see ResetReuseFence /
  // WaitForOutputBuffer).
  RETURN_AND_NOTIFY_ON_FAILURE(gfx::GLFence::IsSupported(),
                               "GL fences are unsupported", PLATFORM_FAILURE,
                               false);

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state == kUninitialized),
      "Initialize: invalid state: " << state, ILLEGAL_STATE, false);

  media::InitializeMediaFoundation();

  RETURN_AND_NOTIFY_ON_FAILURE(InitDecoder(config.profile),
      "Failed to initialize decoder", PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(GetStreamsInfoAndBufferReqs(),
      "Failed to get input/output stream info.", PLATFORM_FAILURE, false);

  // Tell the MFT that streaming starts; both notifications are required
  // before feeding input samples.
  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0),
      "Send MFT_MESSAGE_NOTIFY_BEGIN_STREAMING notification failed",
      PLATFORM_FAILURE, false);

  RETURN_AND_NOTIFY_ON_FAILURE(
      SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0),
      "Send MFT_MESSAGE_NOTIFY_START_OF_STREAM notification failed",
      PLATFORM_FAILURE, false);

  config_ = config;

  config_change_detector_.reset(new H264ConfigChangeDetector);

  SetState(kNormal);

  StartDecoderThread();
  return true;
}
| 1030 | |
// Creates the D3D9Ex device (reusing ANGLE's device when available) and the
// DXVA2 device manager used by the D3D9 decode path. Returns false on any
// failure.
bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() {
  TRACE_EVENT0("gpu", "DXVAVideoDecodeAccelerator_CreateD3DDevManager");

  HRESULT hr = E_FAIL;

  hr = Direct3DCreate9Ex(D3D_SDK_VERSION, d3d9_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Direct3DCreate9Ex failed", false);

  // The decode output is NV12; the driver must be able to convert it to RGB
  // during the surface copy.
  hr = d3d9_->CheckDeviceFormatConversion(
      D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL,
      static_cast<D3DFORMAT>(MAKEFOURCC('N', 'V', '1', '2')),
      D3DFMT_X8R8G8B8);
  RETURN_ON_HR_FAILURE(hr,
      "D3D9 driver does not support H/W format conversion", false);

  // Prefer sharing ANGLE's own D3D9 device so no cross-device copies are
  // needed.
  base::win::ScopedComPtr<IDirect3DDevice9> angle_device =
      QueryDeviceObjectFromANGLE<IDirect3DDevice9>(EGL_D3D9_DEVICE_ANGLE);
  if (angle_device.get())
    using_angle_device_ = true;

  if (using_angle_device_) {
    hr = d3d9_device_ex_.QueryFrom(angle_device.get());
    RETURN_ON_HR_FAILURE(hr,
        "QueryInterface for IDirect3DDevice9Ex from angle device failed",
        false);
  } else {
    // Otherwise create a minimal windowless device of our own; the 1x1
    // backbuffer is never presented.
    D3DPRESENT_PARAMETERS present_params = {0};
    present_params.BackBufferWidth = 1;
    present_params.BackBufferHeight = 1;
    present_params.BackBufferFormat = D3DFMT_UNKNOWN;
    present_params.BackBufferCount = 1;
    present_params.SwapEffect = D3DSWAPEFFECT_DISCARD;
    present_params.hDeviceWindow = NULL;
    present_params.Windowed = TRUE;
    present_params.Flags = D3DPRESENTFLAG_VIDEO;
    present_params.FullScreen_RefreshRateInHz = 0;
    present_params.PresentationInterval = 0;

    hr = d3d9_->CreateDeviceEx(D3DADAPTER_DEFAULT,
                               D3DDEVTYPE_HAL,
                               NULL,
                               D3DCREATE_FPU_PRESERVE |
                               D3DCREATE_MIXED_VERTEXPROCESSING |
                               D3DCREATE_MULTITHREADED,
                               &present_params,
                               NULL,
                               d3d9_device_ex_.Receive());
    RETURN_ON_HR_FAILURE(hr, "Failed to create D3D device", false);
  }

  hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token_,
                                         device_manager_.Receive());
  RETURN_ON_HR_FAILURE(hr, "DXVA2CreateDirect3DDeviceManager9 failed", false);

  hr = device_manager_->ResetDevice(d3d9_device_ex_.get(),
                                    dev_manager_reset_token_);
  RETURN_ON_HR_FAILURE(hr, "Failed to reset device", false);

  // Event query used to poll for GPU copy completion.
  hr = d3d9_device_ex_->CreateQuery(D3DQUERYTYPE_EVENT, query_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to create D3D device query", false);
  // Ensure query_ API works (to avoid an infinite loop later in
  // CopyOutputSampleDataToPictureBuffer).
  hr = query_->Issue(D3DISSUE_END);
  RETURN_ON_HR_FAILURE(hr, "Failed to issue END test query", false);
  return true;
}
| 1097 | |
// Creates the DX11 device, DXGI device manager and the video processor MFT
// used to convert/copy decoded textures on the DX11 path. Returns false on
// failure (and deliberately CHECKs on converter-creation failure; see
// below).
bool DXVAVideoDecodeAccelerator::CreateDX11DevManager() {
  HRESULT hr = create_dxgi_device_manager_(&dx11_dev_manager_reset_token_,
                                           d3d11_device_manager_.Receive());
  RETURN_ON_HR_FAILURE(hr, "MFCreateDXGIDeviceManager failed", false);

  // This array defines the set of DirectX hardware feature levels we support.
  // The ordering MUST be preserved. All applications are assumed to support
  // 9.1 unless otherwise stated by the application.
  D3D_FEATURE_LEVEL feature_levels[] = {
      D3D_FEATURE_LEVEL_11_1,
      D3D_FEATURE_LEVEL_11_0,
      D3D_FEATURE_LEVEL_10_1,
      D3D_FEATURE_LEVEL_10_0,
      D3D_FEATURE_LEVEL_9_3,
      D3D_FEATURE_LEVEL_9_2,
      D3D_FEATURE_LEVEL_9_1
  };

  UINT flags = D3D11_CREATE_DEVICE_VIDEO_SUPPORT;

#if defined _DEBUG
  flags |= D3D11_CREATE_DEVICE_DEBUG;
#endif

  D3D_FEATURE_LEVEL feature_level_out = D3D_FEATURE_LEVEL_11_0;
  hr = D3D11CreateDevice(NULL,
                         D3D_DRIVER_TYPE_HARDWARE,
                         NULL,
                         flags,
                         feature_levels,
                         arraysize(feature_levels),
                         D3D11_SDK_VERSION,
                         d3d11_device_.Receive(),
                         &feature_level_out,
                         d3d11_device_context_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to create DX11 device", false);

  // Enable multithreaded mode on the device. This ensures that accesses to
  // context are synchronized across threads. We have multiple threads
  // accessing the context, the media foundation decoder threads and the
  // decoder thread via the video format conversion transform.
  hr = multi_threaded_.QueryFrom(d3d11_device_.get());
  RETURN_ON_HR_FAILURE(hr, "Failed to query ID3D10Multithread", false);
  multi_threaded_->SetMultithreadProtected(TRUE);

  hr = d3d11_device_manager_->ResetDevice(d3d11_device_.get(),
                                          dx11_dev_manager_reset_token_);
  RETURN_ON_HR_FAILURE(hr, "Failed to reset device", false);

  // Event query used to poll for GPU copy completion, mirroring the D3D9
  // path's query_.
  D3D11_QUERY_DESC query_desc;
  query_desc.Query = D3D11_QUERY_EVENT;
  query_desc.MiscFlags = 0;
  hr = d3d11_device_->CreateQuery(
      &query_desc,
      d3d11_query_.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to create DX11 device query", false);

  // msvproc.dll was pre-loaded in PreSandboxInitialization(); it hosts the
  // video processor MFT.
  HMODULE video_processor_dll = ::GetModuleHandle(L"msvproc.dll");
  RETURN_ON_FAILURE(video_processor_dll, "Failed to load video processor",
                    false);

  hr = CreateCOMObjectFromDll(
      video_processor_dll,
      CLSID_VideoProcessorMFT,
      __uuidof(IMFTransform),
      video_format_converter_mft_.ReceiveVoid());
  if (FAILED(hr)) {
    // Alias |hr| so the failing HRESULT is visible in the crash dump; the
    // RETURN_ON_HR_FAILURE below is unreachable while this CHECK is in.
    base::debug::Alias(&hr);
    // TODO(ananta)
    // Remove this CHECK when the change to use DX11 for H/W decoding
    // stablizes.
    CHECK(false);
  }

  RETURN_ON_HR_FAILURE(hr, "Failed to create video format converter", false);

  base::win::ScopedComPtr<IMFAttributes> converter_attributes;
  hr = video_format_converter_mft_->GetAttributes(
      converter_attributes.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get converter attributes", false);

  hr = converter_attributes->SetUINT32(MF_XVP_PLAYBACK_MODE, TRUE);
  RETURN_ON_HR_FAILURE(
      hr,
      "Failed to set MF_XVP_PLAYBACK_MODE attribute on converter",
      false);

  hr = converter_attributes->SetUINT32(MF_LOW_LATENCY, FALSE);
  RETURN_ON_HR_FAILURE(
      hr,
      "Failed to set MF_LOW_LATENCY attribute on converter",
      false);
  return true;
}
| 1192 | |
// Accepts one bitstream buffer from the client on the main thread, wraps it
// in an IMFSample and hands it off to the decoder thread. The buffer id is
// smuggled through the MF pipeline as the sample timestamp.
void DXVAVideoDecodeAccelerator::Decode(
    const media::BitstreamBuffer& bitstream_buffer) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  // SharedMemory will take over the ownership of handle.
  base::SharedMemory shm(bitstream_buffer.handle(), true);

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state == kNormal || state == kStopped ||
                                state == kFlushing),
      "Invalid state: " << state, ILLEGAL_STATE,);
  if (bitstream_buffer.id() < 0) {
    RETURN_AND_NOTIFY_ON_FAILURE(
        false, "Invalid bitstream_buffer, id: " << bitstream_buffer.id(),
        INVALID_ARGUMENT, );
  }

  base::win::ScopedComPtr<IMFSample> sample;
  RETURN_AND_NOTIFY_ON_FAILURE(shm.Map(bitstream_buffer.size()),
                               "Failed in base::SharedMemory::Map",
                               PLATFORM_FAILURE, );

  // The input data is copied into the sample, so |shm| can be unmapped when
  // this function returns. The min() clamps the per-buffer size hint to the
  // MFT's reported input size.
  sample.Attach(CreateInputSample(
      reinterpret_cast<const uint8_t*>(shm.memory()), bitstream_buffer.size(),
      std::min<uint32_t>(bitstream_buffer.size(), input_stream_info_.cbSize),
      input_stream_info_.cbAlignment));
  RETURN_AND_NOTIFY_ON_FAILURE(sample.get(), "Failed to create input sample",
                               PLATFORM_FAILURE, );

  // The bitstream buffer id rides along as the sample time so the matching
  // output can be reported back against the right input.
  RETURN_AND_NOTIFY_ON_HR_FAILURE(sample->SetSampleTime(bitstream_buffer.id()),
      "Failed to associate input buffer id with sample", PLATFORM_FAILURE,);

  // Unretained is used here; presumably safe because the decoder thread is
  // stopped before |this| is destroyed -- confirm against Invalidate().
  decoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::DecodeInternal,
                 base::Unretained(this), sample));
}
| 1230 | |
// Receives the picture buffers allocated by the client in response to
// ProvidePictureBuffers, wraps each in a DXVAPictureBuffer and then drains
// any output samples that were waiting for buffers.
void DXVAVideoDecodeAccelerator::AssignPictureBuffers(
    const std::vector<media::PictureBuffer>& buffers) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state != kUninitialized),
      "Invalid state: " << state, ILLEGAL_STATE,);
  RETURN_AND_NOTIFY_ON_FAILURE((kNumPictureBuffers >= buffers.size()),
      "Failed to provide requested picture buffers. (Got " << buffers.size() <<
      ", requested " << kNumPictureBuffers << ")", INVALID_ARGUMENT,);

  // Copy the picture buffers provided by the client to the available list,
  // and mark these buffers as available for use.
  for (size_t buffer_index = 0; buffer_index < buffers.size();
       ++buffer_index) {
    DCHECK_LE(1u, buffers[buffer_index].texture_ids().size());
    linked_ptr<DXVAPictureBuffer> picture_buffer =
        DXVAPictureBuffer::Create(*this, buffers[buffer_index], egl_config_);
    RETURN_AND_NOTIFY_ON_FAILURE(picture_buffer.get(),
        "Failed to allocate picture buffer", PLATFORM_FAILURE,);

    // Buffer ids are expected to be unique; a duplicate would indicate a
    // client bug.
    bool inserted = output_picture_buffers_.insert(std::make_pair(
        buffers[buffer_index].id(), picture_buffer)).second;
    DCHECK(inserted);
  }

  // Now that buffers exist, flush out any decoded frames that were queued
  // waiting for them; a pending Flush can then also make progress.
  ProcessPendingSamples();
  if (pending_flush_) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                   base::Unretained(this)));
  }
}
| 1265 | |
// Client callback telling us a displayed picture buffer can be recycled.
// Handles three cases: a stale buffer from before a resolution change
// (dismissed), a buffer that can be reused immediately (keyed mutex /
// ANGLE device path), or one that must wait on a GL fence first.
void DXVAVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_buffer_id) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state != kUninitialized),
      "Invalid state: " << state, ILLEGAL_STATE,);

  if (output_picture_buffers_.empty() && stale_output_picture_buffers_.empty())
    return;

  OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
  // If we didn't find the picture id in the |output_picture_buffers_| map we
  // try the |stale_output_picture_buffers_| map, as this may have been an
  // output picture buffer from before a resolution change, that at resolution
  // change time had yet to be displayed. The client is calling us back to tell
  // us that we can now recycle this picture buffer, so if we were waiting to
  // dispose of it we now can.
  if (it == output_picture_buffers_.end()) {
    if (!stale_output_picture_buffers_.empty()) {
      it = stale_output_picture_buffers_.find(picture_buffer_id);
      RETURN_AND_NOTIFY_ON_FAILURE(it != stale_output_picture_buffers_.end(),
          "Invalid picture id: " << picture_buffer_id, INVALID_ARGUMENT,);
      main_thread_task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&DXVAVideoDecodeAccelerator::DeferredDismissStaleBuffer,
                     weak_this_factory_.GetWeakPtr(), picture_buffer_id));
    }
    return;
  }

  // Ignore duplicate reuse notifications for a buffer already recycled or
  // already waiting on its fence.
  if (it->second->available() || it->second->waiting_to_reuse())
    return;

  if (use_keyed_mutex_ || using_angle_device_) {
    // Keyed-mutex / shared-device paths need no GL fence: the buffer can be
    // reclaimed synchronously.
    RETURN_AND_NOTIFY_ON_FAILURE(it->second->ReusePictureBuffer(),
                                 "Failed to reuse picture buffer",
                                 PLATFORM_FAILURE, );

    ProcessPendingSamples();
    if (pending_flush_) {
      decoder_thread_task_runner_->PostTask(
          FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                                base::Unretained(this)));
    }
  } else {
    // Otherwise arm a fence and poll for its completion before reclaiming
    // (see WaitForOutputBuffer).
    RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
                                 "Failed to make context current",
                                 PLATFORM_FAILURE, );
    it->second->ResetReuseFence();

    WaitForOutputBuffer(picture_buffer_id, 0);
  }
}
| 1319 | |
// Polls the reuse fence of |picture_buffer_id| by re-posting itself (with a
// delay) up to kMaxIterationsForANGLEReuseFlush times; once the fence has
// completed (or the iteration budget is spent) the buffer is reclaimed.
void DXVAVideoDecodeAccelerator::WaitForOutputBuffer(int32_t picture_buffer_id,
                                                     int count) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
  // The buffer may have been dismissed (e.g. resolution change) between
  // polls; in that case there is nothing left to do.
  OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
  if (it == output_picture_buffers_.end())
    return;

  DXVAPictureBuffer* picture_buffer = it->second.get();

  DCHECK(!picture_buffer->available());
  DCHECK(picture_buffer->waiting_to_reuse());

  gfx::GLFence* fence = picture_buffer->reuse_fence();
  RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
                               "Failed to make context current",
                               PLATFORM_FAILURE, );
  if (count <= kMaxIterationsForANGLEReuseFlush && !fence->HasCompleted()) {
    // Fence not signaled yet: retry later with an incremented attempt count
    // so a stuck fence cannot poll forever.
    main_thread_task_runner_->PostDelayedTask(
        FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::WaitForOutputBuffer,
                              weak_this_factory_.GetWeakPtr(),
                              picture_buffer_id, count + 1),
        base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
    return;
  }
  RETURN_AND_NOTIFY_ON_FAILURE(picture_buffer->ReusePictureBuffer(),
                               "Failed to reuse picture buffer",
                               PLATFORM_FAILURE, );

  // A buffer just became available; drain waiting output and let a pending
  // Flush proceed.
  ProcessPendingSamples();
  if (pending_flush_) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                   base::Unretained(this)));
  }
}
| 1356 | |
// Client-initiated flush: marks the flush pending and asks the decoder
// thread to drain; NotifyFlushDone is presumably delivered once
// FlushInternal completes -- that path is outside this view.
void DXVAVideoDecodeAccelerator::Flush() {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  DVLOG(1) << "DXVAVideoDecodeAccelerator::Flush";

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state == kNormal || state == kStopped),
      "Unexpected decoder state: " << state, ILLEGAL_STATE,);

  SetState(kFlushing);

  // Read by buffer-recycling paths on this thread to re-trigger
  // FlushInternal as buffers free up.
  pending_flush_ = true;

  decoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                 base::Unretained(this)));
}
| 1375 | |
// Client-initiated reset: stops the decoder thread, drops undisplayed
// output frames, flushes the MFT and restarts the decoder thread. Posts
// NotifyResetDone back to the main thread when done.
void DXVAVideoDecodeAccelerator::Reset() {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  DVLOG(1) << "DXVAVideoDecodeAccelerator::Reset";

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state == kNormal || state == kStopped),
      "Reset: invalid state: " << state, ILLEGAL_STATE,);

  // Stopping the thread joins it, so no decode work races the teardown
  // below.
  decoder_thread_.Stop();

  SetState(kResetting);

  // If we have pending output frames waiting for display then we drop those
  // frames and set the corresponding picture buffer as available.
  PendingOutputSamples::iterator index;
  for (index = pending_output_samples_.begin();
       index != pending_output_samples_.end();
       ++index) {
    if (index->picture_buffer_id != -1) {
      OutputBuffers::iterator it = output_picture_buffers_.find(
          index->picture_buffer_id);
      if (it != output_picture_buffers_.end()) {
        DXVAPictureBuffer* picture_buffer = it->second.get();
        // Return value deliberately ignored: on reset, best effort is enough.
        picture_buffer->ReusePictureBuffer();
      }
    }
  }

  pending_output_samples_.clear();

  NotifyInputBuffersDropped();

  RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0),
      "Reset: Failed to send message.", PLATFORM_FAILURE,);

  main_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::NotifyResetDone,
                 weak_this_factory_.GetWeakPtr()));

  StartDecoderThread();
  SetState(kNormal);
}
| 1420 | |
// Per the VideoDecodeAccelerator contract, Destroy() (not the caller)
// deletes the instance after tearing down decoder state via Invalidate().
void DXVAVideoDecodeAccelerator::Destroy() {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
  Invalidate();
  delete this;
}
| 1426 | |
// This implementation does not support issuing Decode() calls from a
// separate client thread; always declines the setup.
bool DXVAVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
    const base::WeakPtr<Client>& decode_client,
    const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
  return false;
}
| 1432 | |
// Output textures are BGRA: the copy paths convert decoded frames to
// D3DFMT_(X|A)8R8G8B8, which is BGRA byte order in GL terms.
GLenum DXVAVideoDecodeAccelerator::GetSurfaceInternalFormat() const {
  return GL_BGRA_EXT;
}
| 1436 | |
// static
// Builds the advertised profile list from the static kSupportedProfiles
// table, attaching per-profile min/max resolutions.
media::VideoDecodeAccelerator::SupportedProfiles
DXVAVideoDecodeAccelerator::GetSupportedProfiles() {
  TRACE_EVENT0("gpu,startup",
               "DXVAVideoDecodeAccelerator::GetSupportedProfiles");

  // TODO(henryhsu): Need to ensure the profiles are actually supported.
  SupportedProfiles profiles;
  for (const auto& supported_profile : kSupportedProfiles) {
    std::pair<int, int> min_resolution = GetMinResolution(supported_profile);
    std::pair<int, int> max_resolution = GetMaxResolution(supported_profile);

    SupportedProfile profile;
    profile.profile = supported_profile;
    profile.min_resolution.SetSize(min_resolution.first, min_resolution.second);
    profile.max_resolution.SetSize(max_resolution.first, max_resolution.second);
    profiles.push_back(profile);
  }
  return profiles;
}
| 1457 | |
// static
// Pre-loads every DLL the decoder needs, presumably while disk access is
// still permitted before the GPU sandbox engages (the function name implies
// this -- confirm against the sandbox setup). Later code relies on
// GetModuleHandle finding these modules already loaded.
void DXVAVideoDecodeAccelerator::PreSandboxInitialization() {
  ::LoadLibrary(L"MFPlat.dll");
  ::LoadLibrary(L"msmpeg2vdec.dll");
  ::LoadLibrary(L"mf.dll");
  ::LoadLibrary(L"dxva2.dll");

  if (base::win::GetVersion() > base::win::VERSION_WIN7) {
    // Hosts the video processor MFT used by the DX11 path (see
    // CreateDX11DevManager).
    LoadLibrary(L"msvproc.dll");
  } else {
#if defined(ENABLE_DX11_FOR_WIN7)
    // On Windows 7, mshtmlmedia.dll provides MFCreateDXGIDeviceManager.
    LoadLibrary(L"mshtmlmedia.dll");
#endif
  }
}
| 1473 | |
| 1474 // static | |
| 1475 std::pair<int, int> DXVAVideoDecodeAccelerator::GetMinResolution( | |
| 1476 media::VideoCodecProfile profile) { | |
| 1477 TRACE_EVENT0("gpu,startup", | |
| 1478 "DXVAVideoDecodeAccelerator::GetMinResolution"); | |
| 1479 std::pair<int, int> min_resolution; | |
| 1480 if (profile >= media::H264PROFILE_BASELINE && | |
| 1481 profile <= media::H264PROFILE_HIGH) { | |
| 1482 // Windows Media Foundation H.264 decoding does not support decoding videos | |
| 1483 // with any dimension smaller than 48 pixels: | |
| 1484 // http://msdn.microsoft.com/en-us/library/windows/desktop/dd797815 | |
| 1485 min_resolution = std::make_pair(48, 48); | |
| 1486 } else { | |
| 1487 // TODO(ananta) | |
| 1488 // Detect this properly for VP8/VP9 profiles. | |
| 1489 min_resolution = std::make_pair(16, 16); | |
| 1490 } | |
| 1491 return min_resolution; | |
| 1492 } | |
| 1493 | |
| 1494 // static | |
| 1495 std::pair<int, int> DXVAVideoDecodeAccelerator::GetMaxResolution( | |
| 1496 const media::VideoCodecProfile profile) { | |
| 1497 TRACE_EVENT0("gpu,startup", | |
| 1498 "DXVAVideoDecodeAccelerator::GetMaxResolution"); | |
| 1499 std::pair<int, int> max_resolution; | |
| 1500 if (profile >= media::H264PROFILE_BASELINE && | |
| 1501 profile <= media::H264PROFILE_HIGH) { | |
| 1502 max_resolution = GetMaxH264Resolution(); | |
| 1503 } else { | |
| 1504 // TODO(ananta) | |
| 1505 // Detect this properly for VP8/VP9 profiles. | |
| 1506 max_resolution = std::make_pair(4096, 2160); | |
| 1507 } | |
| 1508 return max_resolution; | |
| 1509 } | |
| 1510 | |
| 1511 std::pair<int, int> DXVAVideoDecodeAccelerator::GetMaxH264Resolution() { | |
| 1512 TRACE_EVENT0("gpu,startup", | |
| 1513 "DXVAVideoDecodeAccelerator::GetMaxH264Resolution"); | |
| 1514 // The H.264 resolution detection operation is expensive. This static flag | |
| 1515 // allows us to run the detection once. | |
| 1516 static bool resolution_detected = false; | |
| 1517 // Use 1088 to account for 16x16 macroblocks. | |
| 1518 static std::pair<int, int> max_resolution = std::make_pair(1920, 1088); | |
| 1519 if (resolution_detected) | |
| 1520 return max_resolution; | |
| 1521 | |
| 1522 resolution_detected = true; | |
| 1523 | |
| 1524 // On Windows 7 the maximum resolution supported by media foundation is | |
| 1525 // 1920 x 1088. | |
| 1526 if (base::win::GetVersion() == base::win::VERSION_WIN7) | |
| 1527 return max_resolution; | |
| 1528 | |
| 1529 // To detect if a driver supports the desired resolutions, we try and create | |
| 1530 // a DXVA decoder instance for that resolution and profile. If that succeeds | |
| 1531 // we assume that the driver supports H/W H.264 decoding for that resolution. | |
| 1532 HRESULT hr = E_FAIL; | |
| 1533 base::win::ScopedComPtr<ID3D11Device> device; | |
| 1534 | |
| 1535 { | |
| 1536 TRACE_EVENT0("gpu,startup", | |
| 1537 "GetMaxH264Resolution. QueryDeviceObjectFromANGLE"); | |
| 1538 | |
| 1539 device = QueryDeviceObjectFromANGLE<ID3D11Device>(EGL_D3D11_DEVICE_ANGLE); | |
| 1540 if (!device.get()) | |
| 1541 return max_resolution; | |
| 1542 } | |
| 1543 | |
| 1544 base::win::ScopedComPtr<ID3D11VideoDevice> video_device; | |
| 1545 hr = device.QueryInterface(IID_ID3D11VideoDevice, | |
| 1546 video_device.ReceiveVoid()); | |
| 1547 if (FAILED(hr)) | |
| 1548 return max_resolution; | |
| 1549 | |
| 1550 GUID decoder_guid = {}; | |
| 1551 | |
| 1552 { | |
| 1553 TRACE_EVENT0("gpu,startup", | |
| 1554 "GetMaxH264Resolution. H.264 guid search begin"); | |
| 1555 // Enumerate supported video profiles and look for the H264 profile. | |
| 1556 bool found = false; | |
| 1557 UINT profile_count = video_device->GetVideoDecoderProfileCount(); | |
| 1558 for (UINT profile_idx = 0; profile_idx < profile_count; profile_idx++) { | |
| 1559 GUID profile_id = {}; | |
| 1560 hr = video_device->GetVideoDecoderProfile(profile_idx, &profile_id); | |
| 1561 if (SUCCEEDED(hr) && | |
| 1562 (profile_id == DXVA2_ModeH264_E || | |
| 1563 profile_id == DXVA2_Intel_ModeH264_E)) { | |
| 1564 decoder_guid = profile_id; | |
| 1565 found = true; | |
| 1566 break; | |
| 1567 } | |
| 1568 } | |
| 1569 if (!found) | |
| 1570 return max_resolution; | |
| 1571 } | |
| 1572 | |
| 1573 // Legacy AMD drivers with UVD3 or earlier and some Intel GPU's crash while | |
| 1574 // creating surfaces larger than 1920 x 1088. | |
| 1575 if (IsLegacyGPU(device.get())) | |
| 1576 return max_resolution; | |
| 1577 | |
| 1578 // We look for the following resolutions in the driver. | |
| 1579 // TODO(ananta) | |
| 1580 // Look into whether this list needs to be expanded. | |
| 1581 static std::pair<int, int> resolution_array[] = { | |
| 1582 // Use 1088 to account for 16x16 macroblocks. | |
| 1583 std::make_pair(1920, 1088), | |
| 1584 std::make_pair(2560, 1440), | |
| 1585 std::make_pair(3840, 2160), | |
| 1586 std::make_pair(4096, 2160), | |
| 1587 std::make_pair(4096, 2304), | |
| 1588 }; | |
| 1589 | |
| 1590 { | |
| 1591 TRACE_EVENT0("gpu,startup", | |
| 1592 "GetMaxH264Resolution. Resolution search begin"); | |
| 1593 | |
| 1594 for (size_t res_idx = 0; res_idx < arraysize(resolution_array); | |
| 1595 res_idx++) { | |
| 1596 D3D11_VIDEO_DECODER_DESC desc = {}; | |
| 1597 desc.Guid = decoder_guid; | |
| 1598 desc.SampleWidth = resolution_array[res_idx].first; | |
| 1599 desc.SampleHeight = resolution_array[res_idx].second; | |
| 1600 desc.OutputFormat = DXGI_FORMAT_NV12; | |
| 1601 UINT config_count = 0; | |
| 1602 hr = video_device->GetVideoDecoderConfigCount(&desc, &config_count); | |
| 1603 if (FAILED(hr) || config_count == 0) | |
| 1604 return max_resolution; | |
| 1605 | |
| 1606 D3D11_VIDEO_DECODER_CONFIG config = {}; | |
| 1607 hr = video_device->GetVideoDecoderConfig(&desc, 0, &config); | |
| 1608 if (FAILED(hr)) | |
| 1609 return max_resolution; | |
| 1610 | |
| 1611 base::win::ScopedComPtr<ID3D11VideoDecoder> video_decoder; | |
| 1612 hr = video_device->CreateVideoDecoder(&desc, &config, | |
| 1613 video_decoder.Receive()); | |
| 1614 if (!video_decoder.get()) | |
| 1615 return max_resolution; | |
| 1616 | |
| 1617 max_resolution = resolution_array[res_idx]; | |
| 1618 } | |
| 1619 } | |
| 1620 return max_resolution; | |
| 1621 } | |
| 1622 | |
| 1623 // static | |
| 1624 bool DXVAVideoDecodeAccelerator::IsLegacyGPU(ID3D11Device* device) { | |
| 1625 static const int kAMDGPUId1 = 0x1002; | |
| 1626 static const int kAMDGPUId2 = 0x1022; | |
| 1627 static const int kIntelGPU = 0x8086; | |
| 1628 | |
| 1629 static bool legacy_gpu = true; | |
| 1630 // This flag ensures that we determine the GPU type once. | |
| 1631 static bool legacy_gpu_determined = false; | |
| 1632 | |
| 1633 if (legacy_gpu_determined) | |
| 1634 return legacy_gpu; | |
| 1635 | |
| 1636 legacy_gpu_determined = true; | |
| 1637 | |
| 1638 base::win::ScopedComPtr<IDXGIDevice> dxgi_device; | |
| 1639 HRESULT hr = dxgi_device.QueryFrom(device); | |
| 1640 if (FAILED(hr)) | |
| 1641 return legacy_gpu; | |
| 1642 | |
| 1643 base::win::ScopedComPtr<IDXGIAdapter> adapter; | |
| 1644 hr = dxgi_device->GetAdapter(adapter.Receive()); | |
| 1645 if (FAILED(hr)) | |
| 1646 return legacy_gpu; | |
| 1647 | |
| 1648 DXGI_ADAPTER_DESC adapter_desc = {}; | |
| 1649 hr = adapter->GetDesc(&adapter_desc); | |
| 1650 if (FAILED(hr)) | |
| 1651 return legacy_gpu; | |
| 1652 | |
| 1653 // We check if the device is an Intel or an AMD device and whether it is in | |
| 1654 // the global list defined by the g_AMDUVD3GPUList and g_IntelLegacyGPUList | |
| 1655 // arrays above. If yes then the device is treated as a legacy device. | |
| 1656 if ((adapter_desc.VendorId == kAMDGPUId1) || | |
| 1657 adapter_desc.VendorId == kAMDGPUId2) { | |
| 1658 { | |
| 1659 TRACE_EVENT0("gpu,startup", | |
| 1660 "DXVAVideoDecodeAccelerator::IsLegacyGPU. AMD check"); | |
| 1661 for (size_t i = 0; i < arraysize(g_AMDUVD3GPUList); i++) { | |
| 1662 if (adapter_desc.DeviceId == g_AMDUVD3GPUList[i]) | |
| 1663 return legacy_gpu; | |
| 1664 } | |
| 1665 } | |
| 1666 } else if (adapter_desc.VendorId == kIntelGPU) { | |
| 1667 { | |
| 1668 TRACE_EVENT0("gpu,startup", | |
| 1669 "DXVAVideoDecodeAccelerator::IsLegacyGPU. Intel check"); | |
| 1670 for (size_t i = 0; i < arraysize(g_IntelLegacyGPUList); i++) { | |
| 1671 if (adapter_desc.DeviceId == g_IntelLegacyGPUList[i]) | |
| 1672 return legacy_gpu; | |
| 1673 } | |
| 1674 } | |
| 1675 } | |
| 1676 legacy_gpu = false; | |
| 1677 return legacy_gpu; | |
| 1678 } | |
| 1679 | |
// Loads and instantiates the decoder MFT for |profile| — H.264 via the
// system msmpeg2vdec.dll, or VP8/VP9 via the Intel Media SDK MFTs — then
// attaches a D3D device manager to it and selects an EGL config for output.
// Returns false if the codec is unsupported or any setup step fails.
bool DXVAVideoDecodeAccelerator::InitDecoder(media::VideoCodecProfile profile) {
  HMODULE decoder_dll = NULL;

  CLSID clsid = {};

  // Profile must fall within the valid range for one of the supported codecs.
  if (profile >= media::H264PROFILE_MIN && profile <= media::H264PROFILE_MAX) {
    // We mimic the steps CoCreateInstance uses to instantiate the object. This
    // was previously done because it failed inside the sandbox, and now is done
    // as a more minimal approach to avoid other side-effects CCI might have (as
    // we are still in a reduced sandbox).
    decoder_dll = ::GetModuleHandle(L"msmpeg2vdec.dll");
    RETURN_ON_FAILURE(decoder_dll,
                      "msmpeg2vdec.dll required for decoding is not loaded",
                      false);

    // Check version of DLL, version 6.1.7140 is blacklisted due to high crash
    // rates in browsers loading that DLL. If that is the version installed we
    // fall back to software decoding. See crbug/403440.
    std::unique_ptr<FileVersionInfo> version_info(
        FileVersionInfo::CreateFileVersionInfoForModule(decoder_dll));
    RETURN_ON_FAILURE(version_info,
                      "unable to get version of msmpeg2vdec.dll",
                      false);
    base::string16 file_version = version_info->file_version();
    RETURN_ON_FAILURE(file_version.find(L"6.1.7140") == base::string16::npos,
                      "blacklisted version of msmpeg2vdec.dll 6.1.7140",
                      false);
    codec_ = media::kCodecH264;
    clsid = __uuidof(CMSH264DecoderMFT);
  } else if (enable_accelerated_vpx_decode_ &&
             (profile == media::VP8PROFILE_ANY ||
              profile == media::VP9PROFILE_PROFILE0 ||
              profile == media::VP9PROFILE_PROFILE1 ||
              profile == media::VP9PROFILE_PROFILE2 ||
              profile == media::VP9PROFILE_PROFILE3)) {
    // The VPX decoder DLLs live under Program Files; on WOW64 use the native
    // (64-bit) Program Files directory instead of the 32-bit one.
    int program_files_key = base::DIR_PROGRAM_FILES;
    if (base::win::OSInfo::GetInstance()->wow64_status() ==
        base::win::OSInfo::WOW64_ENABLED) {
      program_files_key = base::DIR_PROGRAM_FILES6432;
    }

    base::FilePath dll_path;
    RETURN_ON_FAILURE(PathService::Get(program_files_key, &dll_path),
                      "failed to get path for Program Files", false);

    dll_path = dll_path.Append(kVPXDecoderDLLPath);
    if (profile == media::VP8PROFILE_ANY) {
      codec_ = media::kCodecVP8;
      dll_path = dll_path.Append(kVP8DecoderDLLName);
      clsid = CLSID_WebmMfVp8Dec;
    } else {
      codec_ = media::kCodecVP9;
      dll_path = dll_path.Append(kVP9DecoderDLLName);
      clsid = CLSID_WebmMfVp9Dec;
    }
    decoder_dll = ::LoadLibraryEx(dll_path.value().data(), NULL,
                                  LOAD_WITH_ALTERED_SEARCH_PATH);
    RETURN_ON_FAILURE(decoder_dll, "vpx decoder dll is not loaded", false);
  } else {
    RETURN_ON_FAILURE(false, "Unsupported codec.", false);
  }

  HRESULT hr = CreateCOMObjectFromDll(decoder_dll,
                                      clsid,
                                      __uuidof(IMFTransform),
                                      decoder_.ReceiveVoid());
  RETURN_ON_HR_FAILURE(hr, "Failed to create decoder instance", false);

  // Also decides use_dx11_ below based on decoder and ANGLE capabilities.
  RETURN_ON_FAILURE(CheckDecoderDxvaSupport(),
                    "Failed to check decoder DXVA support", false);

  // Hand the decoder the device manager it should allocate surfaces from:
  // DXGI manager when running on DX11, otherwise the D3D9 manager.
  ULONG_PTR device_manager_to_use = NULL;
  if (use_dx11_) {
    CHECK(create_dxgi_device_manager_);
    RETURN_AND_NOTIFY_ON_FAILURE(CreateDX11DevManager(),
                                 "Failed to initialize DX11 device and manager",
                                 PLATFORM_FAILURE,
                                 false);
    device_manager_to_use = reinterpret_cast<ULONG_PTR>(
        d3d11_device_manager_.get());
  } else {
    RETURN_AND_NOTIFY_ON_FAILURE(CreateD3DDevManager(),
                                 "Failed to initialize D3D device and manager",
                                 PLATFORM_FAILURE,
                                 false);
    device_manager_to_use = reinterpret_cast<ULONG_PTR>(device_manager_.get());
  }

  hr = decoder_->ProcessMessage(
            MFT_MESSAGE_SET_D3D_MANAGER,
            device_manager_to_use);
  if (use_dx11_) {
    RETURN_ON_HR_FAILURE(hr, "Failed to pass DX11 manager to decoder", false);
  } else {
    RETURN_ON_HR_FAILURE(hr, "Failed to pass D3D manager to decoder", false);
  }

  EGLDisplay egl_display = gfx::GLSurfaceEGL::GetHardwareDisplay();

  // Request a 32-bit RGB pbuffer-capable config for the output surfaces.
  EGLint config_attribs[] = {
    EGL_BUFFER_SIZE, 32,
    EGL_RED_SIZE, 8,
    EGL_GREEN_SIZE, 8,
    EGL_BLUE_SIZE, 8,
    EGL_SURFACE_TYPE, EGL_PBUFFER_BIT,
    EGL_ALPHA_SIZE, 0,
    EGL_NONE
  };

  EGLint num_configs;

  if (!eglChooseConfig(
      egl_display,
      config_attribs,
      &egl_config_,
      1,
      &num_configs))
    return false;

  return SetDecoderMediaTypes();
}
| 1802 | |
// Verifies the decoder MFT supports DXVA (hardware) decoding and configures
// it: enables H/W acceleration for H.264, requests low-latency mode
// (best-effort), and sets use_dx11_ / use_keyed_mutex_ from the decoder's
// and ANGLE's capabilities. Returns false if DXVA support cannot be
// confirmed or the GL context is unavailable.
bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() {
  base::win::ScopedComPtr<IMFAttributes> attributes;
  HRESULT hr = decoder_->GetAttributes(attributes.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get decoder attributes", false);

  UINT32 dxva = 0;
  hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva);
  RETURN_ON_HR_FAILURE(hr, "Failed to check if decoder supports DXVA", false);

  if (codec_ == media::kCodecH264) {
    hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE);
    RETURN_ON_HR_FAILURE(hr, "Failed to enable DXVA H/W decoding", false);
  }

  // Low-latency mode is best-effort: failure is only logged, not fatal.
  hr = attributes->SetUINT32(CODECAPI_AVLowLatencyMode, TRUE);
  if (SUCCEEDED(hr)) {
    DVLOG(1) << "Successfully set Low latency mode on decoder.";
  } else {
    DVLOG(1) << "Failed to set Low latency mode on decoder. Error: " << hr;
  }

  auto gl_context = get_gl_context_cb_.Run();
  RETURN_ON_FAILURE(gl_context, "Couldn't get GL context", false);

  // The decoder should use DX11 iff
  // 1. The underlying H/W decoder supports it.
  // 2. We have a pointer to the MFCreateDXGIDeviceManager function needed for
  //    this. This should always be true for Windows 8+.
  // 3. ANGLE is using DX11.
  if (create_dxgi_device_manager_ &&
      (gl_context->GetGLRenderer().find("Direct3D11") != std::string::npos)) {
    // GetUINT32 failure is deliberately ignored: dx11_aware stays 0 and we
    // fall back to the D3D9 path.
    UINT32 dx11_aware = 0;
    attributes->GetUINT32(MF_SA_D3D11_AWARE, &dx11_aware);
    use_dx11_ = !!dx11_aware;
  }

  use_keyed_mutex_ =
      use_dx11_ && gfx::GLSurfaceEGL::HasEGLExtension("EGL_ANGLE_keyed_mutex");

  return true;
}
| 1844 | |
| 1845 bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() { | |
| 1846 RETURN_ON_FAILURE(SetDecoderInputMediaType(), | |
| 1847 "Failed to set decoder input media type", false); | |
| 1848 return SetDecoderOutputMediaType(MFVideoFormat_NV12); | |
| 1849 } | |
| 1850 | |
| 1851 bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() { | |
| 1852 base::win::ScopedComPtr<IMFMediaType> media_type; | |
| 1853 HRESULT hr = MFCreateMediaType(media_type.Receive()); | |
| 1854 RETURN_ON_HR_FAILURE(hr, "MFCreateMediaType failed", false); | |
| 1855 | |
| 1856 hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); | |
| 1857 RETURN_ON_HR_FAILURE(hr, "Failed to set major input type", false); | |
| 1858 | |
| 1859 if (codec_ == media::kCodecH264) { | |
| 1860 hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264); | |
| 1861 } else if (codec_ == media::kCodecVP8) { | |
| 1862 hr = media_type->SetGUID(MF_MT_SUBTYPE, MEDIASUBTYPE_VP80); | |
| 1863 } else if (codec_ == media::kCodecVP9) { | |
| 1864 hr = media_type->SetGUID(MF_MT_SUBTYPE, MEDIASUBTYPE_VP90); | |
| 1865 } else { | |
| 1866 NOTREACHED(); | |
| 1867 RETURN_ON_FAILURE(false, "Unsupported codec on input media type.", false); | |
| 1868 } | |
| 1869 RETURN_ON_HR_FAILURE(hr, "Failed to set subtype", false); | |
| 1870 | |
| 1871 // Not sure about this. msdn recommends setting this value on the input | |
| 1872 // media type. | |
| 1873 hr = media_type->SetUINT32(MF_MT_INTERLACE_MODE, | |
| 1874 MFVideoInterlace_MixedInterlaceOrProgressive); | |
| 1875 RETURN_ON_HR_FAILURE(hr, "Failed to set interlace mode", false); | |
| 1876 | |
| 1877 hr = decoder_->SetInputType(0, media_type.get(), 0); // No flags | |
| 1878 RETURN_ON_HR_FAILURE(hr, "Failed to set decoder input type", false); | |
| 1879 return true; | |
| 1880 } | |
| 1881 | |
// Sets the decoder MFT's output media type to |subtype| (e.g. NV12) by
// delegating to the shared SetTransformOutputType helper with no explicit
// width/height hints.
bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType(
    const GUID& subtype) {
  return SetTransformOutputType(decoder_.get(), subtype, 0, 0);
}
| 1886 | |
| 1887 bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg, | |
| 1888 int32_t param) { | |
| 1889 HRESULT hr = decoder_->ProcessMessage(msg, param); | |
| 1890 return SUCCEEDED(hr); | |
| 1891 } | |
| 1892 | |
// Gets the minimum buffer sizes for input and output samples. The MFT will not
// allocate buffer for input nor output, so we have to do it ourselves and make
// sure they're the correct size. We only provide decoding if DXVA is enabled.
bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() {
  HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_);
  RETURN_ON_HR_FAILURE(hr, "Failed to get input stream info", false);

  hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_);
  RETURN_ON_HR_FAILURE(hr, "Failed to get decoder output stream info", false);

  DVLOG(1) << "Input stream info: ";
  DVLOG(1) << "Max latency: " << input_stream_info_.hnsMaxLatency;
  // The flag values below are only pinned for H.264 — the VPX MFTs are not
  // held to the same contract (presumably they report different flags).
  if (codec_ == media::kCodecH264) {
    // There should be three flags, one for requiring a whole frame be in a
    // single sample, one for requiring there be one buffer only in a single
    // sample, and one that specifies a fixed sample size. (as in cbSize)
    CHECK_EQ(input_stream_info_.dwFlags, 0x7u);
  }

  DVLOG(1) << "Min buffer size: " << input_stream_info_.cbSize;
  DVLOG(1) << "Max lookahead: " << input_stream_info_.cbMaxLookahead;
  DVLOG(1) << "Alignment: " << input_stream_info_.cbAlignment;

  DVLOG(1) << "Output stream info: ";
  // The flags here should be the same and mean the same thing, except when
  // DXVA is enabled, there is an extra 0x100 flag meaning decoder will
  // allocate its own sample.
  DVLOG(1) << "Flags: "
          << std::hex << std::showbase << output_stream_info_.dwFlags;
  if (codec_ == media::kCodecH264) {
    CHECK_EQ(output_stream_info_.dwFlags, 0x107u);
  }
  DVLOG(1) << "Min buffer size: " << output_stream_info_.cbSize;
  DVLOG(1) << "Alignment: " << output_stream_info_.cbAlignment;
  return true;
}
| 1929 | |
| 1930 void DXVAVideoDecodeAccelerator::DoDecode() { | |
| 1931 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
| 1932 // This function is also called from FlushInternal in a loop which could | |
| 1933 // result in the state transitioning to kStopped due to no decoded output. | |
| 1934 State state = GetState(); | |
| 1935 RETURN_AND_NOTIFY_ON_FAILURE( | |
| 1936 (state == kNormal || state == kFlushing || state == kStopped), | |
| 1937 "DoDecode: not in normal/flushing/stopped state", ILLEGAL_STATE,); | |
| 1938 | |
| 1939 MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0}; | |
| 1940 DWORD status = 0; | |
| 1941 | |
| 1942 HRESULT hr = decoder_->ProcessOutput(0, // No flags | |
| 1943 1, // # of out streams to pull from | |
| 1944 &output_data_buffer, | |
| 1945 &status); | |
| 1946 IMFCollection* events = output_data_buffer.pEvents; | |
| 1947 if (events != NULL) { | |
| 1948 DVLOG(1) << "Got events from ProcessOuput, but discarding"; | |
| 1949 events->Release(); | |
| 1950 } | |
| 1951 if (FAILED(hr)) { | |
| 1952 // A stream change needs further ProcessInput calls to get back decoder | |
| 1953 // output which is why we need to set the state to stopped. | |
| 1954 if (hr == MF_E_TRANSFORM_STREAM_CHANGE) { | |
| 1955 if (!SetDecoderOutputMediaType(MFVideoFormat_NV12)) { | |
| 1956 // Decoder didn't let us set NV12 output format. Not sure as to why | |
| 1957 // this can happen. Give up in disgust. | |
| 1958 NOTREACHED() << "Failed to set decoder output media type to NV12"; | |
| 1959 SetState(kStopped); | |
| 1960 } else { | |
| 1961 DVLOG(1) << "Received output format change from the decoder." | |
| 1962 " Recursively invoking DoDecode"; | |
| 1963 DoDecode(); | |
| 1964 } | |
| 1965 return; | |
| 1966 } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) { | |
| 1967 // No more output from the decoder. Stop playback. | |
| 1968 SetState(kStopped); | |
| 1969 return; | |
| 1970 } else { | |
| 1971 NOTREACHED() << "Unhandled error in DoDecode()"; | |
| 1972 return; | |
| 1973 } | |
| 1974 } | |
| 1975 TRACE_EVENT_ASYNC_END0("gpu", "DXVAVideoDecodeAccelerator.Decoding", this); | |
| 1976 | |
| 1977 TRACE_COUNTER1("DXVA Decoding", "TotalPacketsBeforeDecode", | |
| 1978 inputs_before_decode_); | |
| 1979 | |
| 1980 inputs_before_decode_ = 0; | |
| 1981 | |
| 1982 RETURN_AND_NOTIFY_ON_FAILURE(ProcessOutputSample(output_data_buffer.pSample), | |
| 1983 "Failed to process output sample.", PLATFORM_FAILURE,); | |
| 1984 } | |
| 1985 | |
// Queues the decoded |sample| for delivery to the client. The sample time
// carries the id of the bitstream buffer it was decoded from. If picture
// buffers have already been requested, schedules ProcessPendingSamples on
// the main thread; otherwise reads the frame dimensions and asks the client
// for picture buffers first. Returns false if the sample cannot be
// inspected.
bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) {
  RETURN_ON_FAILURE(sample, "Decode succeeded with NULL output sample", false);

  // The decode path stashed the bitstream buffer id in the sample time.
  LONGLONG input_buffer_id = 0;
  RETURN_ON_HR_FAILURE(sample->GetSampleTime(&input_buffer_id),
                       "Failed to get input buffer id associated with sample",
                       false);

  {
    base::AutoLock lock(decoder_lock_);
    // Only one output sample may be outstanding at a time.
    DCHECK(pending_output_samples_.empty());
    pending_output_samples_.push_back(
        PendingSampleInfo(input_buffer_id, sample));
  }

  if (pictures_requested_) {
    DVLOG(1) << "Waiting for picture slots from the client.";
    main_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::ProcessPendingSamples,
                   weak_this_factory_.GetWeakPtr()));
    return true;
  }

  int width = 0;
  int height = 0;
  if (!GetVideoFrameDimensions(sample, &width, &height)) {
    RETURN_ON_FAILURE(false, "Failed to get D3D surface from output sample",
                      false);
  }

  // Go ahead and request picture buffers.
  main_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::RequestPictureBuffers,
                 weak_this_factory_.GetWeakPtr(),
                 width,
                 height));

  pictures_requested_ = true;
  return true;
}
| 2028 | |
// Runs on the main thread. Matches each pending decoded sample with an
// available picture buffer and copies the sample's D3D9 surface or D3D11
// texture into it. Bails out to HandleResolutionChanged() if the frame size
// no longer matches the allocated picture buffers.
void DXVAVideoDecodeAccelerator::ProcessPendingSamples() {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  // Nothing to do until the client has provided picture buffers.
  if (!output_picture_buffers_.size())
    return;

  RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
                               "Failed to make context current",
                               PLATFORM_FAILURE, );

  OutputBuffers::iterator index;

  for (index = output_picture_buffers_.begin();
       index != output_picture_buffers_.end() &&
       OutputSamplesPresent();
       ++index) {
    if (index->second->available()) {
      PendingSampleInfo* pending_sample = NULL;
      {
        base::AutoLock lock(decoder_lock_);
        PendingSampleInfo& sample_info = pending_output_samples_.front();
        // A non -1 picture_buffer_id means this sample is already being
        // copied into a picture buffer; skip it.
        if (sample_info.picture_buffer_id != -1)
          continue;
        pending_sample = &sample_info;
      }

      int width = 0;
      int height = 0;
      if (!GetVideoFrameDimensions(pending_sample->output_sample.get(),
          &width, &height)) {
        RETURN_AND_NOTIFY_ON_FAILURE(false,
            "Failed to get D3D surface from output sample", PLATFORM_FAILURE,);
      }

      // The stream resolution changed relative to this picture buffer;
      // reallocate buffers before delivering anything further.
      if (width != index->second->size().width() ||
          height != index->second->size().height()) {
        HandleResolutionChanged(width, height);
        return;
      }

      base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
      HRESULT hr = pending_sample->output_sample->GetBufferByIndex(
          0, output_buffer.Receive());
      RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
          "Failed to get buffer from output sample", PLATFORM_FAILURE,);

      base::win::ScopedComPtr<IDirect3DSurface9> surface;
      base::win::ScopedComPtr<ID3D11Texture2D> d3d11_texture;

      // DX11 path yields an ID3D11Texture2D via the DXGI buffer; the D3D9
      // path yields an IDirect3DSurface9 via MFGetService.
      if (use_dx11_) {
        base::win::ScopedComPtr<IMFDXGIBuffer> dxgi_buffer;
        hr = dxgi_buffer.QueryFrom(output_buffer.get());
        RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
            "Failed to get DXGIBuffer from output sample", PLATFORM_FAILURE,);
        hr = dxgi_buffer->GetResource(
            __uuidof(ID3D11Texture2D),
            reinterpret_cast<void**>(d3d11_texture.Receive()));
      } else {
        hr = MFGetService(output_buffer.get(), MR_BUFFER_SERVICE,
                          IID_PPV_ARGS(surface.Receive()));
      }
      RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
          "Failed to get surface from output sample", PLATFORM_FAILURE,);

      // Claim this picture buffer for the sample before starting the copy.
      pending_sample->picture_buffer_id = index->second->id();

      RETURN_AND_NOTIFY_ON_FAILURE(
          index->second->CopyOutputSampleDataToPictureBuffer(
              this,
              surface.get(),
              d3d11_texture.get(),
              pending_sample->input_buffer_id),
          "Failed to copy output sample", PLATFORM_FAILURE,);

      index->second->set_available(false);
    }
  }
}
| 2107 | |
// Reports |error| to the client and tears down the decoder. Safe to call
// from any thread: trampolines to the main thread first via a weak pointer.
void DXVAVideoDecodeAccelerator::StopOnError(
    media::VideoDecodeAccelerator::Error error) {
  if (!main_thread_task_runner_->BelongsToCurrentThread()) {
    main_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::StopOnError,
                   weak_this_factory_.GetWeakPtr(),
                   error));
    return;
  }

  if (client_)
    client_->NotifyError(error);
  // Drop the client so no further callbacks are delivered after the error.
  client_ = NULL;

  if (GetState() != kUninitialized) {
    Invalidate();
  }
}
| 2127 | |
// Tears down all decoder state: stops the decoder thread, invalidates weak
// pointers, drops queued buffers/samples and releases the decoder plus the
// D3D9 or DX11 device objects, leaving the object in kUninitialized.
void DXVAVideoDecodeAccelerator::Invalidate() {
  if (GetState() == kUninitialized)
    return;

  // Best effort to make the GL context current.
  make_context_current_cb_.Run();

  decoder_thread_.Stop();
  weak_this_factory_.InvalidateWeakPtrs();
  output_picture_buffers_.clear();
  stale_output_picture_buffers_.clear();
  pending_output_samples_.clear();
  // We want to continue processing pending input after detecting a config
  // change.
  if (GetState() != kConfigChange)
    pending_input_buffers_.clear();
  decoder_.Release();
  pictures_requested_ = false;

  config_change_detector_.reset();

  // Release whichever device stack (DX11 or D3D9) was in use.
  if (use_dx11_) {
    if (video_format_converter_mft_.get()) {
      // Tell the converter MFT streaming has ended before releasing it.
      video_format_converter_mft_->ProcessMessage(
          MFT_MESSAGE_NOTIFY_END_STREAMING, 0);
      video_format_converter_mft_.Release();
    }
    d3d11_device_context_.Release();
    d3d11_device_.Release();
    d3d11_device_manager_.Release();
    d3d11_query_.Release();
    dx11_video_format_converter_media_type_needs_init_ = true;
    multi_threaded_.Release();
  } else {
    d3d9_.Release();
    d3d9_device_ex_.Release();
    device_manager_.Release();
    query_.Release();
  }

  SetState(kUninitialized);
}
| 2170 | |
| 2171 void DXVAVideoDecodeAccelerator::NotifyInputBufferRead(int input_buffer_id) { | |
| 2172 DCHECK(main_thread_task_runner_->BelongsToCurrentThread()); | |
| 2173 if (client_) | |
| 2174 client_->NotifyEndOfBitstreamBuffer(input_buffer_id); | |
| 2175 } | |
| 2176 | |
| 2177 void DXVAVideoDecodeAccelerator::NotifyFlushDone() { | |
| 2178 DCHECK(main_thread_task_runner_->BelongsToCurrentThread()); | |
| 2179 if (client_ && pending_flush_) { | |
| 2180 pending_flush_ = false; | |
| 2181 { | |
| 2182 base::AutoLock lock(decoder_lock_); | |
| 2183 sent_drain_message_ = false; | |
| 2184 } | |
| 2185 | |
| 2186 client_->NotifyFlushDone(); | |
| 2187 } | |
| 2188 } | |
| 2189 | |
| 2190 void DXVAVideoDecodeAccelerator::NotifyResetDone() { | |
| 2191 DCHECK(main_thread_task_runner_->BelongsToCurrentThread()); | |
| 2192 if (client_) | |
| 2193 client_->NotifyResetDone(); | |
| 2194 } | |
| 2195 | |
| 2196 void DXVAVideoDecodeAccelerator::RequestPictureBuffers(int width, int height) { | |
| 2197 DCHECK(main_thread_task_runner_->BelongsToCurrentThread()); | |
| 2198 // This task could execute after the decoder has been torn down. | |
| 2199 if (GetState() != kUninitialized && client_) { | |
| 2200 client_->ProvidePictureBuffers(kNumPictureBuffers, 1, | |
| 2201 gfx::Size(width, height), GL_TEXTURE_2D); | |
| 2202 } | |
| 2203 } | |
| 2204 | |
| 2205 void DXVAVideoDecodeAccelerator::NotifyPictureReady( | |
| 2206 int picture_buffer_id, | |
| 2207 int input_buffer_id) { | |
| 2208 DCHECK(main_thread_task_runner_->BelongsToCurrentThread()); | |
| 2209 // This task could execute after the decoder has been torn down. | |
| 2210 if (GetState() != kUninitialized && client_) { | |
| 2211 // TODO(henryhsu): Use correct visible size instead of (0, 0). We can't use | |
| 2212 // coded size here so use (0, 0) intentionally to have the client choose. | |
| 2213 media::Picture picture(picture_buffer_id, input_buffer_id, | |
| 2214 gfx::Rect(0, 0), false); | |
| 2215 client_->PictureReady(picture); | |
| 2216 } | |
| 2217 } | |
| 2218 | |
| 2219 void DXVAVideoDecodeAccelerator::NotifyInputBuffersDropped() { | |
| 2220 DCHECK(main_thread_task_runner_->BelongsToCurrentThread()); | |
| 2221 if (!client_) | |
| 2222 return; | |
| 2223 | |
| 2224 for (PendingInputs::iterator it = pending_input_buffers_.begin(); | |
| 2225 it != pending_input_buffers_.end(); ++it) { | |
| 2226 LONGLONG input_buffer_id = 0; | |
| 2227 RETURN_ON_HR_FAILURE((*it)->GetSampleTime(&input_buffer_id), | |
| 2228 "Failed to get buffer id associated with sample",); | |
| 2229 client_->NotifyEndOfBitstreamBuffer(input_buffer_id); | |
| 2230 } | |
| 2231 pending_input_buffers_.clear(); | |
| 2232 } | |
| 2233 | |
| 2234 void DXVAVideoDecodeAccelerator::DecodePendingInputBuffers() { | |
| 2235 DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread()); | |
| 2236 State state = GetState(); | |
| 2237 RETURN_AND_NOTIFY_ON_FAILURE((state != kUninitialized), | |
| 2238 "Invalid state: " << state, ILLEGAL_STATE,); | |
| 2239 | |
| 2240 if (pending_input_buffers_.empty() || OutputSamplesPresent()) | |
| 2241 return; | |
| 2242 | |
| 2243 PendingInputs pending_input_buffers_copy; | |
| 2244 std::swap(pending_input_buffers_, pending_input_buffers_copy); | |
| 2245 | |
| 2246 for (PendingInputs::iterator it = pending_input_buffers_copy.begin(); | |
| 2247 it != pending_input_buffers_copy.end(); ++it) { | |
| 2248 DecodeInternal(*it); | |
| 2249 } | |
| 2250 } | |
| 2251 | |
// Runs on the decoder thread. Drains the decoder: re-posts itself until all
// pending input is consumed and no output sample is outstanding, sends the
// MFT drain command exactly once, then pulls any remaining output via
// DoDecode(). When nothing is left, notifies the client on the main thread.
void DXVAVideoDecodeAccelerator::FlushInternal() {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  // We allow only one output frame to be present at any given time. If we have
  // an output frame, then we cannot complete the flush at this time.
  if (OutputSamplesPresent())
    return;

  // First drain the pending input because once the drain message is sent below,
  // the decoder will ignore further input until it's drained.
  if (!pending_input_buffers_.empty()) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
                   base::Unretained(this)));
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                   base::Unretained(this)));
    return;
  }

  {
    base::AutoLock lock(decoder_lock_);
    // sent_drain_message_ guards against sending the drain command twice;
    // it is reset in NotifyFlushDone.
    if (!sent_drain_message_) {
      RETURN_AND_NOTIFY_ON_FAILURE(SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0),
                                   "Failed to send drain message",
                                   PLATFORM_FAILURE,);
      sent_drain_message_ = true;
    }
  }

  // Attempt to retrieve an output frame from the decoder. If we have one,
  // return and proceed when the output frame is processed. If we don't have a
  // frame then we are done.
  DoDecode();
  if (OutputSamplesPresent())
    return;

  SetState(kFlushing);

  main_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::NotifyFlushDone,
                 weak_this_factory_.GetWeakPtr()));

  // Flush bookkeeping is complete; resume normal decoding.
  SetState(kNormal);
}
| 2300 | |
// Runs on the decoder thread. Feeds |sample| to the MFT decoder, handling
// the MF_E_NOTACCEPTING backpressure protocol, stream config changes, and
// queueing of input that cannot be consumed yet. Notifies the client on the
// main thread once the input buffer has been read.
void DXVAVideoDecodeAccelerator::DecodeInternal(
    const base::win::ScopedComPtr<IMFSample>& sample) {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  if (GetState() == kUninitialized)
    return;

  // Preserve input ordering: if an output frame is pending or older input is
  // already queued, this sample must wait its turn.
  if (OutputSamplesPresent() || !pending_input_buffers_.empty()) {
    pending_input_buffers_.push_back(sample);
    return;
  }

  // Check if the resolution, bit rate, etc changed in the stream. If yes we
  // reinitialize the decoder to ensure that the stream decodes correctly.
  bool config_changed = false;

  HRESULT hr = CheckConfigChanged(sample.get(), &config_changed);
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to check video stream config",
                                  PLATFORM_FAILURE,);

  if (config_changed) {
    // Keep the sample so it is decoded after reinitialization; ConfigChanged
    // runs on the main thread and re-posts DecodePendingInputBuffers.
    pending_input_buffers_.push_back(sample);
    main_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::ConfigChanged,
                   weak_this_factory_.GetWeakPtr(),
                   config_));
    return;
  }

  // Trace the span from first input until decoding produces output.
  if (!inputs_before_decode_) {
    TRACE_EVENT_ASYNC_BEGIN0("gpu", "DXVAVideoDecodeAccelerator.Decoding",
                             this);
  }
  inputs_before_decode_++;

  hr = decoder_->ProcessInput(0, sample.get(), 0);
  // As per msdn if the decoder returns MF_E_NOTACCEPTING then it means that it
  // has enough data to produce one or more output samples. In this case the
  // recommended options are to
  // 1. Generate new output by calling IMFTransform::ProcessOutput until it
  //    returns MF_E_TRANSFORM_NEED_MORE_INPUT.
  // 2. Flush the input data
  // We implement the first option, i.e to retrieve the output sample and then
  // process the input again. Failure in either of these steps is treated as a
  // decoder failure.
  if (hr == MF_E_NOTACCEPTING) {
    DoDecode();
    // If the DoDecode call resulted in an output frame then we should not
    // process any more input until that frame is copied to the target surface.
    if (!OutputSamplesPresent()) {
      State state = GetState();
      RETURN_AND_NOTIFY_ON_FAILURE((state == kStopped || state == kNormal ||
                                    state == kFlushing),
          "Failed to process output. Unexpected decoder state: " << state,
          PLATFORM_FAILURE,);
      hr = decoder_->ProcessInput(0, sample.get(), 0);
    }
    // If we continue to get the MF_E_NOTACCEPTING error we do the following:-
    // 1. Add the input sample to the pending queue.
    // 2. If we don't have any output samples we post the
    //    DecodePendingInputBuffers task to process the pending input samples.
    //    If we have an output sample then the above task is posted when the
    //    output samples are sent to the client.
    // This is because we only support 1 pending output sample at any
    // given time due to the limitation with the Microsoft media foundation
    // decoder where it recycles the output Decoder surfaces.
    if (hr == MF_E_NOTACCEPTING) {
      pending_input_buffers_.push_back(sample);
      decoder_thread_task_runner_->PostTask(
          FROM_HERE,
          base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
                     base::Unretained(this)));
      return;
    }
  }
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to process input sample",
                                  PLATFORM_FAILURE,);

  DoDecode();

  State state = GetState();
  RETURN_AND_NOTIFY_ON_FAILURE((state == kStopped || state == kNormal ||
                                state == kFlushing),
      "Failed to process output. Unexpected decoder state: " << state,
      ILLEGAL_STATE,);

  // The input buffer id was stashed in the sample's timestamp field by the
  // caller; recover it to report completion.
  LONGLONG input_buffer_id = 0;
  RETURN_ON_HR_FAILURE(sample->GetSampleTime(&input_buffer_id),
                       "Failed to get input buffer id associated with sample",);
  // The Microsoft Media foundation decoder internally buffers up to 30 frames
  // before returning a decoded frame. We need to inform the client that this
  // input buffer is processed as it may stop sending us further input.
  // Note: This may break clients which expect every input buffer to be
  // associated with a decoded output buffer.
  // TODO(ananta)
  // Do some more investigation into whether it is possible to get the MFT
  // decoder to emit an output packet for every input packet.
  // http://code.google.com/p/chromium/issues/detail?id=108121
  // http://code.google.com/p/chromium/issues/detail?id=150925
  main_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::NotifyInputBufferRead,
                 weak_this_factory_.GetWeakPtr(),
                 input_buffer_id));
}
| 2407 | |
| 2408 void DXVAVideoDecodeAccelerator::HandleResolutionChanged(int width, | |
| 2409 int height) { | |
| 2410 dx11_video_format_converter_media_type_needs_init_ = true; | |
| 2411 | |
| 2412 main_thread_task_runner_->PostTask( | |
| 2413 FROM_HERE, | |
| 2414 base::Bind(&DXVAVideoDecodeAccelerator::DismissStaleBuffers, | |
| 2415 weak_this_factory_.GetWeakPtr(), false)); | |
| 2416 | |
| 2417 main_thread_task_runner_->PostTask( | |
| 2418 FROM_HERE, | |
| 2419 base::Bind(&DXVAVideoDecodeAccelerator::RequestPictureBuffers, | |
| 2420 weak_this_factory_.GetWeakPtr(), | |
| 2421 width, | |
| 2422 height)); | |
| 2423 } | |
| 2424 | |
| 2425 void DXVAVideoDecodeAccelerator::DismissStaleBuffers(bool force) { | |
| 2426 RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(), | |
| 2427 "Failed to make context current", | |
| 2428 PLATFORM_FAILURE, ); | |
| 2429 | |
| 2430 OutputBuffers::iterator index; | |
| 2431 | |
| 2432 for (index = output_picture_buffers_.begin(); | |
| 2433 index != output_picture_buffers_.end(); | |
| 2434 ++index) { | |
| 2435 if (force || index->second->available()) { | |
| 2436 DVLOG(1) << "Dismissing picture id: " << index->second->id(); | |
| 2437 client_->DismissPictureBuffer(index->second->id()); | |
| 2438 } else { | |
| 2439 // Move to |stale_output_picture_buffers_| for deferred deletion. | |
| 2440 stale_output_picture_buffers_.insert( | |
| 2441 std::make_pair(index->first, index->second)); | |
| 2442 } | |
| 2443 } | |
| 2444 | |
| 2445 output_picture_buffers_.clear(); | |
| 2446 } | |
| 2447 | |
| 2448 void DXVAVideoDecodeAccelerator::DeferredDismissStaleBuffer( | |
| 2449 int32_t picture_buffer_id) { | |
| 2450 RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(), | |
| 2451 "Failed to make context current", | |
| 2452 PLATFORM_FAILURE, ); | |
| 2453 | |
| 2454 OutputBuffers::iterator it = stale_output_picture_buffers_.find( | |
| 2455 picture_buffer_id); | |
| 2456 DCHECK(it != stale_output_picture_buffers_.end()); | |
| 2457 DVLOG(1) << "Dismissing picture id: " << it->second->id(); | |
| 2458 client_->DismissPictureBuffer(it->second->id()); | |
| 2459 stale_output_picture_buffers_.erase(it); | |
| 2460 } | |
| 2461 | |
| 2462 DXVAVideoDecodeAccelerator::State | |
| 2463 DXVAVideoDecodeAccelerator::GetState() { | |
| 2464 static_assert(sizeof(State) == sizeof(long), "mismatched type sizes"); | |
| 2465 State state = static_cast<State>( | |
| 2466 InterlockedAdd(reinterpret_cast<volatile long*>(&state_), 0)); | |
| 2467 return state; | |
| 2468 } | |
| 2469 | |
| 2470 void DXVAVideoDecodeAccelerator::SetState(State new_state) { | |
| 2471 if (!main_thread_task_runner_->BelongsToCurrentThread()) { | |
| 2472 main_thread_task_runner_->PostTask( | |
| 2473 FROM_HERE, | |
| 2474 base::Bind(&DXVAVideoDecodeAccelerator::SetState, | |
| 2475 weak_this_factory_.GetWeakPtr(), | |
| 2476 new_state)); | |
| 2477 return; | |
| 2478 } | |
| 2479 | |
| 2480 static_assert(sizeof(State) == sizeof(long), "mismatched type sizes"); | |
| 2481 ::InterlockedExchange(reinterpret_cast<volatile long*>(&state_), | |
| 2482 new_state); | |
| 2483 DCHECK_EQ(state_, new_state); | |
| 2484 } | |
| 2485 | |
// Spins up the dedicated decoder thread and caches its task runner.
void DXVAVideoDecodeAccelerator::StartDecoderThread() {
  // COM is initialized on the thread; |false| disables the multi-threaded
  // apartment (presumably selecting an STA — confirm against base::Thread).
  decoder_thread_.init_com_with_mta(false);
  decoder_thread_.Start();
  decoder_thread_task_runner_ = decoder_thread_.task_runner();
}
| 2491 | |
// Returns true if a decoded output sample is awaiting delivery to the
// client. |pending_output_samples_| is accessed from multiple threads, so
// the read is guarded by |decoder_lock_|.
bool DXVAVideoDecodeAccelerator::OutputSamplesPresent() {
  base::AutoLock lock(decoder_lock_);
  return !pending_output_samples_.empty();
}
| 2496 | |
// D3D9 path: copies (and color-converts) the decoded |src_surface| into the
// ANGLE-owned |dest_surface|. Always executes on the decoder thread; calls
// from other threads are re-posted. Completion is signalled asynchronously
// via CopySurfaceComplete on the main thread.
void DXVAVideoDecodeAccelerator::CopySurface(IDirect3DSurface9* src_surface,
                                             IDirect3DSurface9* dest_surface,
                                             int picture_buffer_id,
                                             int input_buffer_id) {
  if (!decoder_thread_task_runner_->BelongsToCurrentThread()) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::CopySurface,
                   base::Unretained(this),
                   src_surface,
                   dest_surface,
                   picture_buffer_id,
                   input_buffer_id));
    return;
  }

  // StretchRect performs the YUV->RGB colorspace conversion on the GPU.
  HRESULT hr = d3d9_device_ex_->StretchRect(src_surface, NULL, dest_surface,
                                            NULL, D3DTEXF_NONE);
  RETURN_ON_HR_FAILURE(hr, "Colorspace conversion via StretchRect failed",);

  // Ideally, this should be done immediately before the draw call that uses
  // the texture. Flush it once here though.
  hr = query_->Issue(D3DISSUE_END);
  RETURN_ON_HR_FAILURE(hr, "Failed to issue END",);

  // If we are sharing the ANGLE device we don't need to wait for the Flush to
  // complete.
  if (using_angle_device_) {
    main_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::CopySurfaceComplete,
                   weak_this_factory_.GetWeakPtr(),
                   src_surface,
                   dest_surface,
                   picture_buffer_id,
                   input_buffer_id));
    return;
  }

  // Flush the decoder device to ensure that the decoded frame is copied to the
  // target surface. FlushDecoder polls the query until the GPU work is done
  // (with an iteration cap) before signalling completion.
  decoder_thread_task_runner_->PostDelayedTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder,
                 base::Unretained(this), 0, src_surface, dest_surface,
                 picture_buffer_id, input_buffer_id),
      base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
}
| 2545 | |
// Main-thread completion handler for a decoded-frame copy. Finalizes the
// picture buffer, notifies the client that the picture is ready, retires the
// pending output sample, and resumes either the flush or pending decodes.
void DXVAVideoDecodeAccelerator::CopySurfaceComplete(
    IDirect3DSurface9* src_surface,
    IDirect3DSurface9* dest_surface,
    int picture_buffer_id,
    int input_buffer_id) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  // The output buffers may have changed in the following scenarios:-
  // 1. A resolution change.
  // 2. Decoder instance was destroyed.
  // Ignore copy surface notifications for such buffers.
  OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id);
  if (it == output_picture_buffers_.end())
    return;

  // If the picture buffer is marked as available it probably means that there
  // was a Reset operation which dropped the output frame.
  DXVAPictureBuffer* picture_buffer = it->second.get();
  if (picture_buffer->available())
    return;

  RETURN_AND_NOTIFY_ON_FAILURE(make_context_current_cb_.Run(),
                               "Failed to make context current",
                               PLATFORM_FAILURE, );

  DCHECK(!output_picture_buffers_.empty());

  bool result = picture_buffer->CopySurfaceComplete(src_surface, dest_surface);
  RETURN_AND_NOTIFY_ON_FAILURE(result, "Failed to complete copying surface",
                               PLATFORM_FAILURE, );

  NotifyPictureReady(picture_buffer->id(), input_buffer_id);

  {
    base::AutoLock lock(decoder_lock_);
    // Retire the output sample that this copy corresponded to; only one
    // output sample is outstanding at a time.
    if (!pending_output_samples_.empty())
      pending_output_samples_.pop_front();
  }

  // With the output slot free again, either continue a flush that was
  // blocked on this frame or drain the queued input samples.
  if (pending_flush_) {
    decoder_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::FlushInternal,
                   base::Unretained(this)));
    return;
  }
  decoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
                 base::Unretained(this)));
}
| 2598 | |
// DX11 path: converts the decoded NV12 |src_texture| to RGB and copies it
// into the ANGLE-owned |dest_texture| using the video processor MFT. The
// first (cross-thread) invocation detaches the current output sample and
// re-posts onto the decoder thread, which then owns |video_frame|.
void DXVAVideoDecodeAccelerator::CopyTexture(
    ID3D11Texture2D* src_texture,
    ID3D11Texture2D* dest_texture,
    base::win::ScopedComPtr<IDXGIKeyedMutex> dest_keyed_mutex,
    uint64_t keyed_mutex_value,
    IMFSample* video_frame,
    int picture_buffer_id,
    int input_buffer_id) {
  HRESULT hr = E_FAIL;

  DCHECK(use_dx11_);

  if (!decoder_thread_task_runner_->BelongsToCurrentThread()) {
    // The media foundation H.264 decoder outputs YUV12 textures which we
    // cannot copy into ANGLE as they expect ARGB textures. In D3D land
    // the StretchRect API in the IDirect3DDevice9Ex interface did the color
    // space conversion for us. Sadly in DX11 land the API does not provide
    // a straightforward way to do this.
    // We use the video processor MFT.
    // https://msdn.microsoft.com/en-us/library/hh162913(v=vs.85).aspx
    // This object implements a media foundation transform (IMFTransform)
    // which follows the same contract as the decoder. The color space
    // conversion as per msdn is done in the GPU.

    D3D11_TEXTURE2D_DESC source_desc;
    src_texture->GetDesc(&source_desc);

    // Set up the input and output types for the video processor MFT.
    if (!InitializeDX11VideoFormatConverterMediaType(source_desc.Width,
                                                     source_desc.Height)) {
      RETURN_AND_NOTIFY_ON_FAILURE(
          false, "Failed to initialize media types for convesion.",
          PLATFORM_FAILURE,);
    }

    // The input to the video processor is the output sample.
    base::win::ScopedComPtr<IMFSample> input_sample_for_conversion;
    {
      base::AutoLock lock(decoder_lock_);
      PendingSampleInfo& sample_info = pending_output_samples_.front();
      input_sample_for_conversion = sample_info.output_sample;
    }

    // Detach() transfers the COM reference to the posted task; the decoder
    // thread invocation below re-attaches it into |input_sample|.
    decoder_thread_task_runner_->PostTask(
        FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::CopyTexture,
                              base::Unretained(this), src_texture, dest_texture,
                              dest_keyed_mutex, keyed_mutex_value,
                              input_sample_for_conversion.Detach(),
                              picture_buffer_id, input_buffer_id));
    return;
  }

  DCHECK(video_frame);

  // Take ownership of the reference detached above so it is released when
  // this function returns.
  base::win::ScopedComPtr<IMFSample> input_sample;
  input_sample.Attach(video_frame);

  DCHECK(video_format_converter_mft_.get());

  if (dest_keyed_mutex) {
    // Synchronize access to the shared texture with the consumer.
    HRESULT hr =
        dest_keyed_mutex->AcquireSync(keyed_mutex_value, kAcquireSyncWaitMs);
    RETURN_AND_NOTIFY_ON_FAILURE(
        hr == S_OK, "D3D11 failed to acquire keyed mutex for texture.",
        PLATFORM_FAILURE, );
  }
  // The video processor MFT requires output samples to be allocated by the
  // caller. We create a sample with a buffer backed with the ID3D11Texture2D
  // interface exposed by ANGLE. This works nicely as this ensures that the
  // video processor coverts the color space of the output frame and copies
  // the result into the ANGLE texture.
  base::win::ScopedComPtr<IMFSample> output_sample;
  hr = MFCreateSample(output_sample.Receive());
  if (FAILED(hr)) {
    RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
        "Failed to create output sample.", PLATFORM_FAILURE,);
  }

  base::win::ScopedComPtr<IMFMediaBuffer> output_buffer;
  hr = MFCreateDXGISurfaceBuffer(
      __uuidof(ID3D11Texture2D), dest_texture, 0, FALSE,
      output_buffer.Receive());
  if (FAILED(hr)) {
    base::debug::Alias(&hr);
    // TODO(ananta)
    // Remove this CHECK when the change to use DX11 for H/W decoding
    // stablizes.
    CHECK(false);
    RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
        "Failed to create output sample.", PLATFORM_FAILURE,);
  }

  output_sample->AddBuffer(output_buffer.get());

  hr = video_format_converter_mft_->ProcessInput(0, video_frame, 0);
  if (FAILED(hr)) {
    DCHECK(false);
    RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
        "Failed to convert output sample format.", PLATFORM_FAILURE,);
  }

  // Pull the converted frame out of the video processor into |output_sample|
  // (and therefore into the ANGLE texture backing it).
  DWORD status = 0;
  MFT_OUTPUT_DATA_BUFFER format_converter_output = {};
  format_converter_output.pSample = output_sample.get();
  hr = video_format_converter_mft_->ProcessOutput(
      0,  // No flags
      1,  // # of out streams to pull from
      &format_converter_output,
      &status);

  if (FAILED(hr)) {
    base::debug::Alias(&hr);
    // TODO(ananta)
    // Remove this CHECK when the change to use DX11 for H/W decoding
    // stablizes.
    CHECK(false);
    RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
        "Failed to convert output sample format.", PLATFORM_FAILURE,);
  }

  if (dest_keyed_mutex) {
    // Release with |keyed_mutex_value| + 1 to hand the texture to the
    // consumer waiting on that value; completion can be signalled directly.
    HRESULT hr = dest_keyed_mutex->ReleaseSync(keyed_mutex_value + 1);
    RETURN_AND_NOTIFY_ON_FAILURE(hr == S_OK, "Failed to release keyed mutex.",
                                 PLATFORM_FAILURE, );

    main_thread_task_runner_->PostTask(
        FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::CopySurfaceComplete,
                              weak_this_factory_.GetWeakPtr(), nullptr, nullptr,
                              picture_buffer_id, input_buffer_id));
  } else {
    // Without a keyed mutex, wait for the GPU copy to finish via an event
    // query before reporting completion (see FlushDecoder).
    d3d11_device_context_->Flush();
    d3d11_device_context_->End(d3d11_query_.get());

    decoder_thread_task_runner_->PostDelayedTask(
        FROM_HERE, base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder,
                              base::Unretained(this), 0,
                              reinterpret_cast<IDirect3DSurface9*>(NULL),
                              reinterpret_cast<IDirect3DSurface9*>(NULL),
                              picture_buffer_id, input_buffer_id),
        base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
  }
}
| 2741 | |
// Polls (on the decoder thread) for completion of the GPU copy of a decoded
// frame, re-posting itself up to kMaxIterationsForD3DFlush times, and then
// signals CopySurfaceComplete on the main thread. |iterations| counts how
// many polls have occurred so far.
void DXVAVideoDecodeAccelerator::FlushDecoder(
    int iterations,
    IDirect3DSurface9* src_surface,
    IDirect3DSurface9* dest_surface,
    int picture_buffer_id,
    int input_buffer_id) {
  DCHECK(decoder_thread_task_runner_->BelongsToCurrentThread());

  // The DXVA decoder has its own device which it uses for decoding. ANGLE
  // has its own device which we don't have access to.
  // The above code attempts to copy the decoded picture into a surface
  // which is owned by ANGLE. As there are multiple devices involved in
  // this, the StretchRect call above is not synchronous.
  // We attempt to flush the batched operations to ensure that the picture is
  // copied to the surface owned by ANGLE.
  // We need to do this in a loop and call flush multiple times.
  // We have seen the GetData call for flushing the command buffer fail to
  // return success occassionally on multi core machines, leading to an
  // infinite loop.
  // Workaround is to have an upper limit of 4 on the number of iterations to
  // wait for the Flush to finish.

  HRESULT hr = E_FAIL;
  if (use_dx11_) {
    // DX11: poll the event query issued by CopyTexture.
    BOOL query_data = 0;
    hr = d3d11_device_context_->GetData(d3d11_query_.get(), &query_data,
                                        sizeof(BOOL), 0);
    if (FAILED(hr)) {
      base::debug::Alias(&hr);
      // TODO(ananta)
      // Remove this CHECK when the change to use DX11 for H/W decoding
      // stablizes.
      CHECK(false);
    }
  } else {
    // D3D9: poll the END query issued by CopySurface, flushing the batch.
    hr = query_->GetData(NULL, 0, D3DGETDATA_FLUSH);
  }

  // S_FALSE means the GPU has not finished yet; retry after a delay, up to
  // the iteration cap described above.
  if ((hr == S_FALSE) && (++iterations < kMaxIterationsForD3DFlush)) {
    decoder_thread_task_runner_->PostDelayedTask(
        FROM_HERE,
        base::Bind(&DXVAVideoDecodeAccelerator::FlushDecoder,
                   base::Unretained(this), iterations, src_surface,
                   dest_surface, picture_buffer_id, input_buffer_id),
        base::TimeDelta::FromMilliseconds(kFlushDecoderSurfaceTimeoutMs));
    return;
  }

  main_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::CopySurfaceComplete,
                 weak_this_factory_.GetWeakPtr(),
                 src_surface,
                 dest_surface,
                 picture_buffer_id,
                 input_buffer_id));
}
| 2799 | |
// Configures the video processor MFT with an NV12 input type and an
// ARGB32/RGB32 output type of the given dimensions. Cached: does nothing
// until a resolution change sets
// |dx11_video_format_converter_media_type_needs_init_| again.
// Returns true on success.
bool DXVAVideoDecodeAccelerator::InitializeDX11VideoFormatConverterMediaType(
    int width, int height) {
  if (!dx11_video_format_converter_media_type_needs_init_)
    return true;

  CHECK(video_format_converter_mft_.get());

  // Hand the converter our D3D11 device manager so the conversion runs on
  // the GPU.
  HRESULT hr = video_format_converter_mft_->ProcessMessage(
      MFT_MESSAGE_SET_D3D_MANAGER,
      reinterpret_cast<ULONG_PTR>(
          d3d11_device_manager_.get()));

  if (FAILED(hr)) {
    base::debug::Alias(&hr);
    // TODO(ananta)
    // Remove this CHECK when the change to use DX11 for H/W decoding
    // stablizes.
    CHECK(false);
  }
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr,
      "Failed to initialize video format converter", PLATFORM_FAILURE, false);

  // End any previous streaming session before renegotiating types (e.g.
  // after a resolution change).
  video_format_converter_mft_->ProcessMessage(
      MFT_MESSAGE_NOTIFY_END_STREAMING, 0);

  // Build the input type: NV12 video at |width| x |height| (the decoder's
  // output format).
  base::win::ScopedComPtr<IMFMediaType> media_type;
  hr = MFCreateMediaType(media_type.Receive());
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "MFCreateMediaType failed",
      PLATFORM_FAILURE, false);

  hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set major input type",
      PLATFORM_FAILURE, false);

  hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12);
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set input sub type",
      PLATFORM_FAILURE, false);

  hr = MFSetAttributeSize(media_type.get(), MF_MT_FRAME_SIZE, width, height);
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set media type attributes",
      PLATFORM_FAILURE, false);

  hr = video_format_converter_mft_->SetInputType(0, media_type.get(), 0);
  if (FAILED(hr)) {
    base::debug::Alias(&hr);
    // TODO(ananta)
    // Remove this CHECK when the change to use DX11 for H/W decoding
    // stablizes.
    CHECK(false);
  }
  RETURN_AND_NOTIFY_ON_HR_FAILURE(hr, "Failed to set converter input type",
      PLATFORM_FAILURE, false);

  // It appears that we fail to set MFVideoFormat_ARGB32 as the output media
  // type in certain configurations. Try to fallback to MFVideoFormat_RGB32
  // in such cases. If both fail, then bail.
  bool media_type_set =
      SetTransformOutputType(video_format_converter_mft_.get(),
                             MFVideoFormat_ARGB32,
                             width,
                             height);
  if (!media_type_set) {
    media_type_set =
        SetTransformOutputType(video_format_converter_mft_.get(),
                               MFVideoFormat_RGB32,
                               width,
                               height);
  }

  if (!media_type_set) {
    // Remove this once this stabilizes in the field.
    CHECK(false);
    LOG(ERROR) << "Failed to find a matching RGB output type in the converter";
    return false;
  }

  dx11_video_format_converter_media_type_needs_init_ = false;
  return true;
}
| 2879 | |
| 2880 bool DXVAVideoDecodeAccelerator::GetVideoFrameDimensions( | |
| 2881 IMFSample* sample, | |
| 2882 int* width, | |
| 2883 int* height) { | |
| 2884 base::win::ScopedComPtr<IMFMediaBuffer> output_buffer; | |
| 2885 HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive()); | |
| 2886 RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from output sample", false); | |
| 2887 | |
| 2888 if (use_dx11_) { | |
| 2889 base::win::ScopedComPtr<IMFDXGIBuffer> dxgi_buffer; | |
| 2890 base::win::ScopedComPtr<ID3D11Texture2D> d3d11_texture; | |
| 2891 hr = dxgi_buffer.QueryFrom(output_buffer.get()); | |
| 2892 RETURN_ON_HR_FAILURE(hr, "Failed to get DXGIBuffer from output sample", | |
| 2893 false); | |
| 2894 hr = dxgi_buffer->GetResource( | |
| 2895 __uuidof(ID3D11Texture2D), | |
| 2896 reinterpret_cast<void**>(d3d11_texture.Receive())); | |
| 2897 RETURN_ON_HR_FAILURE(hr, "Failed to get D3D11Texture from output buffer", | |
| 2898 false); | |
| 2899 D3D11_TEXTURE2D_DESC d3d11_texture_desc; | |
| 2900 d3d11_texture->GetDesc(&d3d11_texture_desc); | |
| 2901 *width = d3d11_texture_desc.Width; | |
| 2902 *height = d3d11_texture_desc.Height; | |
| 2903 } else { | |
| 2904 base::win::ScopedComPtr<IDirect3DSurface9> surface; | |
| 2905 hr = MFGetService(output_buffer.get(), MR_BUFFER_SERVICE, | |
| 2906 IID_PPV_ARGS(surface.Receive())); | |
| 2907 RETURN_ON_HR_FAILURE(hr, "Failed to get D3D surface from output sample", | |
| 2908 false); | |
| 2909 D3DSURFACE_DESC surface_desc; | |
| 2910 hr = surface->GetDesc(&surface_desc); | |
| 2911 RETURN_ON_HR_FAILURE(hr, "Failed to get surface description", false); | |
| 2912 *width = surface_desc.Width; | |
| 2913 *height = surface_desc.Height; | |
| 2914 } | |
| 2915 return true; | |
| 2916 } | |
| 2917 | |
| 2918 bool DXVAVideoDecodeAccelerator::SetTransformOutputType( | |
| 2919 IMFTransform* transform, | |
| 2920 const GUID& output_type, | |
| 2921 int width, | |
| 2922 int height) { | |
| 2923 HRESULT hr = E_FAIL; | |
| 2924 base::win::ScopedComPtr<IMFMediaType> media_type; | |
| 2925 | |
| 2926 for (uint32_t i = 0; | |
| 2927 SUCCEEDED(transform->GetOutputAvailableType( | |
| 2928 0, i, media_type.Receive())); | |
| 2929 ++i) { | |
| 2930 GUID out_subtype = {0}; | |
| 2931 hr = media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype); | |
| 2932 RETURN_ON_HR_FAILURE(hr, "Failed to get output major type", false); | |
| 2933 | |
| 2934 if (out_subtype == output_type) { | |
| 2935 if (width && height) { | |
| 2936 hr = MFSetAttributeSize(media_type.get(), MF_MT_FRAME_SIZE, width, | |
| 2937 height); | |
| 2938 RETURN_ON_HR_FAILURE(hr, "Failed to set media type attributes", false); | |
| 2939 } | |
| 2940 hr = transform->SetOutputType(0, media_type.get(), 0); // No flags | |
| 2941 RETURN_ON_HR_FAILURE(hr, "Failed to set output type", false); | |
| 2942 return true; | |
| 2943 } | |
| 2944 media_type.Release(); | |
| 2945 } | |
| 2946 return false; | |
| 2947 } | |
| 2948 | |
// Inspects |sample|'s bitstream for an in-band configuration change (SPS/PPS
// style parameters). Only implemented for H.264; returns S_FALSE for other
// codecs without touching |*config_changed|. On success, |*config_changed|
// reflects the detector's verdict.
HRESULT DXVAVideoDecodeAccelerator::CheckConfigChanged(
    IMFSample* sample, bool* config_changed) {
  if (codec_ != media::kCodecH264)
    return S_FALSE;

  base::win::ScopedComPtr<IMFMediaBuffer> buffer;
  HRESULT hr = sample->GetBufferByIndex(0, buffer.Receive());
  RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from input sample", hr);

  // Scoped accessor for the buffer's raw bytes — presumably locks the
  // IMFMediaBuffer for the duration of this scope (see
  // MediaBufferScopedPointer).
  MediaBufferScopedPointer scoped_media_buffer(buffer.get());

  if (!config_change_detector_->DetectConfig(
          scoped_media_buffer.get(),
          scoped_media_buffer.current_length())) {
    RETURN_ON_HR_FAILURE(E_FAIL, "Failed to detect H.264 stream config",
                         E_FAIL);
  }
  *config_changed = config_change_detector_->config_changed();
  return S_OK;
}
| 2969 | |
// Main-thread handler for an in-stream config change: force-dismisses all
// picture buffers, tears down and re-creates the decoder, then resumes
// decoding of the input queued while reinitializing.
void DXVAVideoDecodeAccelerator::ConfigChanged(
    const Config& config) {
  DCHECK(main_thread_task_runner_->BelongsToCurrentThread());

  // NOTE(review): the |config| parameter is unused here; the member
  // |config_| is passed to Initialize() below (the caller also binds
  // |config_|). Confirm whether the parameter is intentional.
  SetState(kConfigChange);
  // force == true: dismiss every buffer, including in-use ones, since the
  // decoder instance is being destroyed.
  DismissStaleBuffers(true);
  Invalidate();
  Initialize(config_, client_);
  decoder_thread_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&DXVAVideoDecodeAccelerator::DecodePendingInputBuffers,
                 base::Unretained(this)));
}
| 2983 | |
| 2984 } // namespace content | |
| OLD | NEW |