// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <libdrm/drm_fourcc.h>
#include <linux/videodev2.h>
#include <string.h>

#include <algorithm>

#include "base/numerics/safe_conversions.h"
#include "build/build_config.h"
#include "content/common/gpu/media/generic_v4l2_device.h"
#if defined(ARCH_CPU_ARMEL)
#include "content/common/gpu/media/tegra_v4l2_device.h"
#endif

namespace content {

V4L2Device::V4L2Device(Type type) : type_(type) {
}

V4L2Device::~V4L2Device() {
}

// static
scoped_refptr<V4L2Device> V4L2Device::Create(Type type) {
  DVLOG(3) << __PRETTY_FUNCTION__;

  scoped_refptr<GenericV4L2Device> generic_device(new GenericV4L2Device(type));
  if (generic_device->Initialize())
    return generic_device;

#if defined(ARCH_CPU_ARMEL)
  scoped_refptr<TegraV4L2Device> tegra_device(new TegraV4L2Device(type));
  if (tegra_device->Initialize())
    return tegra_device;
#endif

  DVLOG(1) << "Failed to create V4L2Device";
  return scoped_refptr<V4L2Device>();
}

// static
media::VideoPixelFormat V4L2Device::V4L2PixFmtToVideoPixelFormat(
    uint32_t pix_fmt) {
  switch (pix_fmt) {
    case V4L2_PIX_FMT_NV12:
    case V4L2_PIX_FMT_NV12M:
      return media::PIXEL_FORMAT_NV12;

    case V4L2_PIX_FMT_MT21:
      return media::PIXEL_FORMAT_MT21;

    case V4L2_PIX_FMT_YUV420:
    case V4L2_PIX_FMT_YUV420M:
      return media::PIXEL_FORMAT_I420;

    case V4L2_PIX_FMT_RGB32:
      return media::PIXEL_FORMAT_ARGB;

    default:
      LOG(FATAL) << "Add more cases as needed";
      return media::PIXEL_FORMAT_UNKNOWN;
  }
}

// static
uint32_t V4L2Device::VideoPixelFormatToV4L2PixFmt(
    media::VideoPixelFormat format) {
  switch (format) {
    case media::PIXEL_FORMAT_NV12:
      return V4L2_PIX_FMT_NV12M;

    case media::PIXEL_FORMAT_MT21:
      return V4L2_PIX_FMT_MT21;

    case media::PIXEL_FORMAT_I420:
      return V4L2_PIX_FMT_YUV420M;

    default:
      LOG(FATAL) << "Add more cases as needed";
      return 0;
  }
}

// static
uint32_t V4L2Device::VideoCodecProfileToV4L2PixFmt(
    media::VideoCodecProfile profile,
    bool slice_based) {
  if (profile >= media::H264PROFILE_MIN &&
      profile <= media::H264PROFILE_MAX) {
    if (slice_based)
      return V4L2_PIX_FMT_H264_SLICE;
    else
      return V4L2_PIX_FMT_H264;
  } else if (profile >= media::VP8PROFILE_MIN &&
             profile <= media::VP8PROFILE_MAX) {
    if (slice_based)
      return V4L2_PIX_FMT_VP8_FRAME;
    else
      return V4L2_PIX_FMT_VP8;
  } else if (profile >= media::VP9PROFILE_MIN &&
             profile <= media::VP9PROFILE_MAX) {
    return V4L2_PIX_FMT_VP9;
  } else {
    LOG(FATAL) << "Add more cases as needed";
    return 0;
  }
}

// static
uint32_t V4L2Device::V4L2PixFmtToDrmFormat(uint32_t format) {
  switch (format) {
    case V4L2_PIX_FMT_NV12:
    case V4L2_PIX_FMT_NV12M:
      return DRM_FORMAT_NV12;

    case V4L2_PIX_FMT_YUV420:
    case V4L2_PIX_FMT_YUV420M:
      return DRM_FORMAT_YUV420;

    case V4L2_PIX_FMT_RGB32:
      return DRM_FORMAT_ARGB8888;

    default:
      DVLOG(1) << "Add more cases as needed";
      return 0;
  }
}

// static
gfx::Size V4L2Device::CodedSizeFromV4L2Format(struct v4l2_format format) {
  gfx::Size coded_size;
  gfx::Size visible_size;
  media::VideoPixelFormat frame_format = media::PIXEL_FORMAT_UNKNOWN;
  size_t bytesperline = 0;
  // Total bytes in the frame.
  size_t sizeimage = 0;

  if (V4L2_TYPE_IS_MULTIPLANAR(format.type)) {
    DCHECK_GT(format.fmt.pix_mp.num_planes, 0);
    bytesperline =
        base::checked_cast<int>(format.fmt.pix_mp.plane_fmt[0].bytesperline);
    for (size_t i = 0; i < format.fmt.pix_mp.num_planes; ++i) {
      sizeimage +=
          base::checked_cast<int>(format.fmt.pix_mp.plane_fmt[i].sizeimage);
    }
    visible_size.SetSize(base::checked_cast<int>(format.fmt.pix_mp.width),
                         base::checked_cast<int>(format.fmt.pix_mp.height));
    frame_format = V4L2Device::V4L2PixFmtToVideoPixelFormat(
        format.fmt.pix_mp.pixelformat);
  } else {
    bytesperline = base::checked_cast<int>(format.fmt.pix.bytesperline);
    sizeimage = base::checked_cast<int>(format.fmt.pix.sizeimage);
    visible_size.SetSize(base::checked_cast<int>(format.fmt.pix.width),
                         base::checked_cast<int>(format.fmt.pix.height));
    frame_format =
        V4L2Device::V4L2PixFmtToVideoPixelFormat(format.fmt.pix.pixelformat);
  }

  // V4L2 does not provide per-plane bytesperline (bpl) when different
  // components are sharing one physical plane buffer. In this case, it only
  // provides bpl for the first component in the plane. So we can't depend on
  // it for calculating height, because bpl may vary within one physical plane
  // buffer. For example, YUV420 contains 3 components in one physical plane,
  // with Y at 8 bits per pixel, and Cb/Cr at 4 bits per pixel per component,
  // but we only get 8 bits per pixel from bytesperline in physical plane 0.
  // So we need to get total frame bpp from elsewhere to calculate coded
  // height.

  // We need bits per pixel for one component only to calculate coded_width
  // from bytesperline.
  int plane_horiz_bits_per_pixel =
      media::VideoFrame::PlaneHorizontalBitsPerPixel(frame_format, 0);

  // Adding up bpp for each component will give us total bpp for all
  // components.
  int total_bpp = 0;
  for (size_t i = 0; i < media::VideoFrame::NumPlanes(frame_format); ++i)
    total_bpp += media::VideoFrame::PlaneBitsPerPixel(frame_format, i);

  if (sizeimage == 0 || bytesperline == 0 || plane_horiz_bits_per_pixel == 0 ||
      total_bpp == 0 || (bytesperline * 8) % plane_horiz_bits_per_pixel != 0) {
    LOG(ERROR) << "Invalid format provided";
    return coded_size;
  }

  // Coded width can be calculated by taking the first component's
  // bytesperline, which in V4L2 always applies to the first component in the
  // physical plane buffer.
  int coded_width = bytesperline * 8 / plane_horiz_bits_per_pixel;
  // sizeimage in bytes is coded_width * coded_height * total_bpp / 8.
  int coded_height = sizeimage * 8 / coded_width / total_bpp;
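  // A worked example with assumed, illustrative numbers (not taken from any
  // particular driver): for a 1280x768 V4L2_PIX_FMT_YUV420 buffer,
  // bytesperline = 1280 and plane_horiz_bits_per_pixel = 8, so
  // coded_width = 1280 * 8 / 8 = 1280; with sizeimage = 1474560 bytes and
  // total_bpp = 12, coded_height = 1474560 * 8 / 1280 / 12 = 768.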

  coded_size.SetSize(coded_width, coded_height);
  // It's possible the driver gave us a slightly larger sizeimage than what
  // would be calculated from coded size. This is technically not allowed, but
  // some drivers (Exynos) like to have some additional alignment that is not a
  // multiple of bytesperline. The best thing we can do is to compensate by
  // aligning to next full row.
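  // For instance (hypothetical numbers): if AllocationSize() for the 1280x768
  // example above is 1474560 bytes but the driver reports
  // sizeimage = 1475072, bumping coded_height from 768 to 769 adds one full
  // row (1920 bytes for YUV420) so the reported size still fits.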
  if (sizeimage > media::VideoFrame::AllocationSize(frame_format, coded_size))
    coded_size.SetSize(coded_width, coded_height + 1);
  DVLOG(3) << "coded_size=" << coded_size.ToString();

  // Sanity checks. Calculated coded size has to contain given visible size
  // and fulfill buffer byte size requirements.
  DCHECK(gfx::Rect(coded_size).Contains(gfx::Rect(visible_size)));
  DCHECK_LE(sizeimage,
            media::VideoFrame::AllocationSize(frame_format, coded_size));

  return coded_size;
}

void V4L2Device::GetSupportedResolution(uint32_t pixelformat,
                                        gfx::Size* min_resolution,
                                        gfx::Size* max_resolution) {
  max_resolution->SetSize(0, 0);
  min_resolution->SetSize(0, 0);
  v4l2_frmsizeenum frame_size;
  memset(&frame_size, 0, sizeof(frame_size));
  frame_size.pixel_format = pixelformat;
  for (; Ioctl(VIDIOC_ENUM_FRAMESIZES, &frame_size) == 0; ++frame_size.index) {
    if (frame_size.type == V4L2_FRMSIZE_TYPE_DISCRETE) {
      if (frame_size.discrete.width >=
              base::checked_cast<uint32_t>(max_resolution->width()) &&
          frame_size.discrete.height >=
              base::checked_cast<uint32_t>(max_resolution->height())) {
        max_resolution->SetSize(frame_size.discrete.width,
                                frame_size.discrete.height);
      }
      if (min_resolution->IsEmpty() ||
          (frame_size.discrete.width <=
               base::checked_cast<uint32_t>(min_resolution->width()) &&
           frame_size.discrete.height <=
               base::checked_cast<uint32_t>(min_resolution->height()))) {
        min_resolution->SetSize(frame_size.discrete.width,
                                frame_size.discrete.height);
      }
    } else if (frame_size.type == V4L2_FRMSIZE_TYPE_STEPWISE ||
               frame_size.type == V4L2_FRMSIZE_TYPE_CONTINUOUS) {
      max_resolution->SetSize(frame_size.stepwise.max_width,
                              frame_size.stepwise.max_height);
      min_resolution->SetSize(frame_size.stepwise.min_width,
                              frame_size.stepwise.min_height);
      break;
    }
  }
  if (max_resolution->IsEmpty()) {
    max_resolution->SetSize(1920, 1088);
    LOG(ERROR) << "GetSupportedResolution failed to get maximum resolution "
               << "for fourcc " << std::hex << pixelformat
               << ", fall back to " << max_resolution->ToString();
  }
  if (min_resolution->IsEmpty()) {
    min_resolution->SetSize(16, 16);
    LOG(ERROR) << "GetSupportedResolution failed to get minimum resolution "
               << "for fourcc " << std::hex << pixelformat
               << ", fall back to " << min_resolution->ToString();
  }
}

media::VideoDecodeAccelerator::SupportedProfiles
V4L2Device::GetSupportedDecodeProfiles(const size_t num_formats,
                                       const uint32_t pixelformats[]) {
  DCHECK_EQ(type_, kDecoder);
  media::VideoDecodeAccelerator::SupportedProfiles profiles;
  media::VideoDecodeAccelerator::SupportedProfile profile;
  v4l2_fmtdesc fmtdesc;
  memset(&fmtdesc, 0, sizeof(fmtdesc));
  fmtdesc.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;

  for (; Ioctl(VIDIOC_ENUM_FMT, &fmtdesc) == 0; ++fmtdesc.index) {
    if (std::find(pixelformats, pixelformats + num_formats,
                  fmtdesc.pixelformat) == pixelformats + num_formats)
      continue;
    int min_profile, max_profile;
    switch (fmtdesc.pixelformat) {
      case V4L2_PIX_FMT_H264:
      case V4L2_PIX_FMT_H264_SLICE:
        min_profile = media::H264PROFILE_MIN;
        max_profile = media::H264PROFILE_MAX;
        break;
      case V4L2_PIX_FMT_VP8:
      case V4L2_PIX_FMT_VP8_FRAME:
        min_profile = media::VP8PROFILE_MIN;
        max_profile = media::VP8PROFILE_MAX;
        break;
      case V4L2_PIX_FMT_VP9:
        min_profile = media::VP9PROFILE_MIN;
        max_profile = media::VP9PROFILE_MAX;
        break;
      default:
        NOTREACHED() << "Unhandled pixelformat " << std::hex
                     << fmtdesc.pixelformat;
        return profiles;
    }
    GetSupportedResolution(fmtdesc.pixelformat, &profile.min_resolution,
                           &profile.max_resolution);
    for (int media_profile = min_profile; media_profile <= max_profile;
         ++media_profile) {
      profile.profile = static_cast<media::VideoCodecProfile>(media_profile);
      profiles.push_back(profile);
    }
  }
  return profiles;
}

bool V4L2Device::SupportsDecodeProfileForV4L2PixelFormats(
    media::VideoCodecProfile profile,
    const size_t num_formats,
    const uint32_t pixelformats[]) {
  // Get all profiles supported by this device, taking into account only the
  // fourccs in pixelformats.
  const auto supported_profiles =
      GetSupportedDecodeProfiles(num_formats, pixelformats);

  // Try to find the requested profile among the returned supported_profiles.
  const auto iter = std::find_if(
      supported_profiles.begin(), supported_profiles.end(),
      [profile](const media::VideoDecodeAccelerator::SupportedProfile& p) {
        return profile == p.profile;
      });

  return iter != supported_profiles.end();
}

}  // namespace content