OLD | NEW |
1 sub vp8_common_forward_decls() { | 1 sub vp8_common_forward_decls() { |
2 print <<EOF | 2 print <<EOF |
3 /* | 3 /* |
4 * VP8 | 4 * VP8 |
5 */ | 5 */ |
6 | 6 |
7 struct blockd; | 7 struct blockd; |
8 struct macroblockd; | 8 struct macroblockd; |
9 struct loop_filter_info; | 9 struct loop_filter_info; |
10 | 10 |
(...skipping 219 matching lines...)
230 | 230 |
231 add_proto qw/void vp8_bilinear_predict8x4/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"; | 231 add_proto qw/void vp8_bilinear_predict8x4/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"; |
232 specialize qw/vp8_bilinear_predict8x4 mmx media neon/; | 232 specialize qw/vp8_bilinear_predict8x4 mmx media neon/; |
233 $vp8_bilinear_predict8x4_media=vp8_bilinear_predict8x4_armv6; | 233 $vp8_bilinear_predict8x4_media=vp8_bilinear_predict8x4_armv6; |
234 | 234 |
235 add_proto qw/void vp8_bilinear_predict4x4/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"; | 235 add_proto qw/void vp8_bilinear_predict4x4/, "unsigned char *src, int src_pitch, int xofst, int yofst, unsigned char *dst, int dst_pitch"; |
236 specialize qw/vp8_bilinear_predict4x4 mmx media neon/; | 236 specialize qw/vp8_bilinear_predict4x4 mmx media neon/; |
237 $vp8_bilinear_predict4x4_media=vp8_bilinear_predict4x4_armv6; | 237 $vp8_bilinear_predict4x4_media=vp8_bilinear_predict4x4_armv6; |
238 | 238 |
239 # | 239 # |
240 # Whole-pixel Variance | |
241 # | |
242 add_proto qw/unsigned int vp8_variance4x4/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"; | |
243 specialize qw/vp8_variance4x4 mmx sse2/; | |
244 $vp8_variance4x4_sse2=vp8_variance4x4_wmt; | |
245 | |
246 add_proto qw/unsigned int vp8_variance8x8/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"; | |
247 specialize qw/vp8_variance8x8 mmx sse2 media neon/; | |
248 $vp8_variance8x8_sse2=vp8_variance8x8_wmt; | |
249 $vp8_variance8x8_media=vp8_variance8x8_armv6; | |
250 | |
251 add_proto qw/unsigned int vp8_variance8x16/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"; | |
252 specialize qw/vp8_variance8x16 mmx sse2 neon/; | |
253 $vp8_variance8x16_sse2=vp8_variance8x16_wmt; | |
254 | |
255 add_proto qw/unsigned int vp8_variance16x8/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"; | |
256 specialize qw/vp8_variance16x8 mmx sse2 neon/; | |
257 $vp8_variance16x8_sse2=vp8_variance16x8_wmt; | |
258 | |
259 add_proto qw/unsigned int vp8_variance16x16/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"; | |
260 specialize qw/vp8_variance16x16 mmx sse2 media neon/; | |
261 $vp8_variance16x16_sse2=vp8_variance16x16_wmt; | |
262 $vp8_variance16x16_media=vp8_variance16x16_armv6; | |
263 | |
264 # | |
265 # Sub-pixel Variance | 240 # Sub-pixel Variance |
266 # | 241 # |
267 add_proto qw/unsigned int vp8_sub_pixel_variance4x4/, "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"; | 242 add_proto qw/unsigned int vp8_sub_pixel_variance4x4/, "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"; |
268 specialize qw/vp8_sub_pixel_variance4x4 mmx sse2/; | 243 specialize qw/vp8_sub_pixel_variance4x4 mmx sse2/; |
269 $vp8_sub_pixel_variance4x4_sse2=vp8_sub_pixel_variance4x4_wmt; | 244 $vp8_sub_pixel_variance4x4_sse2=vp8_sub_pixel_variance4x4_wmt; |
270 | 245 |
271 add_proto qw/unsigned int vp8_sub_pixel_variance8x8/, "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"; | 246 add_proto qw/unsigned int vp8_sub_pixel_variance8x8/, "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"; |
272 specialize qw/vp8_sub_pixel_variance8x8 mmx sse2 media neon_asm/; | 247 specialize qw/vp8_sub_pixel_variance8x8 mmx sse2 media neon_asm/; |
273 $vp8_sub_pixel_variance8x8_sse2=vp8_sub_pixel_variance8x8_wmt; | 248 $vp8_sub_pixel_variance8x8_sse2=vp8_sub_pixel_variance8x8_wmt; |
274 $vp8_sub_pixel_variance8x8_media=vp8_sub_pixel_variance8x8_armv6; | 249 $vp8_sub_pixel_variance8x8_media=vp8_sub_pixel_variance8x8_armv6; |
(...skipping 27 matching lines...)
302 specialize qw/vp8_variance_halfpixvar16x16_hv mmx sse2 media neon/; | 277 specialize qw/vp8_variance_halfpixvar16x16_hv mmx sse2 media neon/; |
303 $vp8_variance_halfpixvar16x16_hv_sse2=vp8_variance_halfpixvar16x16_hv_wmt; | 278 $vp8_variance_halfpixvar16x16_hv_sse2=vp8_variance_halfpixvar16x16_hv_wmt; |
304 $vp8_variance_halfpixvar16x16_hv_media=vp8_variance_halfpixvar16x16_hv_armv6; | 279 $vp8_variance_halfpixvar16x16_hv_media=vp8_variance_halfpixvar16x16_hv_armv6; |
305 | 280 |
306 # | 281 # |
307 # Encoder functions below this point. | 282 # Encoder functions below this point. |
308 # | 283 # |
309 if (vpx_config("CONFIG_VP8_ENCODER") eq "yes") { | 284 if (vpx_config("CONFIG_VP8_ENCODER") eq "yes") { |
310 | 285 |
311 # | 286 # |
312 # Sum of squares (vector) | |
313 # | |
314 add_proto qw/unsigned int vp8_get_mb_ss/, "const short *"; | |
315 specialize qw/vp8_get_mb_ss mmx sse2/; | |
316 | |
317 # | |
318 # SSE (Sum Squared Error) | 287 # SSE (Sum Squared Error) |
319 # | 288 # |
320 add_proto qw/unsigned int vp8_sub_pixel_mse16x16/, "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"; | 289 add_proto qw/unsigned int vp8_sub_pixel_mse16x16/, "const unsigned char *src_ptr, int source_stride, int xoffset, int yoffset, const unsigned char *ref_ptr, int Refstride, unsigned int *sse"; |
321 specialize qw/vp8_sub_pixel_mse16x16 mmx sse2/; | 290 specialize qw/vp8_sub_pixel_mse16x16 mmx sse2/; |
322 $vp8_sub_pixel_mse16x16_sse2=vp8_sub_pixel_mse16x16_wmt; | 291 $vp8_sub_pixel_mse16x16_sse2=vp8_sub_pixel_mse16x16_wmt; |
323 | 292 |
324 add_proto qw/unsigned int vp8_mse16x16/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, unsigned int *sse"; | |
325 specialize qw/vp8_mse16x16 mmx sse2 media neon/; | |
326 $vp8_mse16x16_sse2=vp8_mse16x16_wmt; | |
327 $vp8_mse16x16_media=vp8_mse16x16_armv6; | |
328 | |
329 add_proto qw/unsigned int vp8_get4x4sse_cs/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride"; | |
330 specialize qw/vp8_get4x4sse_cs mmx neon/; | |
331 | |
332 # | 293 # |
333 # Block copy | 294 # Block copy |
334 # | 295 # |
335 if ($opts{arch} =~ /x86/) { | 296 if ($opts{arch} =~ /x86/) { |
336 add_proto qw/void vp8_copy32xn/, "const unsigned char *src_ptr, int source_stride, const unsigned char *ref_ptr, int ref_stride, int n"; | 297 add_proto qw/void vp8_copy32xn/, "const unsigned char *src_ptr, int source_stride, unsigned char *dst_ptr, int dst_stride, int n"; |
337 specialize qw/vp8_copy32xn sse2 sse3/; | 298 specialize qw/vp8_copy32xn sse2 sse3/; |
338 } | 299 } |
339 | 300 |
340 # | 301 # |
341 # Structured Similarity (SSIM) | 302 # Structured Similarity (SSIM) |
342 # | 303 # |
343 if (vpx_config("CONFIG_INTERNAL_STATS") eq "yes") { | 304 if (vpx_config("CONFIG_INTERNAL_STATS") eq "yes") { |
344 $opts{arch} eq "x86_64" and $sse2_on_x86_64 = "sse2"; | 305 $opts{arch} eq "x86_64" and $sse2_on_x86_64 = "sse2"; |
345 | 306 |
346 add_proto qw/void vp8_ssim_parms_8x8/, "unsigned char *s, int sp, unsigned char *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"; | 307 add_proto qw/void vp8_ssim_parms_8x8/, "unsigned char *s, int sp, unsigned char *r, int rp, unsigned long *sum_s, unsigned long *sum_r, unsigned long *sum_sq_s, unsigned long *sum_sq_r, unsigned long *sum_sxr"; |
(...skipping 80 matching lines...)
427 if (vpx_config("CONFIG_TEMPORAL_DENOISING") eq "yes") { | 388 if (vpx_config("CONFIG_TEMPORAL_DENOISING") eq "yes") { |
428 add_proto qw/int vp8_denoiser_filter/, "unsigned char *mc_running_avg_y, int mc_avg_y_stride, unsigned char *running_avg_y, int avg_y_stride, unsigned char *sig, int sig_stride, unsigned int motion_magnitude, int increase_denoising"; | 389 add_proto qw/int vp8_denoiser_filter/, "unsigned char *mc_running_avg_y, int mc_avg_y_stride, unsigned char *running_avg_y, int avg_y_stride, unsigned char *sig, int sig_stride, unsigned int motion_magnitude, int increase_denoising"; |
429 specialize qw/vp8_denoiser_filter sse2 neon/; | 390 specialize qw/vp8_denoiser_filter sse2 neon/; |
430 add_proto qw/int vp8_denoiser_filter_uv/, "unsigned char *mc_running_avg, int mc_avg_stride, unsigned char *running_avg, int avg_stride, unsigned char *sig, int sig_stride, unsigned int motion_magnitude, int increase_denoising"; | 391 add_proto qw/int vp8_denoiser_filter_uv/, "unsigned char *mc_running_avg, int mc_avg_stride, unsigned char *running_avg, int avg_stride, unsigned char *sig, int sig_stride, unsigned int motion_magnitude, int increase_denoising"; |
431 specialize qw/vp8_denoiser_filter_uv sse2 neon/; | 392 specialize qw/vp8_denoiser_filter_uv sse2 neon/; |
432 } | 393 } |
433 | 394 |
434 # End of encoder only functions | 395 # End of encoder only functions |
435 } | 396 } |
436 1; | 397 1; |
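
Note on what these add_proto/specialize lines drive: the RTCD generator turns each prototype into a per-ISA dispatch entry in the generated vp8_rtcd.h, with a C fallback and optimized variants selected once at startup. The C sketch below is a hand-written approximation of that pattern for the updated vp8_copy32xn prototype; it is not the generator's literal output, and the variant bodies, the setup function name, and the HAS_SSE2 flag value here are stand-ins for illustration only.

/* Illustrative sketch of the dispatch pattern implied by
 *   add_proto qw/void vp8_copy32xn/, "... unsigned char *dst_ptr, int dst_stride, int n";
 *   specialize qw/vp8_copy32xn sse2 sse3/;
 * Hand-written stand-in, not generated code. */
#include <string.h>

/* Plain-C reference: copy 32 bytes per row for n rows (new dst_ptr/dst_stride signature). */
static void vp8_copy32xn_c(const unsigned char *src_ptr, int source_stride,
                           unsigned char *dst_ptr, int dst_stride, int n) {
  int r;
  for (r = 0; r < n; ++r)
    memcpy(dst_ptr + r * dst_stride, src_ptr + r * source_stride, 32);
}

/* Stand-in for the SSE2 variant named by `specialize`; the real body is SIMD code. */
static void vp8_copy32xn_sse2(const unsigned char *src_ptr, int source_stride,
                              unsigned char *dst_ptr, int dst_stride, int n) {
  vp8_copy32xn_c(src_ptr, source_stride, dst_ptr, dst_stride, n);
}

/* Dispatch pointer plus one-time setup, mirroring what the generated header's
 * runtime-CPU-detection setup arranges. HAS_SSE2 is a local placeholder bit. */
#define HAS_SSE2 0x04
static void (*vp8_copy32xn)(const unsigned char *, int, unsigned char *, int, int) =
    vp8_copy32xn_c;

static void vp8_copy32xn_setup(int cpu_flags) {
  vp8_copy32xn = vp8_copy32xn_c;                        /* always-safe C fallback */
  if (cpu_flags & HAS_SSE2) vp8_copy32xn = vp8_copy32xn_sse2;
}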