| Index: libvpx/patches/ugly/02_private_symbols.patch
|
| diff --git a/libvpx/patches/ugly/02_private_symbols.patch b/libvpx/patches/ugly/02_private_symbols.patch
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..d55657f26fe6a4f19681d67d99ff0ba7c58a6681
|
| --- /dev/null
|
| +++ b/libvpx/patches/ugly/02_private_symbols.patch
|
| @@ -0,0 +1,1394 @@
|
| +diff --git a/vp8/common/x86/idctllm_mmx.asm b/libvpx/source/libvpx/vp8/common/x86/idctllm_mmx.asm
|
| +index 43735bc..b3b8a35 100644
|
| +--- a/vp8/common/x86/idctllm_mmx.asm
|
| ++++ b/vp8/common/x86/idctllm_mmx.asm
|
| +@@ -33,7 +33,7 @@
|
| +
|
| +
|
| + ;void short_idct4x4llm_mmx(short *input, short *output, int pitch)
|
| +-global sym(vp8_short_idct4x4llm_mmx)
|
| ++global sym(vp8_short_idct4x4llm_mmx) PRIVATE
|
| + sym(vp8_short_idct4x4llm_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -185,7 +185,7 @@ sym(vp8_short_idct4x4llm_mmx):
|
| +
|
| +
|
| + ;void short_idct4x4llm_1_mmx(short *input, short *output, int pitch)
|
| +-global sym(vp8_short_idct4x4llm_1_mmx)
|
| ++global sym(vp8_short_idct4x4llm_1_mmx) PRIVATE
|
| + sym(vp8_short_idct4x4llm_1_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -221,7 +221,7 @@ sym(vp8_short_idct4x4llm_1_mmx):
|
| + ret
|
| +
|
| + ;void vp8_dc_only_idct_add_mmx(short input_dc, unsigned char *pred_ptr, unsigned char *dst_ptr, int pitch, int stride)
|
| +-global sym(vp8_dc_only_idct_add_mmx)
|
| ++global sym(vp8_dc_only_idct_add_mmx) PRIVATE
|
| + sym(vp8_dc_only_idct_add_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/common/x86/idctllm_sse2.asm b/libvpx/source/libvpx/vp8/common/x86/idctllm_sse2.asm
|
| +index edee157..0da7229 100644
|
| +--- a/vp8/common/x86/idctllm_sse2.asm
|
| ++++ b/vp8/common/x86/idctllm_sse2.asm
|
| +@@ -21,7 +21,7 @@
|
| + ; int blk_stride - 5
|
| + ; )
|
| +
|
| +-global sym(idct_dequant_0_2x_sse2)
|
| ++global sym(idct_dequant_0_2x_sse2) PRIVATE
|
| + sym(idct_dequant_0_2x_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -97,7 +97,7 @@ sym(idct_dequant_0_2x_sse2):
|
| + pop rbp
|
| + ret
|
| +
|
| +-global sym(idct_dequant_full_2x_sse2)
|
| ++global sym(idct_dequant_full_2x_sse2) PRIVATE
|
| + sym(idct_dequant_full_2x_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -360,7 +360,7 @@ sym(idct_dequant_full_2x_sse2):
|
| + ; int dst_stride - 4
|
| + ; short *dc - 5
|
| + ; )
|
| +-global sym(idct_dequant_dc_0_2x_sse2)
|
| ++global sym(idct_dequant_dc_0_2x_sse2) PRIVATE
|
| + sym(idct_dequant_dc_0_2x_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -436,7 +436,7 @@ sym(idct_dequant_dc_0_2x_sse2):
|
| + pop rbp
|
| + ret
|
| +
|
| +-global sym(idct_dequant_dc_full_2x_sse2)
|
| ++global sym(idct_dequant_dc_full_2x_sse2) PRIVATE
|
| + sym(idct_dequant_dc_full_2x_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/common/x86/iwalsh_mmx.asm b/libvpx/source/libvpx/vp8/common/x86/iwalsh_mmx.asm
|
| +index 10b5274..09a9813 100644
|
| +--- a/vp8/common/x86/iwalsh_mmx.asm
|
| ++++ b/vp8/common/x86/iwalsh_mmx.asm
|
| +@@ -12,7 +12,7 @@
|
| + %include "vpx_ports/x86_abi_support.asm"
|
| +
|
| + ;void vp8_short_inv_walsh4x4_1_mmx(short *input, short *output)
|
| +-global sym(vp8_short_inv_walsh4x4_1_mmx)
|
| ++global sym(vp8_short_inv_walsh4x4_1_mmx) PRIVATE
|
| + sym(vp8_short_inv_walsh4x4_1_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -48,7 +48,7 @@ sym(vp8_short_inv_walsh4x4_1_mmx):
|
| + ret
|
| +
|
| + ;void vp8_short_inv_walsh4x4_mmx(short *input, short *output)
|
| +-global sym(vp8_short_inv_walsh4x4_mmx)
|
| ++global sym(vp8_short_inv_walsh4x4_mmx) PRIVATE
|
| + sym(vp8_short_inv_walsh4x4_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/common/x86/iwalsh_sse2.asm b/libvpx/source/libvpx/vp8/common/x86/iwalsh_sse2.asm
|
| +index 83c97df..fde3e65 100644
|
| +--- a/vp8/common/x86/iwalsh_sse2.asm
|
| ++++ b/vp8/common/x86/iwalsh_sse2.asm
|
| +@@ -12,7 +12,7 @@
|
| + %include "vpx_ports/x86_abi_support.asm"
|
| +
|
| + ;void vp8_short_inv_walsh4x4_sse2(short *input, short *output)
|
| +-global sym(vp8_short_inv_walsh4x4_sse2)
|
| ++global sym(vp8_short_inv_walsh4x4_sse2) PRIVATE
|
| + sym(vp8_short_inv_walsh4x4_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/common/x86/loopfilter_mmx.asm b/libvpx/source/libvpx/vp8/common/x86/loopfilter_mmx.asm
|
| +index c6c215c..feccbe2 100644
|
| +--- a/vp8/common/x86/loopfilter_mmx.asm
|
| ++++ b/vp8/common/x86/loopfilter_mmx.asm
|
| +@@ -21,7 +21,7 @@
|
| + ; const char *thresh,
|
| + ; int count
|
| + ;)
|
| +-global sym(vp8_loop_filter_horizontal_edge_mmx)
|
| ++global sym(vp8_loop_filter_horizontal_edge_mmx) PRIVATE
|
| + sym(vp8_loop_filter_horizontal_edge_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -235,7 +235,7 @@ next8_h:
|
| + ; const char *thresh,
|
| + ; int count
|
| + ;)
|
| +-global sym(vp8_loop_filter_vertical_edge_mmx)
|
| ++global sym(vp8_loop_filter_vertical_edge_mmx) PRIVATE
|
| + sym(vp8_loop_filter_vertical_edge_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -608,7 +608,7 @@ next8_v:
|
| + ; const char *thresh,
|
| + ; int count
|
| + ;)
|
| +-global sym(vp8_mbloop_filter_horizontal_edge_mmx)
|
| ++global sym(vp8_mbloop_filter_horizontal_edge_mmx) PRIVATE
|
| + sym(vp8_mbloop_filter_horizontal_edge_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -927,7 +927,7 @@ next8_mbh:
|
| + ; const char *thresh,
|
| + ; int count
|
| + ;)
|
| +-global sym(vp8_mbloop_filter_vertical_edge_mmx)
|
| ++global sym(vp8_mbloop_filter_vertical_edge_mmx) PRIVATE
|
| + sym(vp8_mbloop_filter_vertical_edge_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -1397,7 +1397,7 @@ next8_mbv:
|
| + ; const char *thresh,
|
| + ; int count
|
| + ;)
|
| +-global sym(vp8_loop_filter_simple_horizontal_edge_mmx)
|
| ++global sym(vp8_loop_filter_simple_horizontal_edge_mmx) PRIVATE
|
| + sym(vp8_loop_filter_simple_horizontal_edge_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -1520,7 +1520,7 @@ nexts8_h:
|
| + ; const char *thresh,
|
| + ; int count
|
| + ;)
|
| +-global sym(vp8_loop_filter_simple_vertical_edge_mmx)
|
| ++global sym(vp8_loop_filter_simple_vertical_edge_mmx) PRIVATE
|
| + sym(vp8_loop_filter_simple_vertical_edge_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/common/x86/loopfilter_sse2.asm b/libvpx/source/libvpx/vp8/common/x86/loopfilter_sse2.asm
|
| +index 849133d..ca4b77a 100644
|
| +--- a/vp8/common/x86/loopfilter_sse2.asm
|
| ++++ b/vp8/common/x86/loopfilter_sse2.asm
|
| +@@ -283,7 +283,7 @@
|
| + ; const char *thresh,
|
| + ; int count
|
| + ;)
|
| +-global sym(vp8_loop_filter_horizontal_edge_sse2)
|
| ++global sym(vp8_loop_filter_horizontal_edge_sse2) PRIVATE
|
| + sym(vp8_loop_filter_horizontal_edge_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -333,7 +333,7 @@ sym(vp8_loop_filter_horizontal_edge_sse2):
|
| + ; const char *thresh,
|
| + ; int count
|
| + ;)
|
| +-global sym(vp8_loop_filter_horizontal_edge_uv_sse2)
|
| ++global sym(vp8_loop_filter_horizontal_edge_uv_sse2) PRIVATE
|
| + sym(vp8_loop_filter_horizontal_edge_uv_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -579,7 +579,7 @@ sym(vp8_loop_filter_horizontal_edge_uv_sse2):
|
| + ; const char *thresh,
|
| + ; int count
|
| + ;)
|
| +-global sym(vp8_mbloop_filter_horizontal_edge_sse2)
|
| ++global sym(vp8_mbloop_filter_horizontal_edge_sse2) PRIVATE
|
| + sym(vp8_mbloop_filter_horizontal_edge_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -629,7 +629,7 @@ sym(vp8_mbloop_filter_horizontal_edge_sse2):
|
| + ; const char *thresh,
|
| + ; unsigned char *v
|
| + ;)
|
| +-global sym(vp8_mbloop_filter_horizontal_edge_uv_sse2)
|
| ++global sym(vp8_mbloop_filter_horizontal_edge_uv_sse2) PRIVATE
|
| + sym(vp8_mbloop_filter_horizontal_edge_uv_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -1019,7 +1019,7 @@ sym(vp8_mbloop_filter_horizontal_edge_uv_sse2):
|
| + ; const char *thresh,
|
| + ; int count
|
| + ;)
|
| +-global sym(vp8_loop_filter_vertical_edge_sse2)
|
| ++global sym(vp8_loop_filter_vertical_edge_sse2) PRIVATE
|
| + sym(vp8_loop_filter_vertical_edge_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -1086,7 +1086,7 @@ sym(vp8_loop_filter_vertical_edge_sse2):
|
| + ; const char *thresh,
|
| + ; unsigned char *v
|
| + ;)
|
| +-global sym(vp8_loop_filter_vertical_edge_uv_sse2)
|
| ++global sym(vp8_loop_filter_vertical_edge_uv_sse2) PRIVATE
|
| + sym(vp8_loop_filter_vertical_edge_uv_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -1244,7 +1244,7 @@ sym(vp8_loop_filter_vertical_edge_uv_sse2):
|
| + ; const char *thresh,
|
| + ; int count
|
| + ;)
|
| +-global sym(vp8_mbloop_filter_vertical_edge_sse2)
|
| ++global sym(vp8_mbloop_filter_vertical_edge_sse2) PRIVATE
|
| + sym(vp8_mbloop_filter_vertical_edge_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -1313,7 +1313,7 @@ sym(vp8_mbloop_filter_vertical_edge_sse2):
|
| + ; const char *thresh,
|
| + ; unsigned char *v
|
| + ;)
|
| +-global sym(vp8_mbloop_filter_vertical_edge_uv_sse2)
|
| ++global sym(vp8_mbloop_filter_vertical_edge_uv_sse2) PRIVATE
|
| + sym(vp8_mbloop_filter_vertical_edge_uv_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -1381,7 +1381,7 @@ sym(vp8_mbloop_filter_vertical_edge_uv_sse2):
|
| + ; const char *thresh,
|
| + ; int count
|
| + ;)
|
| +-global sym(vp8_loop_filter_simple_horizontal_edge_sse2)
|
| ++global sym(vp8_loop_filter_simple_horizontal_edge_sse2) PRIVATE
|
| + sym(vp8_loop_filter_simple_horizontal_edge_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -1498,7 +1498,7 @@ sym(vp8_loop_filter_simple_horizontal_edge_sse2):
|
| + ; const char *thresh,
|
| + ; int count
|
| + ;)
|
| +-global sym(vp8_loop_filter_simple_vertical_edge_sse2)
|
| ++global sym(vp8_loop_filter_simple_vertical_edge_sse2) PRIVATE
|
| + sym(vp8_loop_filter_simple_vertical_edge_sse2):
|
| + push rbp ; save old base pointer value.
|
| + mov rbp, rsp ; set new base pointer value.
|
| +diff --git a/vp8/common/x86/postproc_mmx.asm b/libvpx/source/libvpx/vp8/common/x86/postproc_mmx.asm
|
| +index 787e832..a6102a6 100644
|
| +--- a/vp8/common/x86/postproc_mmx.asm
|
| ++++ b/vp8/common/x86/postproc_mmx.asm
|
| +@@ -24,7 +24,7 @@
|
| + ; int cols,
|
| + ; int flimit
|
| + ;)
|
| +-global sym(vp8_post_proc_down_and_across_mmx)
|
| ++global sym(vp8_post_proc_down_and_across_mmx) PRIVATE
|
| + sym(vp8_post_proc_down_and_across_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -265,7 +265,7 @@ acrossnextcol:
|
| + ;void vp8_mbpost_proc_down_mmx(unsigned char *dst,
|
| + ; int pitch, int rows, int cols,int flimit)
|
| + extern sym(vp8_rv)
|
| +-global sym(vp8_mbpost_proc_down_mmx)
|
| ++global sym(vp8_mbpost_proc_down_mmx) PRIVATE
|
| + sym(vp8_mbpost_proc_down_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -465,7 +465,7 @@ loop_row:
|
| + ; unsigned char bothclamp[16],
|
| + ; unsigned int Width, unsigned int Height, int Pitch)
|
| + extern sym(rand)
|
| +-global sym(vp8_plane_add_noise_mmx)
|
| ++global sym(vp8_plane_add_noise_mmx) PRIVATE
|
| + sym(vp8_plane_add_noise_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/common/x86/postproc_sse2.asm b/libvpx/source/libvpx/vp8/common/x86/postproc_sse2.asm
|
| +index 30b4bf5..40aab84 100644
|
| +--- a/vp8/common/x86/postproc_sse2.asm
|
| ++++ b/vp8/common/x86/postproc_sse2.asm
|
| +@@ -21,7 +21,7 @@
|
| + ; int cols,
|
| + ; int flimit
|
| + ;)
|
| +-global sym(vp8_post_proc_down_and_across_xmm)
|
| ++global sym(vp8_post_proc_down_and_across_xmm) PRIVATE
|
| + sym(vp8_post_proc_down_and_across_xmm):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -251,7 +251,7 @@ acrossnextcol:
|
| + ;void vp8_mbpost_proc_down_xmm(unsigned char *dst,
|
| + ; int pitch, int rows, int cols,int flimit)
|
| + extern sym(vp8_rv)
|
| +-global sym(vp8_mbpost_proc_down_xmm)
|
| ++global sym(vp8_mbpost_proc_down_xmm) PRIVATE
|
| + sym(vp8_mbpost_proc_down_xmm):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -451,7 +451,7 @@ loop_row:
|
| +
|
| + ;void vp8_mbpost_proc_across_ip_xmm(unsigned char *src,
|
| + ; int pitch, int rows, int cols,int flimit)
|
| +-global sym(vp8_mbpost_proc_across_ip_xmm)
|
| ++global sym(vp8_mbpost_proc_across_ip_xmm) PRIVATE
|
| + sym(vp8_mbpost_proc_across_ip_xmm):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -630,7 +630,7 @@ nextcol4:
|
| + ; unsigned char bothclamp[16],
|
| + ; unsigned int Width, unsigned int Height, int Pitch)
|
| + extern sym(rand)
|
| +-global sym(vp8_plane_add_noise_wmt)
|
| ++global sym(vp8_plane_add_noise_wmt) PRIVATE
|
| + sym(vp8_plane_add_noise_wmt):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/common/x86/recon_mmx.asm b/libvpx/source/libvpx/vp8/common/x86/recon_mmx.asm
|
| +index e7211fc..4256f56 100644
|
| +--- a/vp8/common/x86/recon_mmx.asm
|
| ++++ b/vp8/common/x86/recon_mmx.asm
|
| +@@ -11,7 +11,7 @@
|
| +
|
| + %include "vpx_ports/x86_abi_support.asm"
|
| + ;void vp8_recon_b_mmx(unsigned char *s, short *q, unsigned char *d, int stride)
|
| +-global sym(vp8_recon_b_mmx)
|
| ++global sym(vp8_recon_b_mmx) PRIVATE
|
| + sym(vp8_recon_b_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -65,7 +65,7 @@ sym(vp8_recon_b_mmx):
|
| + ; unsigned char *dst,
|
| + ; int dst_stride
|
| + ; )
|
| +-global sym(vp8_copy_mem8x8_mmx)
|
| ++global sym(vp8_copy_mem8x8_mmx) PRIVATE
|
| + sym(vp8_copy_mem8x8_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -128,7 +128,7 @@ sym(vp8_copy_mem8x8_mmx):
|
| + ; unsigned char *dst,
|
| + ; int dst_stride
|
| + ; )
|
| +-global sym(vp8_copy_mem8x4_mmx)
|
| ++global sym(vp8_copy_mem8x4_mmx) PRIVATE
|
| + sym(vp8_copy_mem8x4_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -172,7 +172,7 @@ sym(vp8_copy_mem8x4_mmx):
|
| + ; unsigned char *dst,
|
| + ; int dst_stride
|
| + ; )
|
| +-global sym(vp8_copy_mem16x16_mmx)
|
| ++global sym(vp8_copy_mem16x16_mmx) PRIVATE
|
| + sym(vp8_copy_mem16x16_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/common/x86/recon_sse2.asm b/libvpx/source/libvpx/vp8/common/x86/recon_sse2.asm
|
| +index 4ad3973..05b5480 100644
|
| +--- a/vp8/common/x86/recon_sse2.asm
|
| ++++ b/vp8/common/x86/recon_sse2.asm
|
| +@@ -11,7 +11,7 @@
|
| +
|
| + %include "vpx_ports/x86_abi_support.asm"
|
| + ;void vp8_recon2b_sse2(unsigned char *s, short *q, unsigned char *d, int stride)
|
| +-global sym(vp8_recon2b_sse2)
|
| ++global sym(vp8_recon2b_sse2) PRIVATE
|
| + sym(vp8_recon2b_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -62,7 +62,7 @@ sym(vp8_recon2b_sse2):
|
| +
|
| +
|
| + ;void vp8_recon4b_sse2(unsigned char *s, short *q, unsigned char *d, int stride)
|
| +-global sym(vp8_recon4b_sse2)
|
| ++global sym(vp8_recon4b_sse2) PRIVATE
|
| + sym(vp8_recon4b_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -132,7 +132,7 @@ sym(vp8_recon4b_sse2):
|
| + ; unsigned char *dst,
|
| + ; int dst_stride
|
| + ; )
|
| +-global sym(vp8_copy_mem16x16_sse2)
|
| ++global sym(vp8_copy_mem16x16_sse2) PRIVATE
|
| + sym(vp8_copy_mem16x16_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/common/x86/subpixel_mmx.asm b/libvpx/source/libvpx/vp8/common/x86/subpixel_mmx.asm
|
| +index 9004b52..8c71050 100644
|
| +--- a/vp8/common/x86/subpixel_mmx.asm
|
| ++++ b/vp8/common/x86/subpixel_mmx.asm
|
| +@@ -27,7 +27,7 @@
|
| + ; unsigned int output_width,
|
| + ; short * vp8_filter
|
| + ;)
|
| +-global sym(vp8_filter_block1d_h6_mmx)
|
| ++global sym(vp8_filter_block1d_h6_mmx) PRIVATE
|
| + sym(vp8_filter_block1d_h6_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -124,7 +124,7 @@ nextrow:
|
| + ; unsigned int output_width,
|
| + ; short * vp8_filter
|
| + ;)
|
| +-global sym(vp8_filter_block1dc_v6_mmx)
|
| ++global sym(vp8_filter_block1dc_v6_mmx) PRIVATE
|
| + sym(vp8_filter_block1dc_v6_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -212,7 +212,7 @@ nextrow_cv:
|
| + ; unsigned char *dst_ptr,
|
| + ; int dst_pitch
|
| + ;)
|
| +-global sym(vp8_bilinear_predict8x8_mmx)
|
| ++global sym(vp8_bilinear_predict8x8_mmx) PRIVATE
|
| + sym(vp8_bilinear_predict8x8_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -369,7 +369,7 @@ next_row_8x8:
|
| + ; unsigned char *dst_ptr,
|
| + ; int dst_pitch
|
| + ;)
|
| +-global sym(vp8_bilinear_predict8x4_mmx)
|
| ++global sym(vp8_bilinear_predict8x4_mmx) PRIVATE
|
| + sym(vp8_bilinear_predict8x4_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -524,7 +524,7 @@ next_row_8x4:
|
| + ; unsigned char *dst_ptr,
|
| + ; int dst_pitch
|
| + ;)
|
| +-global sym(vp8_bilinear_predict4x4_mmx)
|
| ++global sym(vp8_bilinear_predict4x4_mmx) PRIVATE
|
| + sym(vp8_bilinear_predict4x4_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/common/x86/subpixel_sse2.asm b/libvpx/source/libvpx/vp8/common/x86/subpixel_sse2.asm
|
| +index b87cad2..5691961 100644
|
| +--- a/vp8/common/x86/subpixel_sse2.asm
|
| ++++ b/vp8/common/x86/subpixel_sse2.asm
|
| +@@ -32,7 +32,7 @@
|
| + ; unsigned int output_width,
|
| + ; short *vp8_filter
|
| + ;)
|
| +-global sym(vp8_filter_block1d8_h6_sse2)
|
| ++global sym(vp8_filter_block1d8_h6_sse2) PRIVATE
|
| + sym(vp8_filter_block1d8_h6_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -152,7 +152,7 @@ filter_block1d8_h6_rowloop:
|
| + ; even number. This function handles 8 pixels in horizontal direction, calculating ONE
|
| + ; rows each iteration to take advantage of the 128 bits operations.
|
| + ;*************************************************************************************/
|
| +-global sym(vp8_filter_block1d16_h6_sse2)
|
| ++global sym(vp8_filter_block1d16_h6_sse2) PRIVATE
|
| + sym(vp8_filter_block1d16_h6_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -328,7 +328,7 @@ filter_block1d16_h6_sse2_rowloop:
|
| + ; Notes: filter_block1d8_v6 applies a 6 tap filter vertically to the input pixels. The
|
| + ; input pixel array has output_height rows.
|
| + ;*************************************************************************************/
|
| +-global sym(vp8_filter_block1d8_v6_sse2)
|
| ++global sym(vp8_filter_block1d8_v6_sse2) PRIVATE
|
| + sym(vp8_filter_block1d8_v6_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -423,7 +423,7 @@ vp8_filter_block1d8_v6_sse2_loop:
|
| + ; Notes: filter_block1d16_v6 applies a 6 tap filter vertically to the input pixels. The
|
| + ; input pixel array has output_height rows.
|
| + ;*************************************************************************************/
|
| +-global sym(vp8_filter_block1d16_v6_sse2)
|
| ++global sym(vp8_filter_block1d16_v6_sse2) PRIVATE
|
| + sym(vp8_filter_block1d16_v6_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -533,7 +533,7 @@ vp8_filter_block1d16_v6_sse2_loop:
|
| + ; const short *vp8_filter
|
| + ;)
|
| + ; First-pass filter only when yoffset==0
|
| +-global sym(vp8_filter_block1d8_h6_only_sse2)
|
| ++global sym(vp8_filter_block1d8_h6_only_sse2) PRIVATE
|
| + sym(vp8_filter_block1d8_h6_only_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -646,7 +646,7 @@ filter_block1d8_h6_only_rowloop:
|
| + ; const short *vp8_filter
|
| + ;)
|
| + ; First-pass filter only when yoffset==0
|
| +-global sym(vp8_filter_block1d16_h6_only_sse2)
|
| ++global sym(vp8_filter_block1d16_h6_only_sse2) PRIVATE
|
| + sym(vp8_filter_block1d16_h6_only_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -811,7 +811,7 @@ filter_block1d16_h6_only_sse2_rowloop:
|
| + ; const short *vp8_filter
|
| + ;)
|
| + ; Second-pass filter only when xoffset==0
|
| +-global sym(vp8_filter_block1d8_v6_only_sse2)
|
| ++global sym(vp8_filter_block1d8_v6_only_sse2) PRIVATE
|
| + sym(vp8_filter_block1d8_v6_only_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -903,7 +903,7 @@ vp8_filter_block1d8_v6_only_sse2_loop:
|
| + ; unsigned int output_height,
|
| + ; unsigned int output_width
|
| + ;)
|
| +-global sym(vp8_unpack_block1d16_h6_sse2)
|
| ++global sym(vp8_unpack_block1d16_h6_sse2) PRIVATE
|
| + sym(vp8_unpack_block1d16_h6_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -964,7 +964,7 @@ unpack_block1d16_h6_sse2_rowloop:
|
| + ; int dst_pitch
|
| + ;)
|
| + extern sym(vp8_bilinear_filters_mmx)
|
| +-global sym(vp8_bilinear_predict16x16_sse2)
|
| ++global sym(vp8_bilinear_predict16x16_sse2) PRIVATE
|
| + sym(vp8_bilinear_predict16x16_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -1233,7 +1233,7 @@ done:
|
| + ; int dst_pitch
|
| + ;)
|
| + extern sym(vp8_bilinear_filters_mmx)
|
| +-global sym(vp8_bilinear_predict8x8_sse2)
|
| ++global sym(vp8_bilinear_predict8x8_sse2) PRIVATE
|
| + sym(vp8_bilinear_predict8x8_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/common/x86/subpixel_ssse3.asm b/libvpx/source/libvpx/vp8/common/x86/subpixel_ssse3.asm
|
| +index 7f6fd93..3679b0f 100644
|
| +--- a/vp8/common/x86/subpixel_ssse3.asm
|
| ++++ b/vp8/common/x86/subpixel_ssse3.asm
|
| +@@ -34,7 +34,7 @@
|
| + ; unsigned int output_height,
|
| + ; unsigned int vp8_filter_index
|
| + ;)
|
| +-global sym(vp8_filter_block1d8_h6_ssse3)
|
| ++global sym(vp8_filter_block1d8_h6_ssse3) PRIVATE
|
| + sym(vp8_filter_block1d8_h6_ssse3):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -174,7 +174,7 @@ filter_block1d8_h4_rowloop_ssse3:
|
| + ; unsigned int output_height,
|
| + ; unsigned int vp8_filter_index
|
| + ;)
|
| +-global sym(vp8_filter_block1d16_h6_ssse3)
|
| ++global sym(vp8_filter_block1d16_h6_ssse3) PRIVATE
|
| + sym(vp8_filter_block1d16_h6_ssse3):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -339,7 +339,7 @@ filter_block1d16_h4_rowloop_ssse3:
|
| + ; unsigned int output_height,
|
| + ; unsigned int vp8_filter_index
|
| + ;)
|
| +-global sym(vp8_filter_block1d4_h6_ssse3)
|
| ++global sym(vp8_filter_block1d4_h6_ssse3) PRIVATE
|
| + sym(vp8_filter_block1d4_h6_ssse3):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -466,7 +466,7 @@ filter_block1d4_h4_rowloop_ssse3:
|
| + ; unsigned int output_height,
|
| + ; unsigned int vp8_filter_index
|
| + ;)
|
| +-global sym(vp8_filter_block1d16_v6_ssse3)
|
| ++global sym(vp8_filter_block1d16_v6_ssse3) PRIVATE
|
| + sym(vp8_filter_block1d16_v6_ssse3):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -651,7 +651,7 @@ vp8_filter_block1d16_v4_ssse3_loop:
|
| + ; unsigned int output_height,
|
| + ; unsigned int vp8_filter_index
|
| + ;)
|
| +-global sym(vp8_filter_block1d8_v6_ssse3)
|
| ++global sym(vp8_filter_block1d8_v6_ssse3) PRIVATE
|
| + sym(vp8_filter_block1d8_v6_ssse3):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -788,7 +788,7 @@ vp8_filter_block1d8_v4_ssse3_loop:
|
| + ; unsigned int output_height,
|
| + ; unsigned int vp8_filter_index
|
| + ;)
|
| +-global sym(vp8_filter_block1d4_v6_ssse3)
|
| ++global sym(vp8_filter_block1d4_v6_ssse3) PRIVATE
|
| + sym(vp8_filter_block1d4_v6_ssse3):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -927,7 +927,7 @@ vp8_filter_block1d4_v4_ssse3_loop:
|
| + ; unsigned char *dst_ptr,
|
| + ; int dst_pitch
|
| + ;)
|
| +-global sym(vp8_bilinear_predict16x16_ssse3)
|
| ++global sym(vp8_bilinear_predict16x16_ssse3) PRIVATE
|
| + sym(vp8_bilinear_predict16x16_ssse3):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -1190,7 +1190,7 @@ done:
|
| + ; unsigned char *dst_ptr,
|
| + ; int dst_pitch
|
| + ;)
|
| +-global sym(vp8_bilinear_predict8x8_ssse3)
|
| ++global sym(vp8_bilinear_predict8x8_ssse3) PRIVATE
|
| + sym(vp8_bilinear_predict8x8_ssse3):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/decoder/x86/dequantize_mmx.asm b/libvpx/source/libvpx/vp8/decoder/x86/dequantize_mmx.asm
|
| +index 0d6133a..208fa17 100644
|
| +--- a/vp8/decoder/x86/dequantize_mmx.asm
|
| ++++ b/vp8/decoder/x86/dequantize_mmx.asm
|
| +@@ -13,7 +13,7 @@
|
| +
|
| +
|
| + ;void vp8_dequantize_b_impl_mmx(short *sq, short *dq, short *q)
|
| +-global sym(vp8_dequantize_b_impl_mmx)
|
| ++global sym(vp8_dequantize_b_impl_mmx) PRIVATE
|
| + sym(vp8_dequantize_b_impl_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -51,7 +51,7 @@ sym(vp8_dequantize_b_impl_mmx):
|
| +
|
| +
|
| + ;void dequant_idct_add_mmx(short *input, short *dq, unsigned char *pred, unsigned char *dest, int pitch, int stride)
|
| +-global sym(vp8_dequant_idct_add_mmx)
|
| ++global sym(vp8_dequant_idct_add_mmx) PRIVATE
|
| + sym(vp8_dequant_idct_add_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -248,7 +248,7 @@ sym(vp8_dequant_idct_add_mmx):
|
| +
|
| +
|
| + ;void dequant_dc_idct_add_mmx(short *input, short *dq, unsigned char *pred, unsigned char *dest, int pitch, int stride, int Dc)
|
| +-global sym(vp8_dequant_dc_idct_add_mmx)
|
| ++global sym(vp8_dequant_dc_idct_add_mmx) PRIVATE
|
| + sym(vp8_dequant_dc_idct_add_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/encoder/x86/dct_mmx.asm b/libvpx/source/libvpx/vp8/encoder/x86/dct_mmx.asm
|
| +index f07b030..6f188cb 100644
|
| +--- a/vp8/encoder/x86/dct_mmx.asm
|
| ++++ b/vp8/encoder/x86/dct_mmx.asm
|
| +@@ -12,7 +12,7 @@
|
| + %include "vpx_ports/x86_abi_support.asm"
|
| +
|
| + ;void vp8_short_fdct4x4_mmx(short *input, short *output, int pitch)
|
| +-global sym(vp8_short_fdct4x4_mmx)
|
| ++global sym(vp8_short_fdct4x4_mmx) PRIVATE
|
| + sym(vp8_short_fdct4x4_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/encoder/x86/dct_sse2.asm b/libvpx/source/libvpx/vp8/encoder/x86/dct_sse2.asm
|
| +index 652dd98..e92b9b0 100644
|
| +--- a/vp8/encoder/x86/dct_sse2.asm
|
| ++++ b/vp8/encoder/x86/dct_sse2.asm
|
| +@@ -59,7 +59,7 @@
|
| + %endmacro
|
| +
|
| + ;void vp8_short_fdct4x4_sse2(short *input, short *output, int pitch)
|
| +-global sym(vp8_short_fdct4x4_sse2)
|
| ++global sym(vp8_short_fdct4x4_sse2) PRIVATE
|
| + sym(vp8_short_fdct4x4_sse2):
|
| +
|
| + STACK_FRAME_CREATE
|
| +@@ -164,7 +164,7 @@ sym(vp8_short_fdct4x4_sse2):
|
| + STACK_FRAME_DESTROY
|
| +
|
| + ;void vp8_short_fdct8x4_sse2(short *input, short *output, int pitch)
|
| +-global sym(vp8_short_fdct8x4_sse2)
|
| ++global sym(vp8_short_fdct8x4_sse2) PRIVATE
|
| + sym(vp8_short_fdct8x4_sse2):
|
| +
|
| + STACK_FRAME_CREATE
|
| +diff --git a/vp8/encoder/x86/encodeopt.asm b/libvpx/source/libvpx/vp8/encoder/x86/encodeopt.asm
|
| +index c0f06bb..e3802ef 100644
|
| +--- a/vp8/encoder/x86/encodeopt.asm
|
| ++++ b/vp8/encoder/x86/encodeopt.asm
|
| +@@ -12,7 +12,7 @@
|
| + %include "vpx_ports/x86_abi_support.asm"
|
| +
|
| + ;int vp8_block_error_xmm(short *coeff_ptr, short *dcoef_ptr)
|
| +-global sym(vp8_block_error_xmm)
|
| ++global sym(vp8_block_error_xmm) PRIVATE
|
| + sym(vp8_block_error_xmm):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -60,7 +60,7 @@ sym(vp8_block_error_xmm):
|
| + ret
|
| +
|
| + ;int vp8_block_error_mmx(short *coeff_ptr, short *dcoef_ptr)
|
| +-global sym(vp8_block_error_mmx)
|
| ++global sym(vp8_block_error_mmx) PRIVATE
|
| + sym(vp8_block_error_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -126,7 +126,7 @@ sym(vp8_block_error_mmx):
|
| +
|
| +
|
| + ;int vp8_mbblock_error_mmx_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
|
| +-global sym(vp8_mbblock_error_mmx_impl)
|
| ++global sym(vp8_mbblock_error_mmx_impl) PRIVATE
|
| + sym(vp8_mbblock_error_mmx_impl):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -203,7 +203,7 @@ mberror_loop_mmx:
|
| +
|
| +
|
| + ;int vp8_mbblock_error_xmm_impl(short *coeff_ptr, short *dcoef_ptr, int dc);
|
| +-global sym(vp8_mbblock_error_xmm_impl)
|
| ++global sym(vp8_mbblock_error_xmm_impl) PRIVATE
|
| + sym(vp8_mbblock_error_xmm_impl):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -271,7 +271,7 @@ mberror_loop:
|
| +
|
| +
|
| + ;int vp8_mbuverror_mmx_impl(short *s_ptr, short *d_ptr);
|
| +-global sym(vp8_mbuverror_mmx_impl)
|
| ++global sym(vp8_mbuverror_mmx_impl) PRIVATE
|
| + sym(vp8_mbuverror_mmx_impl):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -328,7 +328,7 @@ mbuverror_loop_mmx:
|
| +
|
| +
|
| + ;int vp8_mbuverror_xmm_impl(short *s_ptr, short *d_ptr);
|
| +-global sym(vp8_mbuverror_xmm_impl)
|
| ++global sym(vp8_mbuverror_xmm_impl) PRIVATE
|
| + sym(vp8_mbuverror_xmm_impl):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/encoder/x86/fwalsh_sse2.asm b/libvpx/source/libvpx/vp8/encoder/x86/fwalsh_sse2.asm
|
| +index 39439f0..8a6915f 100644
|
| +--- a/vp8/encoder/x86/fwalsh_sse2.asm
|
| ++++ b/vp8/encoder/x86/fwalsh_sse2.asm
|
| +@@ -12,7 +12,7 @@
|
| + %include "vpx_ports/x86_abi_support.asm"
|
| +
|
| + ;void vp8_short_walsh4x4_sse2(short *input, short *output, int pitch)
|
| +-global sym(vp8_short_walsh4x4_sse2)
|
| ++global sym(vp8_short_walsh4x4_sse2) PRIVATE
|
| + sym(vp8_short_walsh4x4_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/encoder/x86/quantize_mmx.asm b/libvpx/source/libvpx/vp8/encoder/x86/quantize_mmx.asm
|
| +index f29a54e..2864ce1 100644
|
| +--- a/vp8/encoder/x86/quantize_mmx.asm
|
| ++++ b/vp8/encoder/x86/quantize_mmx.asm
|
| +@@ -15,7 +15,7 @@
|
| + ; short *qcoeff_ptr,short *dequant_ptr,
|
| + ; short *scan_mask, short *round_ptr,
|
| + ; short *quant_ptr, short *dqcoeff_ptr);
|
| +-global sym(vp8_fast_quantize_b_impl_mmx)
|
| ++global sym(vp8_fast_quantize_b_impl_mmx) PRIVATE
|
| + sym(vp8_fast_quantize_b_impl_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/encoder/x86/quantize_sse2.asm b/libvpx/source/libvpx/vp8/encoder/x86/quantize_sse2.asm
|
| +index 9a15840..2e655a4 100644
|
| +--- a/vp8/encoder/x86/quantize_sse2.asm
|
| ++++ b/vp8/encoder/x86/quantize_sse2.asm
|
| +@@ -16,7 +16,7 @@
|
| + ; (BLOCK *b, | 0
|
| + ; BLOCKD *d) | 1
|
| +
|
| +-global sym(vp8_regular_quantize_b_sse2)
|
| ++global sym(vp8_regular_quantize_b_sse2) PRIVATE
|
| + sym(vp8_regular_quantize_b_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -247,7 +247,7 @@ ZIGZAG_LOOP 15
|
| + ; short *quant_ptr, | 5
|
| + ; short *dqcoeff_ptr) | 6
|
| +
|
| +-global sym(vp8_fast_quantize_b_impl_sse2)
|
| ++global sym(vp8_fast_quantize_b_impl_sse2) PRIVATE
|
| + sym(vp8_fast_quantize_b_impl_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/encoder/x86/quantize_ssse3.asm b/libvpx/source/libvpx/vp8/encoder/x86/quantize_ssse3.asm
|
| +index 2f33199..6cfdd8f 100644
|
| +--- a/vp8/encoder/x86/quantize_ssse3.asm
|
| ++++ b/vp8/encoder/x86/quantize_ssse3.asm
|
| +@@ -16,7 +16,7 @@
|
| + ; short *round_ptr,
|
| + ; short *quant_ptr, short *dqcoeff_ptr);
|
| + ;
|
| +-global sym(vp8_fast_quantize_b_impl_ssse3)
|
| ++global sym(vp8_fast_quantize_b_impl_ssse3) PRIVATE
|
| + sym(vp8_fast_quantize_b_impl_ssse3):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/encoder/x86/sad_mmx.asm b/libvpx/source/libvpx/vp8/encoder/x86/sad_mmx.asm
|
| +index 85cb023..6c4c3c3 100644
|
| +--- a/vp8/encoder/x86/sad_mmx.asm
|
| ++++ b/vp8/encoder/x86/sad_mmx.asm
|
| +@@ -11,11 +11,11 @@
|
| +
|
| + %include "vpx_ports/x86_abi_support.asm"
|
| +
|
| +-global sym(vp8_sad16x16_mmx)
|
| +-global sym(vp8_sad8x16_mmx)
|
| +-global sym(vp8_sad8x8_mmx)
|
| +-global sym(vp8_sad4x4_mmx)
|
| +-global sym(vp8_sad16x8_mmx)
|
| ++global sym(vp8_sad16x16_mmx) PRIVATE
|
| ++global sym(vp8_sad8x16_mmx) PRIVATE
|
| ++global sym(vp8_sad8x8_mmx) PRIVATE
|
| ++global sym(vp8_sad4x4_mmx) PRIVATE
|
| ++global sym(vp8_sad16x8_mmx) PRIVATE
|
| +
|
| + ;unsigned int vp8_sad16x16_mmx(
|
| + ; unsigned char *src_ptr,
|
| +diff --git a/vp8/encoder/x86/sad_sse2.asm b/libvpx/source/libvpx/vp8/encoder/x86/sad_sse2.asm
|
| +index 39ed796..92df957 100644
|
| +--- a/vp8/encoder/x86/sad_sse2.asm
|
| ++++ b/vp8/encoder/x86/sad_sse2.asm
|
| +@@ -16,7 +16,7 @@
|
| + ; int src_stride,
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride)
|
| +-global sym(vp8_sad16x16_wmt)
|
| ++global sym(vp8_sad16x16_wmt) PRIVATE
|
| + sym(vp8_sad16x16_wmt):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -88,7 +88,7 @@ x16x16sad_wmt_loop:
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride,
|
| + ; int max_err)
|
| +-global sym(vp8_sad8x16_wmt)
|
| ++global sym(vp8_sad8x16_wmt) PRIVATE
|
| + sym(vp8_sad8x16_wmt):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -151,7 +151,7 @@ x8x16sad_wmt_early_exit:
|
| + ; int src_stride,
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride)
|
| +-global sym(vp8_sad8x8_wmt)
|
| ++global sym(vp8_sad8x8_wmt) PRIVATE
|
| + sym(vp8_sad8x8_wmt):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -204,7 +204,7 @@ x8x8sad_wmt_early_exit:
|
| + ; int src_stride,
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride)
|
| +-global sym(vp8_sad4x4_wmt)
|
| ++global sym(vp8_sad4x4_wmt) PRIVATE
|
| + sym(vp8_sad4x4_wmt):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -259,7 +259,7 @@ sym(vp8_sad4x4_wmt):
|
| + ; int src_stride,
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride)
|
| +-global sym(vp8_sad16x8_wmt)
|
| ++global sym(vp8_sad16x8_wmt) PRIVATE
|
| + sym(vp8_sad16x8_wmt):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/encoder/x86/sad_sse3.asm b/libvpx/source/libvpx/vp8/encoder/x86/sad_sse3.asm
|
| +index f0336ab..84c3240 100644
|
| +--- a/vp8/encoder/x86/sad_sse3.asm
|
| ++++ b/vp8/encoder/x86/sad_sse3.asm
|
| +@@ -372,7 +372,7 @@
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride,
|
| + ; int *results)
|
| +-global sym(vp8_sad16x16x3_sse3)
|
| ++global sym(vp8_sad16x16x3_sse3) PRIVATE
|
| + sym(vp8_sad16x16x3_sse3):
|
| +
|
| + STACK_FRAME_CREATE_X3
|
| +@@ -414,7 +414,7 @@ sym(vp8_sad16x16x3_sse3):
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride,
|
| + ; int *results)
|
| +-global sym(vp8_sad16x8x3_sse3)
|
| ++global sym(vp8_sad16x8x3_sse3) PRIVATE
|
| + sym(vp8_sad16x8x3_sse3):
|
| +
|
| + STACK_FRAME_CREATE_X3
|
| +@@ -452,7 +452,7 @@ sym(vp8_sad16x8x3_sse3):
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride,
|
| + ; int *results)
|
| +-global sym(vp8_sad8x16x3_sse3)
|
| ++global sym(vp8_sad8x16x3_sse3) PRIVATE
|
| + sym(vp8_sad8x16x3_sse3):
|
| +
|
| + STACK_FRAME_CREATE_X3
|
| +@@ -481,7 +481,7 @@ sym(vp8_sad8x16x3_sse3):
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride,
|
| + ; int *results)
|
| +-global sym(vp8_sad8x8x3_sse3)
|
| ++global sym(vp8_sad8x8x3_sse3) PRIVATE
|
| + sym(vp8_sad8x8x3_sse3):
|
| +
|
| + STACK_FRAME_CREATE_X3
|
| +@@ -506,7 +506,7 @@ sym(vp8_sad8x8x3_sse3):
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride,
|
| + ; int *results)
|
| +-global sym(vp8_sad4x4x3_sse3)
|
| ++global sym(vp8_sad4x4x3_sse3) PRIVATE
|
| + sym(vp8_sad4x4x3_sse3):
|
| +
|
| + STACK_FRAME_CREATE_X3
|
| +@@ -581,7 +581,7 @@ sym(vp8_sad4x4x3_sse3):
|
| + ; int ref_stride,
|
| + ; int max_err)
|
| + ;%define lddqu movdqu
|
| +-global sym(vp8_sad16x16_sse3)
|
| ++global sym(vp8_sad16x16_sse3) PRIVATE
|
| + sym(vp8_sad16x16_sse3):
|
| +
|
| + STACK_FRAME_CREATE_X3
|
| +@@ -634,7 +634,7 @@ sym(vp8_sad16x16_sse3):
|
| + ; unsigned char *ref_ptr_base,
|
| + ; int ref_stride,
|
| + ; int *results)
|
| +-global sym(vp8_sad16x16x4d_sse3)
|
| ++global sym(vp8_sad16x16x4d_sse3) PRIVATE
|
| + sym(vp8_sad16x16x4d_sse3):
|
| +
|
| + STACK_FRAME_CREATE_X4
|
| +@@ -685,7 +685,7 @@ sym(vp8_sad16x16x4d_sse3):
|
| + ; unsigned char *ref_ptr_base,
|
| + ; int ref_stride,
|
| + ; int *results)
|
| +-global sym(vp8_sad16x8x4d_sse3)
|
| ++global sym(vp8_sad16x8x4d_sse3) PRIVATE
|
| + sym(vp8_sad16x8x4d_sse3):
|
| +
|
| + STACK_FRAME_CREATE_X4
|
| +@@ -732,7 +732,7 @@ sym(vp8_sad16x8x4d_sse3):
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride,
|
| + ; int *results)
|
| +-global sym(vp8_sad8x16x4d_sse3)
|
| ++global sym(vp8_sad8x16x4d_sse3) PRIVATE
|
| + sym(vp8_sad8x16x4d_sse3):
|
| +
|
| + STACK_FRAME_CREATE_X4
|
| +@@ -765,7 +765,7 @@ sym(vp8_sad8x16x4d_sse3):
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride,
|
| + ; int *results)
|
| +-global sym(vp8_sad8x8x4d_sse3)
|
| ++global sym(vp8_sad8x8x4d_sse3) PRIVATE
|
| + sym(vp8_sad8x8x4d_sse3):
|
| +
|
| + STACK_FRAME_CREATE_X4
|
| +@@ -794,7 +794,7 @@ sym(vp8_sad8x8x4d_sse3):
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride,
|
| + ; int *results)
|
| +-global sym(vp8_sad4x4x4d_sse3)
|
| ++global sym(vp8_sad4x4x4d_sse3) PRIVATE
|
| + sym(vp8_sad4x4x4d_sse3):
|
| +
|
| + STACK_FRAME_CREATE_X4
|
| +diff --git a/vp8/encoder/x86/sad_sse4.asm b/libvpx/source/libvpx/vp8/encoder/x86/sad_sse4.asm
|
| +index 03ecec4..f7fccd7 100644
|
| +--- a/vp8/encoder/x86/sad_sse4.asm
|
| ++++ b/vp8/encoder/x86/sad_sse4.asm
|
| +@@ -161,7 +161,7 @@
|
| + ; const unsigned char *ref_ptr,
|
| + ; int ref_stride,
|
| + ; unsigned short *sad_array);
|
| +-global sym(vp8_sad16x16x8_sse4)
|
| ++global sym(vp8_sad16x16x8_sse4) PRIVATE
|
| + sym(vp8_sad16x16x8_sse4):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -203,7 +203,7 @@ sym(vp8_sad16x16x8_sse4):
|
| + ; int ref_stride,
|
| + ; unsigned short *sad_array
|
| + ;);
|
| +-global sym(vp8_sad16x8x8_sse4)
|
| ++global sym(vp8_sad16x8x8_sse4) PRIVATE
|
| + sym(vp8_sad16x8x8_sse4):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -241,7 +241,7 @@ sym(vp8_sad16x8x8_sse4):
|
| + ; int ref_stride,
|
| + ; unsigned short *sad_array
|
| + ;);
|
| +-global sym(vp8_sad8x8x8_sse4)
|
| ++global sym(vp8_sad8x8x8_sse4) PRIVATE
|
| + sym(vp8_sad8x8x8_sse4):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -279,7 +279,7 @@ sym(vp8_sad8x8x8_sse4):
|
| + ; int ref_stride,
|
| + ; unsigned short *sad_array
|
| + ;);
|
| +-global sym(vp8_sad8x16x8_sse4)
|
| ++global sym(vp8_sad8x16x8_sse4) PRIVATE
|
| + sym(vp8_sad8x16x8_sse4):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -320,7 +320,7 @@ sym(vp8_sad8x16x8_sse4):
|
| + ; int ref_stride,
|
| + ; unsigned short *sad_array
|
| + ;);
|
| +-global sym(vp8_sad4x4x8_sse4)
|
| ++global sym(vp8_sad4x4x8_sse4) PRIVATE
|
| + sym(vp8_sad4x4x8_sse4):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/encoder/x86/sad_ssse3.asm b/libvpx/source/libvpx/vp8/encoder/x86/sad_ssse3.asm
|
| +index 69c5eae..8fb850b 100644
|
| +--- a/vp8/encoder/x86/sad_ssse3.asm
|
| ++++ b/vp8/encoder/x86/sad_ssse3.asm
|
| +@@ -152,7 +152,7 @@
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride,
|
| + ; int *results)
|
| +-global sym(vp8_sad16x16x3_ssse3)
|
| ++global sym(vp8_sad16x16x3_ssse3) PRIVATE
|
| + sym(vp8_sad16x16x3_ssse3):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -263,7 +263,7 @@ vp8_sad16x16x3_ssse3_store_off:
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride,
|
| + ; int *results)
|
| +-global sym(vp8_sad16x8x3_ssse3)
|
| ++global sym(vp8_sad16x8x3_ssse3) PRIVATE
|
| + sym(vp8_sad16x8x3_ssse3):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/encoder/x86/ssim_opt.asm b/libvpx/source/libvpx/vp8/encoder/x86/ssim_opt.asm
|
| +index c267cdb..4a8661b 100644
|
| +--- a/vp8/encoder/x86/ssim_opt.asm
|
| ++++ b/vp8/encoder/x86/ssim_opt.asm
|
| +@@ -61,7 +61,7 @@
|
| + ; or pavgb At this point this is just meant to be first pass for calculating
|
| + ; all the parms needed for 16x16 ssim so we can play with dssim as distortion
|
| + ; in mode selection code.
|
| +-global sym(vp8_ssim_parms_16x16_sse3)
|
| ++global sym(vp8_ssim_parms_16x16_sse3) PRIVATE
|
| + sym(vp8_ssim_parms_16x16_sse3):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -149,7 +149,7 @@ NextRow:
|
| + ; or pavgb At this point this is just meant to be first pass for calculating
|
| + ; all the parms needed for 16x16 ssim so we can play with dssim as distortion
|
| + ; in mode selection code.
|
| +-global sym(vp8_ssim_parms_8x8_sse3)
|
| ++global sym(vp8_ssim_parms_8x8_sse3) PRIVATE
|
| + sym(vp8_ssim_parms_8x8_sse3):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/encoder/x86/subtract_mmx.asm b/libvpx/source/libvpx/vp8/encoder/x86/subtract_mmx.asm
|
| +index a47e1f0..08e7fa3 100644
|
| +--- a/vp8/encoder/x86/subtract_mmx.asm
|
| ++++ b/vp8/encoder/x86/subtract_mmx.asm
|
| +@@ -14,7 +14,7 @@
|
| + ;void vp8_subtract_b_mmx_impl(unsigned char *z, int src_stride,
|
| + ; short *diff, unsigned char *Predictor,
|
| + ; int pitch);
|
| +-global sym(vp8_subtract_b_mmx_impl)
|
| ++global sym(vp8_subtract_b_mmx_impl) PRIVATE
|
| + sym(vp8_subtract_b_mmx_impl):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -74,7 +74,7 @@ sym(vp8_subtract_b_mmx_impl):
|
| + ret
|
| +
|
| + ;void vp8_subtract_mby_mmx(short *diff, unsigned char *src, unsigned char *pred, int stride)
|
| +-global sym(vp8_subtract_mby_mmx)
|
| ++global sym(vp8_subtract_mby_mmx) PRIVATE
|
| + sym(vp8_subtract_mby_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -150,7 +150,7 @@ submby_loop:
|
| +
|
| +
|
| + ;void vp8_subtract_mbuv_mmx(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride)
|
| +-global sym(vp8_subtract_mbuv_mmx)
|
| ++global sym(vp8_subtract_mbuv_mmx) PRIVATE
|
| + sym(vp8_subtract_mbuv_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/encoder/x86/subtract_sse2.asm b/libvpx/source/libvpx/vp8/encoder/x86/subtract_sse2.asm
|
| +index 3fb23d0..2a14730 100644
|
| +--- a/vp8/encoder/x86/subtract_sse2.asm
|
| ++++ b/vp8/encoder/x86/subtract_sse2.asm
|
| +@@ -14,7 +14,7 @@
|
| + ;void vp8_subtract_b_sse2_impl(unsigned char *z, int src_stride,
|
| + ; short *diff, unsigned char *Predictor,
|
| + ; int pitch);
|
| +-global sym(vp8_subtract_b_sse2_impl)
|
| ++global sym(vp8_subtract_b_sse2_impl) PRIVATE
|
| + sym(vp8_subtract_b_sse2_impl):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -72,7 +72,7 @@ sym(vp8_subtract_b_sse2_impl):
|
| +
|
| +
|
| + ;void vp8_subtract_mby_sse2(short *diff, unsigned char *src, unsigned char *pred, int stride)
|
| +-global sym(vp8_subtract_mby_sse2)
|
| ++global sym(vp8_subtract_mby_sse2) PRIVATE
|
| + sym(vp8_subtract_mby_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -146,7 +146,7 @@ submby_loop:
|
| +
|
| +
|
| + ;void vp8_subtract_mbuv_sse2(short *diff, unsigned char *usrc, unsigned char *vsrc, unsigned char *pred, int stride)
|
| +-global sym(vp8_subtract_mbuv_sse2)
|
| ++global sym(vp8_subtract_mbuv_sse2) PRIVATE
|
| + sym(vp8_subtract_mbuv_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/encoder/x86/temporal_filter_apply_sse2.asm b/libvpx/source/libvpx/vp8/encoder/x86/temporal_filter_apply_sse2.asm
|
| +index 0127b01..8d66309 100644
|
| +--- a/vp8/encoder/x86/temporal_filter_apply_sse2.asm
|
| ++++ b/vp8/encoder/x86/temporal_filter_apply_sse2.asm
|
| +@@ -20,7 +20,7 @@
|
| + ; int filter_weight, | 5
|
| + ; unsigned int *accumulator, | 6
|
| + ; unsigned short *count) | 7
|
| +-global sym(vp8_temporal_filter_apply_sse2)
|
| ++global sym(vp8_temporal_filter_apply_sse2) PRIVATE
|
| + sym(vp8_temporal_filter_apply_sse2):
|
| +
|
| + push rbp
|
| +diff --git a/vp8/encoder/x86/variance_impl_mmx.asm b/libvpx/source/libvpx/vp8/encoder/x86/variance_impl_mmx.asm
|
| +index 67a9b4d..78122c7 100644
|
| +--- a/vp8/encoder/x86/variance_impl_mmx.asm
|
| ++++ b/vp8/encoder/x86/variance_impl_mmx.asm
|
| +@@ -12,7 +12,7 @@
|
| + %include "vpx_ports/x86_abi_support.asm"
|
| +
|
| + ;unsigned int vp8_get_mb_ss_mmx( short *src_ptr )
|
| +-global sym(vp8_get_mb_ss_mmx)
|
| ++global sym(vp8_get_mb_ss_mmx) PRIVATE
|
| + sym(vp8_get_mb_ss_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -72,7 +72,7 @@ NEXTROW:
|
| + ; unsigned int *SSE,
|
| + ; int *Sum
|
| + ;)
|
| +-global sym(vp8_get8x8var_mmx)
|
| ++global sym(vp8_get8x8var_mmx) PRIVATE
|
| + sym(vp8_get8x8var_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -320,7 +320,7 @@ sym(vp8_get8x8var_mmx):
|
| + ; unsigned int *SSE,
|
| + ; int *Sum
|
| + ;)
|
| +-global sym(vp8_get4x4var_mmx)
|
| ++global sym(vp8_get4x4var_mmx) PRIVATE
|
| + sym(vp8_get4x4var_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -433,7 +433,7 @@ sym(vp8_get4x4var_mmx):
|
| + ; unsigned char *ref_ptr,
|
| + ; int recon_stride
|
| + ;)
|
| +-global sym(vp8_get4x4sse_cs_mmx)
|
| ++global sym(vp8_get4x4sse_cs_mmx) PRIVATE
|
| + sym(vp8_get4x4sse_cs_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -522,7 +522,7 @@ sym(vp8_get4x4sse_cs_mmx):
|
| + ; int *sum,
|
| + ; unsigned int *sumsquared
|
| + ;)
|
| +-global sym(vp8_filter_block2d_bil4x4_var_mmx)
|
| ++global sym(vp8_filter_block2d_bil4x4_var_mmx) PRIVATE
|
| + sym(vp8_filter_block2d_bil4x4_var_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -667,7 +667,7 @@ filter_block2d_bil4x4_var_mmx_loop:
|
| + ; int *sum,
|
| + ; unsigned int *sumsquared
|
| + ;)
|
| +-global sym(vp8_filter_block2d_bil_var_mmx)
|
| ++global sym(vp8_filter_block2d_bil_var_mmx) PRIVATE
|
| + sym(vp8_filter_block2d_bil_var_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -850,7 +850,7 @@ filter_block2d_bil_var_mmx_loop:
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride
|
| + ;)
|
| +-global sym(vp8_get16x16pred_error_mmx)
|
| ++global sym(vp8_get16x16pred_error_mmx) PRIVATE
|
| + sym(vp8_get16x16pred_error_mmx):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/encoder/x86/variance_impl_sse2.asm b/libvpx/source/libvpx/vp8/encoder/x86/variance_impl_sse2.asm
|
| +index c2c30de..feb0601 100644
|
| +--- a/vp8/encoder/x86/variance_impl_sse2.asm
|
| ++++ b/vp8/encoder/x86/variance_impl_sse2.asm
|
| +@@ -17,7 +17,7 @@
|
| + ;(
|
| + ; short *src_ptr
|
| + ;)
|
| +-global sym(vp8_get_mb_ss_sse2)
|
| ++global sym(vp8_get_mb_ss_sse2) PRIVATE
|
| + sym(vp8_get_mb_ss_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -80,7 +80,7 @@ NEXTROW:
|
| + ; unsigned int * SSE,
|
| + ; int * Sum
|
| + ;)
|
| +-global sym(vp8_get16x16var_sse2)
|
| ++global sym(vp8_get16x16var_sse2) PRIVATE
|
| + sym(vp8_get16x16var_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -218,7 +218,7 @@ var16loop:
|
| + ; unsigned char *ref_ptr,
|
| + ; int ref_stride
|
| + ;)
|
| +-global sym(vp8_get16x16pred_error_sse2)
|
| ++global sym(vp8_get16x16pred_error_sse2) PRIVATE
|
| + sym(vp8_get16x16pred_error_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -336,7 +336,7 @@ var16peloop:
|
| + ; unsigned int * SSE,
|
| + ; int * Sum
|
| + ;)
|
| +-global sym(vp8_get8x8var_sse2)
|
| ++global sym(vp8_get8x8var_sse2) PRIVATE
|
| + sym(vp8_get8x8var_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -523,7 +523,7 @@ sym(vp8_get8x8var_sse2):
|
| + ; unsigned int *sumsquared;;
|
| + ;
|
| + ;)
|
| +-global sym(vp8_filter_block2d_bil_var_sse2)
|
| ++global sym(vp8_filter_block2d_bil_var_sse2) PRIVATE
|
| + sym(vp8_filter_block2d_bil_var_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -800,7 +800,7 @@ filter_block2d_bil_variance:
|
| + ; int *sum,
|
| + ; unsigned int *sumsquared
|
| + ;)
|
| +-global sym(vp8_half_horiz_vert_variance8x_h_sse2)
|
| ++global sym(vp8_half_horiz_vert_variance8x_h_sse2) PRIVATE
|
| + sym(vp8_half_horiz_vert_variance8x_h_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -920,7 +920,7 @@ vp8_half_horiz_vert_variance8x_h_1:
|
| + ; int *sum,
|
| + ; unsigned int *sumsquared
|
| + ;)
|
| +-global sym(vp8_half_horiz_vert_variance16x_h_sse2)
|
| ++global sym(vp8_half_horiz_vert_variance16x_h_sse2) PRIVATE
|
| + sym(vp8_half_horiz_vert_variance16x_h_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -1036,7 +1036,7 @@ vp8_half_horiz_vert_variance16x_h_1:
|
| + ; int *sum,
|
| + ; unsigned int *sumsquared
|
| + ;)
|
| +-global sym(vp8_half_vert_variance8x_h_sse2)
|
| ++global sym(vp8_half_vert_variance8x_h_sse2) PRIVATE
|
| + sym(vp8_half_vert_variance8x_h_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -1141,7 +1141,7 @@ vp8_half_vert_variance8x_h_1:
|
| + ; int *sum,
|
| + ; unsigned int *sumsquared
|
| + ;)
|
| +-global sym(vp8_half_vert_variance16x_h_sse2)
|
| ++global sym(vp8_half_vert_variance16x_h_sse2) PRIVATE
|
| + sym(vp8_half_vert_variance16x_h_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -1249,7 +1249,7 @@ vp8_half_vert_variance16x_h_1:
|
| + ; int *sum,
|
| + ; unsigned int *sumsquared
|
| + ;)
|
| +-global sym(vp8_half_horiz_variance8x_h_sse2)
|
| ++global sym(vp8_half_horiz_variance8x_h_sse2) PRIVATE
|
| + sym(vp8_half_horiz_variance8x_h_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +@@ -1352,7 +1352,7 @@ vp8_half_horiz_variance8x_h_1:
|
| + ; int *sum,
|
| + ; unsigned int *sumsquared
|
| + ;)
|
| +-global sym(vp8_half_horiz_variance16x_h_sse2)
|
| ++global sym(vp8_half_horiz_variance16x_h_sse2) PRIVATE
|
| + sym(vp8_half_horiz_variance16x_h_sse2):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vp8/encoder/x86/variance_impl_ssse3.asm b/libvpx/source/libvpx/vp8/encoder/x86/variance_impl_ssse3.asm
|
| +index 3c0fef9..ec7141a 100644
|
| +--- a/vp8/encoder/x86/variance_impl_ssse3.asm
|
| ++++ b/vp8/encoder/x86/variance_impl_ssse3.asm
|
| +@@ -29,7 +29,7 @@
|
| + ;)
|
| + ;Note: The filter coefficient at offset=0 is 128. Since the second register
|
| + ;for Pmaddubsw is signed bytes, we must calculate zero offset seperately.
|
| +-global sym(vp8_filter_block2d_bil_var_ssse3)
|
| ++global sym(vp8_filter_block2d_bil_var_ssse3) PRIVATE
|
| + sym(vp8_filter_block2d_bil_var_ssse3):
|
| + push rbp
|
| + mov rbp, rsp
|
| +diff --git a/vpx_ports/emms.asm b/libvpx/source/libvpx/vpx_ports/emms.asm
|
| +index 306e235..555279a5 100644
|
| +--- a/vpx_ports/emms.asm
|
| ++++ b/vpx_ports/emms.asm
|
| +@@ -12,14 +12,14 @@
|
| + %include "vpx_ports/x86_abi_support.asm"
|
| +
|
| + section .text
|
| +- global sym(vpx_reset_mmx_state)
|
| ++ global sym(vpx_reset_mmx_state) PRIVATE
|
| + sym(vpx_reset_mmx_state):
|
| + emms
|
| + ret
|
| +
|
| +
|
| + %ifidn __OUTPUT_FORMAT__,x64
|
| +-global sym(vpx_winx64_fldcw)
|
| ++global sym(vpx_winx64_fldcw) PRIVATE
|
| + sym(vpx_winx64_fldcw):
|
| + sub rsp, 8
|
| + mov [rsp], rcx ; win x64 specific
|
| +@@ -28,7 +28,7 @@ sym(vpx_winx64_fldcw):
|
| + ret
|
| +
|
| +
|
| +-global sym(vpx_winx64_fstcw)
|
| ++global sym(vpx_winx64_fstcw) PRIVATE
|
| + sym(vpx_winx64_fstcw):
|
| + sub rsp, 8
|
| + fstcw [rsp]
|
| +diff --git a/vpx_ports/x86_abi_support.asm b/libvpx/source/libvpx/vpx_ports/x86_abi_support.asm
|
| +index be64cd7..49ff009 100644
|
| +--- a/vpx_ports/x86_abi_support.asm
|
| ++++ b/vpx_ports/x86_abi_support.asm
|
| +@@ -92,6 +92,28 @@
|
| + %define sym(x) _ %+ x
|
| + %endif
|
| +
|
| ++
|
| ++; PRIVATE
|
| ++; Macro for the attribute to hide a global symbol for the target ABI.
|
| ++;
|
| ++; Chromium doesn't like exported global symbols due to symbol clashing with
|
| ++; plugins among other things.
|
| ++;
|
| ++; Requires Chromium's patched copy of yasm:
|
| ++; http://src.chromium.org/viewvc/chrome?view=rev&revision=73761
|
| ++; http://www.tortall.net/projects/yasm/ticket/236
|
| ++;
|
| ++%ifidn __OUTPUT_FORMAT__,elf32
|
| ++%define PRIVATE :hidden
|
| ++%elifidn __OUTPUT_FORMAT__,elf64
|
| ++%define PRIVATE :hidden
|
| ++%elifidn __OUTPUT_FORMAT__,x64
|
| ++%define PRIVATE
|
| ++%else
|
| ++%define PRIVATE :private_extern
|
| ++%endif
|
| ++
|
| ++
|
| + ; arg()
|
| + ; Return the address specification of the given argument
|
| + ;
|
| +--
|
| +1.7.3.1
|
| +
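Illustration (not part of the patch): a minimal standalone sketch of how the PRIVATE macro added to vpx_ports/x86_abi_support.asm above is intended to behave. The symbol name example_func is hypothetical, and assembling the ELF branches relies on the Chromium-patched yasm referenced in the macro's comment.

    ; Mirrors the PRIVATE definition introduced by this patch (illustration only).
    %ifidn __OUTPUT_FORMAT__,elf32
    %define PRIVATE :hidden            ; ELF: give the global symbol hidden visibility
    %elifidn __OUTPUT_FORMAT__,elf64
    %define PRIVATE :hidden
    %elifidn __OUTPUT_FORMAT__,x64
    %define PRIVATE                    ; win64: no visibility attribute, expands to nothing
    %else
    %define PRIVATE :private_extern    ; Mach-O analogue of hidden
    %endif

    section .text
    global example_func PRIVATE        ; on ELF this becomes "global example_func :hidden"
    example_func:
        ret

The symbol remains global within the final binary, but with hidden (or private_extern) visibility it should not be exported from a shared library; on an ELF build this can be checked with readelf -s, where example_func should be reported with HIDDEN visibility and should no longer appear in the .so's dynamic symbol table.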