Chromium Code Reviews

Side by Side Diff: source/libvpx/vp9/common/x86/vp9_asm_stubs.c

Issue 1124333011: libvpx: Pull from upstream (Closed)
Base URL: https://chromium.googlesource.com/chromium/deps/libvpx.git@master
Patch Set: only update to last night's LKGR (created 5 years, 7 months ago)
/*
 * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

(...skipping 100 matching lines...)
 void vp9_convolve8_##avg##opt(const uint8_t *src, ptrdiff_t src_stride, \
                               uint8_t *dst, ptrdiff_t dst_stride, \
                               const int16_t *filter_x, int x_step_q4, \
                               const int16_t *filter_y, int y_step_q4, \
                               int w, int h) { \
   assert(w <= 64); \
   assert(h <= 64); \
   if (x_step_q4 == 16 && y_step_q4 == 16) { \
     if (filter_x[0] || filter_x[1] || filter_x[2] || filter_x[3] == 128 || \
         filter_y[0] || filter_y[1] || filter_y[2] || filter_y[3] == 128) { \
-      DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64 * 71); \
+      DECLARE_ALIGNED(16, unsigned char, fdata2[64 * 71]); \
       vp9_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, fdata2, 64, \
                                 filter_x, x_step_q4, filter_y, y_step_q4, \
                                 w, h + 7); \
       vp9_convolve8_##avg##vert_##opt(fdata2 + 3 * 64, 64, dst, dst_stride, \
                                       filter_x, x_step_q4, filter_y, \
                                       y_step_q4, w, h); \
     } else { \
-      DECLARE_ALIGNED_ARRAY(16, unsigned char, fdata2, 64 * 65); \
+      DECLARE_ALIGNED(16, unsigned char, fdata2[64 * 65]); \
       vp9_convolve8_horiz_##opt(src, src_stride, fdata2, 64, \
                                 filter_x, x_step_q4, filter_y, y_step_q4, \
                                 w, h + 1); \
       vp9_convolve8_##avg##vert_##opt(fdata2, 64, dst, dst_stride, \
                                       filter_x, x_step_q4, filter_y, \
                                       y_step_q4, w, h); \
     } \
   } else { \
     vp9_convolve8_##avg##c(src, src_stride, dst, dst_stride, \
                            filter_x, x_step_q4, filter_y, y_step_q4, w, h); \
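
Note (not part of the patch): the wrapper above performs the 2D convolution in two passes. The horizontal filter writes into a fixed 64-wide scratch buffer, sized 64 * 71 so there is room for the 7 extra rows (3 above, 4 below) that the 8-tap vertical filter needs around the h output rows; the vertical pass then starts 3 rows in, at fdata2 + 3 * 64. A minimal standalone sketch of that sizing arithmetic, assuming only the 8-tap geometry implied by the src - 3 * src_stride and h + 7 arguments shown above (the constant names are ours, for illustration):

/* Illustration only: constant names are ours; values restate the code above. */
#include <stdio.h>

int main(void) {
  const int max_block = 64;        /* w and h are asserted to be <= 64 */
  const int taps = 8;              /* 8-tap subpel filters */
  const int extra_rows = taps - 1; /* 7 context rows for the vertical pass */
  const int stride = 64;           /* fixed stride of the scratch buffer */

  /* 64 * 71: enough rows to hold the h + 7 horizontal-pass output rows. */
  printf("scratch elements: %d\n", stride * (max_block + extra_rows));
  /* The vertical pass starts 3 rows in, matching fdata2 + 3 * 64 and the
   * src - 3 * src_stride offset fed to the horizontal pass. */
  printf("vertical-pass start offset: %d\n", (taps / 2 - 1) * stride);
  return 0;
}
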
(...skipping 112 matching lines...)
 void vp9_highbd_convolve8_##avg##opt(const uint8_t *src, ptrdiff_t src_stride, \
                                      uint8_t *dst, ptrdiff_t dst_stride, \
                                      const int16_t *filter_x, int x_step_q4, \
                                      const int16_t *filter_y, int y_step_q4, \
                                      int w, int h, int bd) { \
   assert(w <= 64); \
   assert(h <= 64); \
   if (x_step_q4 == 16 && y_step_q4 == 16) { \
     if (filter_x[0] || filter_x[1] || filter_x[2] || filter_x[3] == 128 || \
         filter_y[0] || filter_y[1] || filter_y[2] || filter_y[3] == 128) { \
-      DECLARE_ALIGNED_ARRAY(16, uint16_t, fdata2, 64 * 71); \
+      DECLARE_ALIGNED(16, uint16_t, fdata2[64 * 71]); \
       vp9_highbd_convolve8_horiz_##opt(src - 3 * src_stride, src_stride, \
                                        CONVERT_TO_BYTEPTR(fdata2), 64, \
                                        filter_x, x_step_q4, \
                                        filter_y, y_step_q4, \
                                        w, h + 7, bd); \
       vp9_highbd_convolve8_##avg##vert_##opt(CONVERT_TO_BYTEPTR(fdata2) + 192, \
                                              64, dst, dst_stride, \
                                              filter_x, x_step_q4, \
                                              filter_y, y_step_q4, \
                                              w, h, bd); \
     } else { \
-      DECLARE_ALIGNED_ARRAY(16, uint16_t, fdata2, 64 * 65); \
+      DECLARE_ALIGNED(16, uint16_t, fdata2[64 * 65]); \
       vp9_highbd_convolve8_horiz_##opt(src, src_stride, \
                                        CONVERT_TO_BYTEPTR(fdata2), 64, \
                                        filter_x, x_step_q4, \
                                        filter_y, y_step_q4, \
                                        w, h + 1, bd); \
       vp9_highbd_convolve8_##avg##vert_##opt(CONVERT_TO_BYTEPTR(fdata2), 64, \
                                              dst, dst_stride, \
                                              filter_x, x_step_q4, \
                                              filter_y, y_step_q4, \
                                              w, h, bd); \
(...skipping 280 matching lines...)
 //                                int w, int h, int bd);
 // void vp9_highbd_convolve8_avg_sse2(const uint8_t *src, ptrdiff_t src_stride,
 //                                    uint8_t *dst, ptrdiff_t dst_stride,
 //                                    const int16_t *filter_x, int x_step_q4,
 //                                    const int16_t *filter_y, int y_step_q4,
 //                                    int w, int h, int bd);
 HIGH_FUN_CONV_2D(, sse2);
 HIGH_FUN_CONV_2D(avg_ , sse2);
 #endif  // CONFIG_VP9_HIGHBITDEPTH && ARCH_X86_64
 #endif  // HAVE_SSE2
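
The substantive change in this file is the switch from DECLARE_ALIGNED_ARRAY(align, type, name, size) to DECLARE_ALIGNED(align, type, name[size]) for the on-stack scratch buffers, as part of this pull from upstream libvpx. The following is a minimal sketch of the general shape of the two approaches; the macro definitions below are illustrative assumptions, not copied from vpx_ports/mem.h. The old-style macro over-allocates and rounds a pointer up by hand, while the new-style form asks the compiler to align the array itself.

/* Illustrative sketch only: these definitions approximate the idea behind the
 * two macros; they are not the actual libvpx definitions. */
#include <stdint.h>
#include <stdio.h>

/* New style: let the compiler align the array itself (GCC/Clang syntax shown;
 * MSVC would use __declspec(align(n)) instead). */
#define DECLARE_ALIGNED(n, typ, val) typ val __attribute__((aligned(n)))

/* Old style (approximate): over-allocate, then round a pointer up by hand. */
#define DECLARE_ALIGNED_ARRAY(a, typ, val, n)  \
  typ val##_raw[(n) + (a) / sizeof(typ) + 1];  \
  typ *val = (typ *)(((uintptr_t)(val##_raw) + (a) - 1) & ~(uintptr_t)((a) - 1))

int main(void) {
  DECLARE_ALIGNED(16, unsigned char, new_buf[64 * 71]);
  DECLARE_ALIGNED_ARRAY(16, unsigned char, old_buf, 64 * 71);
  /* Both buffers end up 16-byte aligned; the new form is a plain array, so
   * sizeof() and array indexing behave as expected. */
  printf("new_buf alignment remainder: %u\n", (unsigned)((uintptr_t)new_buf % 16));
  printf("old_buf alignment remainder: %u\n", (unsigned)((uintptr_t)old_buf % 16));
  return 0;
}

With the array form there is no hidden shadow variable or pointer fix-up, which is why the declarations in the diff change from a (name, size) pair to a single name[size] declarator.
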
