Chromium Code Reviews

Unified Diff: source/libvpx/vpx_dsp/x86/sad_avx2.c

Issue 1124333011: libvpx: Pull from upstream (Closed) | Base URL: https://chromium.googlesource.com/chromium/deps/libvpx.git@master
Patch Set: only update to last night's LKGR | Created 5 years, 7 months ago
 /*
  * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */
 #include <immintrin.h>
 #include "vpx_ports/mem.h"

 #define FSAD64_H(h) \
-unsigned int vp9_sad64x##h##_avx2(const uint8_t *src_ptr, \
+unsigned int vpx_sad64x##h##_avx2(const uint8_t *src_ptr, \
                                   int src_stride, \
                                   const uint8_t *ref_ptr, \
                                   int ref_stride) { \
   int i, res; \
   __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
   __m256i sum_sad = _mm256_setzero_si256(); \
   __m256i sum_sad_h; \
   __m128i sum_sad128; \
   for (i = 0 ; i < h ; i++) { \
     ref1_reg = _mm256_loadu_si256((__m256i const *)ref_ptr); \
     ref2_reg = _mm256_loadu_si256((__m256i const *)(ref_ptr + 32)); \
     sad1_reg = _mm256_sad_epu8(ref1_reg, \
                _mm256_loadu_si256((__m256i const *)src_ptr)); \
     sad2_reg = _mm256_sad_epu8(ref2_reg, \
                _mm256_loadu_si256((__m256i const *)(src_ptr + 32))); \
     sum_sad = _mm256_add_epi32(sum_sad, _mm256_add_epi32(sad1_reg, sad2_reg)); \
     ref_ptr+= ref_stride; \
     src_ptr+= src_stride; \
   } \
   sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
   sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \
   sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \
   sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \
   res = _mm_cvtsi128_si32(sum_sad128); \
   return res; \
 }
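Note on the hunk above: each loop iteration loads one 64-byte row as two 32-byte halves, and _mm256_sad_epu8 sums absolute byte differences into four 64-bit counters per 256-bit register; the tail folds those counters (byte-shift by 8 within each 128-bit lane, add, fold the high lane into the low one, extract the low 32 bits). A minimal scalar sketch of the same computation, for reference only; the helper name sad64xh_c_ref is made up here, not part of this CL:

#include <stdint.h>
#include <stdlib.h>

/* Scalar reference: sum of absolute differences over a 64-wide, h-tall block. */
static unsigned int sad64xh_c_ref(const uint8_t *src_ptr, int src_stride,
                                  const uint8_t *ref_ptr, int ref_stride,
                                  int h) {
  unsigned int sad = 0;
  int i, j;
  for (i = 0; i < h; i++) {
    for (j = 0; j < 64; j++)
      sad += abs(src_ptr[j] - ref_ptr[j]);  /* per-byte absolute difference */
    src_ptr += src_stride;                  /* advance both pointers one row */
    ref_ptr += ref_stride;
  }
  return sad;
}

The SIMD version needs no widening beyond what _mm256_sad_epu8 provides: per-row partial sums accumulate in 64-bit lanes, which cannot overflow for these block heights.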

 #define FSAD32_H(h) \
-unsigned int vp9_sad32x##h##_avx2(const uint8_t *src_ptr, \
+unsigned int vpx_sad32x##h##_avx2(const uint8_t *src_ptr, \
                                   int src_stride, \
                                   const uint8_t *ref_ptr, \
                                   int ref_stride) { \
   int i, res; \
   __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
   __m256i sum_sad = _mm256_setzero_si256(); \
   __m256i sum_sad_h; \
   __m128i sum_sad128; \
   int ref2_stride = ref_stride << 1; \
   int src2_stride = src_stride << 1; \
(...skipping 28 matching lines...)

 FSAD64;
 FSAD32;

 #undef FSAD64
 #undef FSAD32
 #undef FSAD64_H
 #undef FSAD32_H

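The FSAD32_H variant precomputes doubled strides (ref2_stride, src2_stride), consistent with processing two 32-byte rows per iteration; its loop body falls in the elided lines. Assuming, as in upstream libvpx, that FSAD64 expands to FSAD64_H(64); FSAD64_H(32); (that definition is also elided here), the instantiations above emit vpx_sad64x64_avx2 among others, which can be checked against the scalar sketch:

#include <assert.h>

/* Declared by the FSAD64_H(64) instantiation above (an assumption, as noted
 * in the lead-in); the test pattern below is arbitrary filler. */
unsigned int vpx_sad64x64_avx2(const uint8_t *src_ptr, int src_stride,
                               const uint8_t *ref_ptr, int ref_stride);

static void check_sad64x64(void) {
  static uint8_t src[64 * 64], ref[64 * 64];
  unsigned int i;
  for (i = 0; i < sizeof(src); i++) {
    src[i] = (uint8_t)(i * 31);      /* deterministic test values */
    ref[i] = (uint8_t)(i * 17 + 5);
  }
  assert(vpx_sad64x64_avx2(src, 64, ref, 64) ==
         sad64xh_c_ref(src, 64, ref, 64, 64));
}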
 #define FSADAVG64_H(h) \
-unsigned int vp9_sad64x##h##_avg_avx2(const uint8_t *src_ptr, \
+unsigned int vpx_sad64x##h##_avg_avx2(const uint8_t *src_ptr, \
                                       int src_stride, \
                                       const uint8_t *ref_ptr, \
                                       int ref_stride, \
                                       const uint8_t *second_pred) { \
   int i, res; \
   __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
   __m256i sum_sad = _mm256_setzero_si256(); \
   __m256i sum_sad_h; \
   __m128i sum_sad128; \
   for (i = 0 ; i < h ; i++) { \
(...skipping 14 matching lines...)
   } \
   sum_sad_h = _mm256_srli_si256(sum_sad, 8); \
   sum_sad = _mm256_add_epi32(sum_sad, sum_sad_h); \
   sum_sad128 = _mm256_extracti128_si256(sum_sad, 1); \
   sum_sad128 = _mm_add_epi32(_mm256_castsi256_si128(sum_sad), sum_sad128); \
   res = _mm_cvtsi128_si32(sum_sad128); \
   return res; \
 }
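The *_avg_avx2 hunk keeps the same reduction tail; its loop body is elided, but upstream blends ref with second_pred via _mm256_avg_epu8, a rounding average equivalent to (a + b + 1) >> 1, before taking the SAD. A scalar sketch of that behavior, again with a made-up helper name, and assuming second_pred is stored contiguously at the block width as is the libvpx convention:

/* Scalar reference for the averaging SAD: ref is first blended with the
 * second predictor using a rounding average, then compared against src. */
static unsigned int sad64xh_avg_c_ref(const uint8_t *src_ptr, int src_stride,
                                      const uint8_t *ref_ptr, int ref_stride,
                                      const uint8_t *second_pred, int h) {
  unsigned int sad = 0;
  int i, j;
  for (i = 0; i < h; i++) {
    for (j = 0; j < 64; j++) {
      const int avg = (ref_ptr[j] + second_pred[j] + 1) >> 1;  /* rounds up on ties */
      sad += abs(src_ptr[j] - avg);
    }
    src_ptr += src_stride;
    ref_ptr += ref_stride;
    second_pred += 64;  /* assumed contiguous, stride == block width */
  }
  return sad;
}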

 #define FSADAVG32_H(h) \
-unsigned int vp9_sad32x##h##_avg_avx2(const uint8_t *src_ptr, \
+unsigned int vpx_sad32x##h##_avg_avx2(const uint8_t *src_ptr, \
                                       int src_stride, \
                                       const uint8_t *ref_ptr, \
                                       int ref_stride, \
                                       const uint8_t *second_pred) { \
   int i, res; \
   __m256i sad1_reg, sad2_reg, ref1_reg, ref2_reg; \
   __m256i sum_sad = _mm256_setzero_si256(); \
   __m256i sum_sad_h; \
   __m128i sum_sad128; \
   int ref2_stride = ref_stride << 1; \
(...skipping 33 matching lines...)
 FSADAVG32_H(32); \
 FSADAVG32_H(16);

 FSADAVG64;
 FSADAVG32;

 #undef FSADAVG64
 #undef FSADAVG32
 #undef FSADAVG64_H
 #undef FSADAVG32_H
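After the #undefs, the instantiations leave these AVX2 entry points defined. The 32-wide pair follows from the visible FSADAVG32_H(32) and FSADAVG32_H(16); the 64-wide heights (64 and 32) are an assumption from upstream libvpx, since the FSADAVG64 list is elided in this hunk:

/* Entry points produced by the FSADAVG* instantiations above. */
unsigned int vpx_sad64x64_avg_avx2(const uint8_t *src_ptr, int src_stride,
                                   const uint8_t *ref_ptr, int ref_stride,
                                   const uint8_t *second_pred);  /* assumed */
unsigned int vpx_sad64x32_avg_avx2(const uint8_t *src_ptr, int src_stride,
                                   const uint8_t *ref_ptr, int ref_stride,
                                   const uint8_t *second_pred);  /* assumed */
unsigned int vpx_sad32x32_avg_avx2(const uint8_t *src_ptr, int src_stride,
                                   const uint8_t *ref_ptr, int ref_stride,
                                   const uint8_t *second_pred);
unsigned int vpx_sad32x16_avg_avx2(const uint8_t *src_ptr, int src_stride,
                                   const uint8_t *ref_ptr, int ref_stride,
                                   const uint8_t *second_pred);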