Chromium Code Reviews

Unified Diff: source/libvpx/vpx_dsp/x86/sad4d_avx2.c

Issue 1162573005: libvpx: Pull from upstream (Closed)
Base URL: https://chromium.googlesource.com/chromium/deps/libvpx.git@master
Patch Set: Created 5 years, 6 months ago
 /*
  * Copyright (c) 2014 The WebM project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */
 #include <immintrin.h>  // AVX2
+#include "./vpx_dsp_rtcd.h"
 #include "vpx/vpx_integer.h"

-void vpx_sad32x32x4d_avx2(uint8_t *src,
+void vpx_sad32x32x4d_avx2(const uint8_t *src,
                           int src_stride,
-                          uint8_t *ref[4],
+                          const uint8_t *const ref[4],
                           int ref_stride,
                           uint32_t res[4]) {
   __m256i src_reg, ref0_reg, ref1_reg, ref2_reg, ref3_reg;
   __m256i sum_ref0, sum_ref1, sum_ref2, sum_ref3;
   __m256i sum_mlow, sum_mhigh;
   int i;
-  uint8_t *ref0, *ref1, *ref2, *ref3;
+  const uint8_t *ref0, *ref1, *ref2, *ref3;

   ref0 = ref[0];
   ref1 = ref[1];
   ref2 = ref[2];
   ref3 = ref[3];
   sum_ref0 = _mm256_set1_epi16(0);
   sum_ref1 = _mm256_set1_epi16(0);
   sum_ref2 = _mm256_set1_epi16(0);
   sum_ref3 = _mm256_set1_epi16(0);
   for (i = 0; i < 32 ; i++) {
     // load src and all refs
-    src_reg = _mm256_loadu_si256((__m256i *)(src));
-    ref0_reg = _mm256_loadu_si256((__m256i *) (ref0));
-    ref1_reg = _mm256_loadu_si256((__m256i *) (ref1));
-    ref2_reg = _mm256_loadu_si256((__m256i *) (ref2));
-    ref3_reg = _mm256_loadu_si256((__m256i *) (ref3));
+    src_reg = _mm256_loadu_si256((const __m256i *)src);
+    ref0_reg = _mm256_loadu_si256((const __m256i *)ref0);
+    ref1_reg = _mm256_loadu_si256((const __m256i *)ref1);
+    ref2_reg = _mm256_loadu_si256((const __m256i *)ref2);
+    ref3_reg = _mm256_loadu_si256((const __m256i *)ref3);
     // sum of the absolute differences between every ref-i to src
     ref0_reg = _mm256_sad_epu8(ref0_reg, src_reg);
     ref1_reg = _mm256_sad_epu8(ref1_reg, src_reg);
     ref2_reg = _mm256_sad_epu8(ref2_reg, src_reg);
     ref3_reg = _mm256_sad_epu8(ref3_reg, src_reg);
     // sum every ref-i
     sum_ref0 = _mm256_add_epi32(sum_ref0, ref0_reg);
     sum_ref1 = _mm256_add_epi32(sum_ref1, ref1_reg);
     sum_ref2 = _mm256_add_epi32(sum_ref2, ref2_reg);
     sum_ref3 = _mm256_add_epi32(sum_ref3, ref3_reg);
(...skipping 24 matching lines...)
     sum_mlow = _mm256_add_epi32(sum_mlow, sum_mhigh);

     // add the low 128 bit to the high 128 bit
     sum = _mm_add_epi32(_mm256_castsi256_si128(sum_mlow),
                         _mm256_extractf128_si256(sum_mlow, 1));

     _mm_storeu_si128((__m128i *)(res), sum);
   }
 }

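Note: the two kernels in this file compute a "4D" SAD, one 32x32 (or 64x64) source block matched against four candidate reference blocks in a single pass, with the four totals written to res[0..3]. For reference, a minimal scalar sketch of the 32x32 case, assuming only the parameter conventions visible in the signature above (sad32x32x4d_ref is a hypothetical name, not part of this change):

#include <stdint.h>
#include <stdlib.h>

// Scalar model of the 32x32 4D SAD: for each of the four candidate
// reference blocks, accumulate |src - ref| over the whole block.
static void sad32x32x4d_ref(const uint8_t *src, int src_stride,
                            const uint8_t *const ref[4], int ref_stride,
                            uint32_t res[4]) {
  int r, row, col;
  for (r = 0; r < 4; ++r) {
    const uint8_t *s = src;
    const uint8_t *p = ref[r];
    uint32_t sad = 0;
    for (row = 0; row < 32; ++row) {
      for (col = 0; col < 32; ++col)
        sad += (uint32_t)abs(s[col] - p[col]);
      s += src_stride;
      p += ref_stride;
    }
    res[r] = sad;
  }
}

The AVX2 version covers a full 32-byte row with one _mm256_sad_epu8; the 64x64 kernel below handles each row as two 32-byte halves (the *next_reg pairs).
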
-void vpx_sad64x64x4d_avx2(uint8_t *src,
+void vpx_sad64x64x4d_avx2(const uint8_t *src,
                           int src_stride,
-                          uint8_t *ref[4],
+                          const uint8_t *const ref[4],
                           int ref_stride,
                           uint32_t res[4]) {
   __m256i src_reg, srcnext_reg, ref0_reg, ref0next_reg;
   __m256i ref1_reg, ref1next_reg, ref2_reg, ref2next_reg;
   __m256i ref3_reg, ref3next_reg;
   __m256i sum_ref0, sum_ref1, sum_ref2, sum_ref3;
   __m256i sum_mlow, sum_mhigh;
   int i;
-  uint8_t *ref0, *ref1, *ref2, *ref3;
+  const uint8_t *ref0, *ref1, *ref2, *ref3;

   ref0 = ref[0];
   ref1 = ref[1];
   ref2 = ref[2];
   ref3 = ref[3];
   sum_ref0 = _mm256_set1_epi16(0);
   sum_ref1 = _mm256_set1_epi16(0);
   sum_ref2 = _mm256_set1_epi16(0);
   sum_ref3 = _mm256_set1_epi16(0);
   for (i = 0; i < 64 ; i++) {
     // load 64 bytes from src and all refs
-    src_reg = _mm256_loadu_si256((__m256i *)(src));
-    srcnext_reg = _mm256_loadu_si256((__m256i *)(src + 32));
-    ref0_reg = _mm256_loadu_si256((__m256i *) (ref0));
-    ref0next_reg = _mm256_loadu_si256((__m256i *) (ref0 + 32));
-    ref1_reg = _mm256_loadu_si256((__m256i *) (ref1));
-    ref1next_reg = _mm256_loadu_si256((__m256i *) (ref1 + 32));
-    ref2_reg = _mm256_loadu_si256((__m256i *) (ref2));
-    ref2next_reg = _mm256_loadu_si256((__m256i *) (ref2 + 32));
-    ref3_reg = _mm256_loadu_si256((__m256i *) (ref3));
-    ref3next_reg = _mm256_loadu_si256((__m256i *) (ref3 + 32));
+    src_reg = _mm256_loadu_si256((const __m256i *)src);
+    srcnext_reg = _mm256_loadu_si256((const __m256i *)(src + 32));
+    ref0_reg = _mm256_loadu_si256((const __m256i *)ref0);
+    ref0next_reg = _mm256_loadu_si256((const __m256i *)(ref0 + 32));
+    ref1_reg = _mm256_loadu_si256((const __m256i *)ref1);
+    ref1next_reg = _mm256_loadu_si256((const __m256i *)(ref1 + 32));
+    ref2_reg = _mm256_loadu_si256((const __m256i *)ref2);
+    ref2next_reg = _mm256_loadu_si256((const __m256i *)(ref2 + 32));
+    ref3_reg = _mm256_loadu_si256((const __m256i *)ref3);
+    ref3next_reg = _mm256_loadu_si256((const __m256i *)(ref3 + 32));
     // sum of the absolute differences between every ref-i to src
     ref0_reg = _mm256_sad_epu8(ref0_reg, src_reg);
     ref1_reg = _mm256_sad_epu8(ref1_reg, src_reg);
     ref2_reg = _mm256_sad_epu8(ref2_reg, src_reg);
     ref3_reg = _mm256_sad_epu8(ref3_reg, src_reg);
     ref0next_reg = _mm256_sad_epu8(ref0next_reg, srcnext_reg);
     ref1next_reg = _mm256_sad_epu8(ref1next_reg, srcnext_reg);
     ref2next_reg = _mm256_sad_epu8(ref2next_reg, srcnext_reg);
     ref3next_reg = _mm256_sad_epu8(ref3next_reg, srcnext_reg);

(...skipping 32 matching lines...)
     // add the low 64 bit to the high 64 bit
     sum_mlow = _mm256_add_epi32(sum_mlow, sum_mhigh);

     // add the low 128 bit to the high 128 bit
     sum = _mm_add_epi32(_mm256_castsi256_si128(sum_mlow),
                         _mm256_extractf128_si256(sum_mlow, 1));

     _mm_storeu_si128((__m128i *)(res), sum);
   }
 }
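
Note on the tail shared by both kernels: _mm256_sad_epu8 leaves one partial sum per 64-bit lane, so after the row loop each sum_ref-i register holds four partial SADs that must be folded across registers and lanes before one 128-bit store can write res[0..3]. A sketch of such a merge, assuming steps consistent with the visible tail of the functions (the skipped diff lines may differ in detail):

#include <immintrin.h>

// Fold four SAD accumulators (one per candidate ref) into a single
// __m128i holding the four 32-bit totals.  Each acc carries four
// 64-bit partial sums as produced by _mm256_sad_epu8.
static __m128i merge_4d_sums(__m256i acc0, __m256i acc1,
                             __m256i acc2, __m256i acc3) {
  __m256i lo, hi;
  // Shift acc1/acc3 up by 4 bytes within each 128-bit lane so their
  // partials land in the zeroed 32-bit slots next to acc0/acc2.
  acc1 = _mm256_slli_si256(acc1, 4);
  acc3 = _mm256_slli_si256(acc3, 4);
  acc0 = _mm256_or_si256(acc0, acc1);  // per lane: [r0 r1 r0' r1']
  acc2 = _mm256_or_si256(acc2, acc3);  // per lane: [r2 r3 r2' r3']
  // Interleave 64-bit halves so each lane reads [r0 r1 r2 r3].
  lo = _mm256_unpacklo_epi64(acc0, acc2);
  hi = _mm256_unpackhi_epi64(acc0, acc2);
  // add the low 64 bit to the high 64 bit
  lo = _mm256_add_epi32(lo, hi);
  // add the low 128 bit to the high 128 bit
  return _mm_add_epi32(_mm256_castsi256_si128(lo),
                       _mm256_extractf128_si256(lo, 1));
}

The 32-bit adds are safe throughout: even for 64x64, each 64-bit partial sum tops out around 64 * 16 * 255, far below 2^32, so the upper halves stay zero.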