OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ |
| 10 |
| 11 #include <emmintrin.h> // SSE2 |
| 12 #include "./vpx_config.h" |
| 13 #include "./vp9_rtcd.h" |
| 14 #include "vpx/vpx_integer.h" |
| 15 |
| 16 #if HAVE_SSE2 |
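| /* Sum of absolute differences over a 16x3 block. Each _mm_sad_epu8 yields |
|  * two 16-bit partial sums (one per 8-byte half of the row); the final |
|  * srli/add folds the upper half onto the lower so the total ends up in the |
|  * low 32 bits of the register. */ |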
| 17 unsigned int vp9_sad16x3_sse2( |
| 18 const unsigned char *src_ptr, |
| 19 int src_stride, |
| 20 const unsigned char *ref_ptr, |
| 21 int ref_stride) { |
| 22 __m128i s0, s1, s2; |
| 23 __m128i r0, r1, r2; |
| 24 __m128i sad; |
| 25 |
| 26 s0 = _mm_loadu_si128((const __m128i *)(src_ptr + 0 * src_stride)); |
| 27 s1 = _mm_loadu_si128((const __m128i *)(src_ptr + 1 * src_stride)); |
| 28 s2 = _mm_loadu_si128((const __m128i *)(src_ptr + 2 * src_stride)); |
| 29 |
| 30 r0 = _mm_loadu_si128((const __m128i *)(ref_ptr + 0 * ref_stride)); |
| 31 r1 = _mm_loadu_si128((const __m128i *)(ref_ptr + 1 * ref_stride)); |
| 32 r2 = _mm_loadu_si128((const __m128i *)(ref_ptr + 2 * ref_stride)); |
| 33 |
| 34 sad = _mm_sad_epu8(s0, r0); |
| 35 sad = _mm_add_epi16(sad, _mm_sad_epu8(s1, r1)); |
| 36 sad = _mm_add_epi16(sad, _mm_sad_epu8(s2, r2)); |
| 37 sad = _mm_add_epi16(sad, _mm_srli_si128(sad, 8)); |
| 38 |
| 39 return _mm_cvtsi128_si32(sad); |
| 40 } |
| 41 |
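| /* Sum of absolute differences over a 3x16 block. Four rows are loaded as |
|  * 4-byte words, interleaved into one register, and the unwanted byte of |
|  * each row is discarded inside the loop so only 3 bytes per row contribute. */ |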
| 42 unsigned int vp9_sad3x16_sse2( |
| 43 const unsigned char *src_ptr, |
| 44 int src_stride, |
| 45 const unsigned char *ref_ptr, |
| 46 int ref_stride) { |
| 47 int r; |
| 48 __m128i s0, s1, s2, s3; |
| 49 __m128i r0, r1, r2, r3; |
| 50 __m128i sad = _mm_setzero_si128(); |
| 51 __m128i mask; |
| 52 const int offset = (uintptr_t)src_ptr & 3; |
| 53 |
| 54 /* In the current use case, the offset is 1 when CONFIG_SUBPELREFMV is off. |
| 55 * For offset == 1, move src_ptr back one byte so it becomes 4-byte aligned |
| 56 * and the movd loads are much cheaper; the extra leading byte is masked |
| 57 * out in the loop below. */ |
| 58 if (offset == 1) |
| 59 src_ptr -= 1; |
| 60 |
| 61 /* mask = 0xffffffffffff0000ffffffffffff0000 */ |
| 62 mask = _mm_cmpeq_epi32(sad, sad); |
| 63 mask = _mm_slli_epi64(mask, 16); |
| 64 |
| 65 for (r = 0; r < 16; r += 4) { |
| 66 s0 = _mm_cvtsi32_si128(*(const int *)(src_ptr + 0 * src_stride)); |
| 67 s1 = _mm_cvtsi32_si128(*(const int *)(src_ptr + 1 * src_stride)); |
| 68 s2 = _mm_cvtsi32_si128(*(const int *)(src_ptr + 2 * src_stride)); |
| 69 s3 = _mm_cvtsi32_si128(*(const int *)(src_ptr + 3 * src_stride)); |
| 70 r0 = _mm_cvtsi32_si128(*(const int *)(ref_ptr + 0 * ref_stride)); |
| 71 r1 = _mm_cvtsi32_si128(*(const int *)(ref_ptr + 1 * ref_stride)); |
| 72 r2 = _mm_cvtsi32_si128(*(const int *)(ref_ptr + 2 * ref_stride)); |
| 73 r3 = _mm_cvtsi32_si128(*(const int *)(ref_ptr + 3 * ref_stride)); |
| 74 |
| 75 s0 = _mm_unpacklo_epi8(s0, s1); |
| 76 r0 = _mm_unpacklo_epi8(r0, r1); |
| 77 s2 = _mm_unpacklo_epi8(s2, s3); |
| 78 r2 = _mm_unpacklo_epi8(r2, r3); |
| 79 s0 = _mm_unpacklo_epi64(s0, s2); |
| 80 r0 = _mm_unpacklo_epi64(r0, r2); |
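| /* Each 64-bit half now holds two rows' 4-byte loads interleaved byte by |
|  * byte, so the two unwanted bytes in each half share a single 16-bit lane. */ |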
| 81 |
| 82 // Drop the unwanted src byte pair (leading if offset == 1, trailing otherwise) and shift ref so its bytes stay aligned with src. |
| 83 if (offset == 1) |
| 84 s0 = _mm_and_si128(s0, mask); |
| 85 else |
| 86 s0 = _mm_slli_epi64(s0, 16); |
| 87 r0 = _mm_slli_epi64(r0, 16); |
| 88 |
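| /* Each 64-bit half of sad accumulates the SAD of two of the four rows. */ |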
| 89 sad = _mm_add_epi16(sad, _mm_sad_epu8(s0, r0)); |
| 90 |
| 91 src_ptr += src_stride * 4; |
| 92 ref_ptr += ref_stride * 4; |
| 93 } |
| 94 |
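| /* Fold the upper 64-bit partial sum onto the lower one; the 3x16 total is |
|  * at most 3 * 16 * 255, so it fits comfortably in 16 bits. */ |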
| 95 sad = _mm_add_epi16(sad, _mm_srli_si128(sad, 8)); |
| 96 return _mm_cvtsi128_si32(sad); |
| 97 } |
| 98 |
| 99 #endif |
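
For reference, a minimal scalar sketch of what the two kernels above compute; the helper name sad_wxh_c is illustrative only and not part of this patch:

    static unsigned int sad_wxh_c(const unsigned char *src, int src_stride,
                                  const unsigned char *ref, int ref_stride,
                                  int width, int height) {
      unsigned int sad = 0;
      int r, c;
      for (r = 0; r < height; ++r) {
        for (c = 0; c < width; ++c) {
          const int diff = src[c] - ref[c];
          sad += diff < 0 ? -diff : diff;  /* per-pixel absolute difference */
        }
        src += src_stride;
        ref += ref_stride;
      }
      return sad;
    }

vp9_sad16x3_sse2 should match sad_wxh_c(..., 16, 3), and vp9_sad3x16_sse2 should match sad_wxh_c(..., 3, 16).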