OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright (c) 2014 The WebM project authors. All Rights Reserved. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ |
| 10 |
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <arm_neon.h>
| 13 |
// Plain block copy used when both filters are the identity (no filtering).
// Copies an h-row block of width w from src to dst, one row at a time.
// The filter_x/filter_y arguments exist only to match the generic
// convolve prototype and are ignored.
//
// Width dispatch mirrors the original assembly: 64, 32, 16, 8, and <=4
// byte rows. The 32/16/8 paths process two rows per iteration, which
// assumes h is even for those widths (true for all VP9 block sizes).
void vp9_convolve_copy_neon(
    const uint8_t *src,    // r0
    ptrdiff_t src_stride,  // r1
    uint8_t *dst,          // r2
    ptrdiff_t dst_stride,  // r3
    const int16_t *filter_x,
    int filter_x_stride,
    const int16_t *filter_y,
    int filter_y_stride,
    int w,
    int h) {
  uint8x8_t d0u8, d2u8;
  uint8x16_t q0u8, q1u8, q2u8, q3u8;
  (void)filter_x; (void)filter_x_stride;
  (void)filter_y; (void)filter_y_stride;

  if (w > 32) {  // copy64
    for (; h > 0; h--) {
      q0u8 = vld1q_u8(src);
      q1u8 = vld1q_u8(src + 16);
      q2u8 = vld1q_u8(src + 32);
      q3u8 = vld1q_u8(src + 48);
      src += src_stride;

      vst1q_u8(dst, q0u8);
      vst1q_u8(dst + 16, q1u8);
      vst1q_u8(dst + 32, q2u8);
      vst1q_u8(dst + 48, q3u8);
      dst += dst_stride;
    }
  } else if (w == 32) {  // copy32, two rows per iteration (h assumed even)
    for (; h > 0; h -= 2) {
      q0u8 = vld1q_u8(src);
      q1u8 = vld1q_u8(src + 16);
      src += src_stride;
      q2u8 = vld1q_u8(src);
      q3u8 = vld1q_u8(src + 16);
      src += src_stride;

      vst1q_u8(dst, q0u8);
      vst1q_u8(dst + 16, q1u8);
      dst += dst_stride;
      vst1q_u8(dst, q2u8);
      vst1q_u8(dst + 16, q3u8);
      dst += dst_stride;
    }
  } else if (w > 8) {  // copy16, two rows per iteration (h assumed even)
    for (; h > 0; h -= 2) {
      q0u8 = vld1q_u8(src);
      src += src_stride;
      q1u8 = vld1q_u8(src);
      src += src_stride;

      vst1q_u8(dst, q0u8);
      dst += dst_stride;
      vst1q_u8(dst, q1u8);
      dst += dst_stride;
    }
  } else if (w == 8) {  // copy8, two rows per iteration (h assumed even)
    for (; h > 0; h -= 2) {
      d0u8 = vld1_u8(src);
      src += src_stride;
      d2u8 = vld1_u8(src);
      src += src_stride;

      vst1_u8(dst, d0u8);
      dst += dst_stride;
      vst1_u8(dst, d2u8);
      dst += dst_stride;
    }
  } else {  // copy4
    for (; h > 0; h--) {
      // memcpy instead of *(uint32_t *)dst = *(const uint32_t *)src:
      // the cast form violates strict aliasing and performs a possibly
      // misaligned 32-bit access (UB). A constant-size 4-byte memcpy is
      // compiled to a single unaligned-safe load/store on ARM.
      memcpy(dst, src, 4);
      src += src_stride;
      dst += dst_stride;
    }
  }
}
OLD | NEW |