| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2013 The Android Open Source Project | 2 * Copyright 2013 The Android Open Source Project |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 | 8 |
| 9 #include "SkBitmap.h" | 9 #include "SkBitmap.h" |
| 10 #include "SkColorPriv.h" | 10 #include "SkColorPriv.h" |
| 11 #include "SkBlurImage_opts.h" | 11 #include "SkBlurImage_opts.h" |
| 12 #include "SkRect.h" | 12 #include "SkRect.h" |
| 13 | 13 |
| 14 #include <arm_neon.h> | 14 #include <arm_neon.h> |
| 15 | 15 |
| 16 namespace { | 16 namespace { |
| 17 | 17 |
| 18 enum BlurDirection { | 18 enum BlurDirection { |
| 19 kX, kY | 19 kX, kY |
| 20 }; | 20 }; |
| 21 | 21 |
| 22 /** | 22 /** |
| 23 * Helper function to spread the components of a 32-bit integer into the | 23 * Helper function to spread the components of a 32-bit integer into the |
| 24 * lower 8 bits of each 32-bit element of a NEON register. | 24 * lower 8 bits of each 16-bit element of a NEON register. |
| 25 */ | 25 */ |
| 26 | 26 |
| 27 inline uint32x4_t expand(uint32_t a) { | 27 static inline uint16x4_t expand(uint32_t a) { |
| 28 // ( ARGB ) -> ( ARGB ARGB ) -> ( A R G B A R G B ) | 28 // ( ARGB ) -> ( ARGB ARGB ) -> ( A R G B A R G B ) |
| 29 uint8x8_t v8 = vreinterpret_u8_u32(vdup_n_u32(a)); | 29 uint8x8_t v8 = vreinterpret_u8_u32(vdup_n_u32(a)); |
| 30 // ( A R G B A R G B ) -> ( 0A 0R 0G 0B 0A 0R 0G 0B ) -> ( 0A 0R 0G 0B ) | 30 // ( A R G B A R G B ) -> ( 0A 0R 0G 0B 0A 0R 0G 0B ) -> ( 0A 0R 0G 0B ) |
| 31 const uint16x4_t v16 = vget_low_u16(vmovl_u8(v8)); | 31 return vget_low_u16(vmovl_u8(v8)); |
| 32 // ( 0A 0R 0G 0B ) -> ( 000A 000R 000G 000B ) | |
| 33 return vmovl_u16(v16); | |
| 34 } | 32 } |
| 35 | 33 |
| 36 template<BlurDirection srcDirection, BlurDirection dstDirection> | 34 template<BlurDirection srcDirection, BlurDirection dstDirection> |
| 37 void SkBoxBlur_NEON(const SkPMColor* src, int srcStride, SkPMColor* dst, int ker
nelSize, | 35 void SkBoxBlur_NEON(const SkPMColor* src, int srcStride, SkPMColor* dst, int ker
nelSize, |
| 38 int leftOffset, int rightOffset, int width, int height) | 36 int leftOffset, int rightOffset, int width, int height) |
| 39 { | 37 { |
| 40 const int rightBorder = SkMin32(rightOffset + 1, width); | 38 const int rightBorder = SkMin32(rightOffset + 1, width); |
| 41 const int srcStrideX = srcDirection == kX ? 1 : srcStride; | 39 const int srcStrideX = srcDirection == kX ? 1 : srcStride; |
| 42 const int dstStrideX = dstDirection == kX ? 1 : height; | 40 const int dstStrideX = dstDirection == kX ? 1 : height; |
| 43 const int srcStrideY = srcDirection == kX ? srcStride : 1; | 41 const int srcStrideY = srcDirection == kX ? srcStride : 1; |
| 44 const int dstStrideY = dstDirection == kX ? width : 1; | 42 const int dstStrideY = dstDirection == kX ? width : 1; |
| 45 const uint32x4_t scale = vdupq_n_u32((1 << 24) / kernelSize); | 43 const uint32x4_t scale = vdupq_n_u32((1 << 24) / kernelSize); |
| 46 const uint32x4_t half = vdupq_n_u32(1 << 23); | 44 const uint32x4_t half = vdupq_n_u32(1 << 23); |
| 47 for (int y = 0; y < height; ++y) { | 45 for (int y = 0; y < height; ++y) { |
| 48 uint32x4_t sum = vdupq_n_u32(0); | 46 uint32x4_t sum = vdupq_n_u32(0); |
| 49 const SkPMColor* p = src; | 47 const SkPMColor* p = src; |
| 50 for (int i = 0; i < rightBorder; ++i) { | 48 for (int i = 0; i < rightBorder; ++i) { |
| 51 sum = vaddq_u32(sum, expand(*p)); | 49 sum = vaddw_u16(sum, expand(*p)); |
| 52 p += srcStrideX; | 50 p += srcStrideX; |
| 53 } | 51 } |
| 54 | 52 |
| 55 const SkPMColor* sptr = src; | 53 const SkPMColor* sptr = src; |
| 56 SkPMColor* dptr = dst; | 54 SkPMColor* dptr = dst; |
| 57 for (int x = 0; x < width; ++x) { | 55 for (int x = 0; x < width; ++x) { |
| 58 // ( half+sumA*scale half+sumR*scale half+sumG*scale half+sumB*scale
) | 56 // ( half+sumA*scale half+sumR*scale half+sumG*scale half+sumB*scale
) |
| 59 uint32x4_t result = vmlaq_u32(half, sum, scale); | 57 uint32x4_t result = vmlaq_u32(half, sum, scale); |
| 60 | 58 |
| 61 // Shift down to lower 8 bits of each element. | 59 // Saturated conversion to 16-bit. |
| 62 // ( AAAA RRRR GGGG BBBB ) -> ( 000A 000R 000G 000B ) | 60 // ( AAAA RRRR GGGG BBBB ) -> ( Aa Rr Gg Bb ), low byte = fraction bits |
| 63 result = vshrq_n_u32(result, 24); | 61 uint16x4_t result16 = vqshrn_n_u32(result, 16); |
| 64 | 62 |
| 65 // ( 000A 000R 000G 000B ) -> ( 0A 0R 0G 0B ) | 63 // Saturated conversion to 8-bit. |
| 66 uint16x4_t result16 = vqmovn_u32(result); | |
| 67 | |
| 68 // ( 0A 0R 0G 0B ) -> ( 0A 0R 0G 0B 0A 0R 0G 0B ) -> ( A R G B A R G B ) | 64 // ( Aa Rr Gg Bb ) -> ( Aa Rr Gg Bb Aa Rr Gg Bb ) -> ( A R G B A R G B ) |
| 69 uint8x8_t result8 = vqmovn_u16(vcombine_u16(result16, result16)); | 65 uint8x8_t result8 = vqshrn_n_u16(vcombine_u16(result16, result16), 8
); |
| 70 | 66 |
| 71 // ( A R G B A R G B ) -> ( ARGB ARGB ) -> ( ARGB ) | 67 // ( A R G B A R G B ) -> ( ARGB ARGB ) -> ( ARGB ) |
| 72 // Store low 32 bits to destination. | 68 // Store low 32 bits to destination. |
| 73 vst1_lane_u32(dptr, vreinterpret_u32_u8(result8), 0); | 69 vst1_lane_u32(dptr, vreinterpret_u32_u8(result8), 0); |
| 70 |
| 74 if (x >= leftOffset) { | 71 if (x >= leftOffset) { |
| 75 const SkPMColor* l = sptr - leftOffset * srcStrideX; | 72 const SkPMColor* l = sptr - leftOffset * srcStrideX; |
| 76 sum = vsubq_u32(sum, expand(*l)); | 73 sum = vsubw_u16(sum, expand(*l)); |
| 77 } | 74 } |
| 78 if (x + rightOffset + 1 < width) { | 75 if (x + rightOffset + 1 < width) { |
| 79 const SkPMColor* r = sptr + (rightOffset + 1) * srcStrideX; | 76 const SkPMColor* r = sptr + (rightOffset + 1) * srcStrideX; |
| 80 sum = vaddq_u32(sum, expand(*r)); | 77 sum = vaddw_u16(sum, expand(*r)); |
| 81 } | 78 } |
| 82 sptr += srcStrideX; | 79 sptr += srcStrideX; |
| 83 if (srcDirection == kY) { | 80 if (srcDirection == kY) { |
| 84 SK_PREFETCH(sptr + (rightOffset + 1) * srcStrideX); | 81 SK_PREFETCH(sptr + (rightOffset + 1) * srcStrideX); |
| 85 } | 82 } |
| 86 dptr += dstStrideX; | 83 dptr += dstStrideX; |
| 87 } | 84 } |
| 88 src += srcStrideY; | 85 src += srcStrideY; |
| 89 dst += dstStrideY; | 86 dst += dstStrideY; |
| 90 } | 87 } |
| 91 } | 88 } |
| 92 | 89 |
| 93 } // namespace | 90 } // namespace |
| 94 | 91 |
| 95 bool SkBoxBlurGetPlatformProcs_NEON(SkBoxBlurProc* boxBlurX, | 92 bool SkBoxBlurGetPlatformProcs_NEON(SkBoxBlurProc* boxBlurX, |
| 96 SkBoxBlurProc* boxBlurY, | 93 SkBoxBlurProc* boxBlurY, |
| 97 SkBoxBlurProc* boxBlurXY, | 94 SkBoxBlurProc* boxBlurXY, |
| 98 SkBoxBlurProc* boxBlurYX) { | 95 SkBoxBlurProc* boxBlurYX) { |
| 99 *boxBlurX = SkBoxBlur_NEON<kX, kX>; | 96 *boxBlurX = SkBoxBlur_NEON<kX, kX>; |
| 100 *boxBlurY = SkBoxBlur_NEON<kY, kY>; | 97 *boxBlurY = SkBoxBlur_NEON<kY, kY>; |
| 101 *boxBlurXY = SkBoxBlur_NEON<kX, kY>; | 98 *boxBlurXY = SkBoxBlur_NEON<kX, kY>; |
| 102 *boxBlurYX = SkBoxBlur_NEON<kY, kX>; | 99 *boxBlurYX = SkBoxBlur_NEON<kY, kX>; |
| 103 return true; | 100 return true; |
| 104 } | 101 } |
| OLD | NEW |