OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkNx_neon_DEFINED | 8 #ifndef SkNx_neon_DEFINED |
9 #define SkNx_neon_DEFINED | 9 #define SkNx_neon_DEFINED |
10 | 10 |
(...skipping 432 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
443 } | 443 } |
444 | 444 |
template<> inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    // Zero-extend each u8 lane to u16 (vmovl_u8 yields a u16x8), then keep
    // the low 4 lanes, which hold the 4 source bytes.
    uint16x8_t widened = vmovl_u8(src.fVec);
    return vget_low_u16(widened);
}
448 | 448 |
template<> inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    // vmovn_u16 needs a full u16x8, so duplicate the 4 lanes into both halves,
    // then narrow each lane to u8 (truncating: keeps the low byte).
    uint16x8_t doubled = vcombine_u16(src.fVec, src.fVec);
    return vmovn_u16(doubled);
}
452 | 452 |
template<> inline Sk4b SkNx_cast<uint8_t, int>(const Sk4i& src) {
    // Two saturating narrows: s32 -> u16 (vqmovun clamps negatives to 0 and
    // values above 65535 to 65535), then u16 -> u8 (vqmovn clamps to 255).
    // Net effect: each int lane is clamped to [0, 255].
    uint16x4_t halves = vqmovun_s32(src.fVec);
    return vqmovn_u16(vcombine_u16(halves, halves));
}
| 457 |
static inline Sk4i Sk4f_round(const Sk4f& x) {
    // vcvtq_s32_f32 truncates toward zero, so bias by +0.5 to get
    // round-to-nearest.
    // NOTE(review): this biasing only rounds correctly for non-negative x
    // (e.g. -0.7 + 0.5 truncates to 0, not -1) — presumably callers pass
    // color values in [0, 255]; confirm against call sites.
    const Sk4f biased = x + 0.5f;
    return vcvtq_s32_f32(biased.fVec);
}
| 461 |
453 #endif//SkNx_neon_DEFINED | 462 #endif//SkNx_neon_DEFINED |
OLD | NEW |