OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkNx_neon_DEFINED | 8 #ifndef SkNx_neon_DEFINED |
9 #define SkNx_neon_DEFINED | 9 #define SkNx_neon_DEFINED |
10 | 10 |
(...skipping 370 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
381 union { int32x4_t v; int is[4]; } pun = {fVec}; | 381 union { int32x4_t v; int is[4]; } pun = {fVec}; |
382 return pun.is[k&3]; | 382 return pun.is[k&3]; |
383 } | 383 } |
384 | 384 |
    // Lane-wise arithmetic on the four signed 32-bit lanes.
    SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); }

    // Lane-wise bitwise operations.
    SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); }
    SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); }
    SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); }

    // Shift every lane by the same bit count.  SHIFT32 (a macro presumably
    // defined earlier in this file -- not visible here) turns the runtime
    // 'bits' into the immediate the _n_ intrinsics require.
    // vshrq_n_s32 is an arithmetic (sign-preserving) right shift.
    SkNx operator << (int bits) const { SHIFT32(vshlq_n_s32, fVec, bits); }
    SkNx operator >> (int bits) const { SHIFT32(vshrq_n_s32, fVec, bits); }
394 | 395 |
| 396 SkNx operator == (const SkNx& o) const { |
| 397 return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec)); |
| 398 } |
| 399 SkNx operator < (const SkNx& o) const { |
| 400 return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec)); |
| 401 } |
| 402 SkNx operator > (const SkNx& o) const { |
| 403 return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec)); |
| 404 } |
| 405 |
395 static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.f
Vec); } | 406 static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.f
Vec); } |
396 // TODO as needed | 407 // TODO as needed |
397 | 408 |
| 409 SkNx thenElse(const SkNx& t, const SkNx& e) const { |
| 410 return vbslq_s32(vreinterpretq_u32_s32(fVec), t.fVec, e.fVec); |
| 411 } |
| 412 |
398 int32x4_t fVec; | 413 int32x4_t fVec; |
399 }; | 414 }; |
400 | 415 |
401 #undef SHIFT32 | 416 #undef SHIFT32 |
402 #undef SHIFT16 | 417 #undef SHIFT16 |
403 #undef SHIFT8 | 418 #undef SHIFT8 |
404 | 419 |
405 template<> inline Sk4i SkNx_cast<int, float>(const Sk4f& src) { | 420 template<> inline Sk4i SkNx_cast<int, float>(const Sk4f& src) { |
406 return vcvtq_s32_f32(src.fVec); | 421 return vcvtq_s32_f32(src.fVec); |
407 | 422 |
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
449 | 464 |
// Narrow four u16 lanes to u8 (truncating).  vmovn_u16 needs a 128-bit input,
// so duplicate the 64-bit source into both halves first.
template<> inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    uint16x8_t doubled = vcombine_u16(src.fVec, src.fVec);
    return vmovn_u16(doubled);
}
453 | 468 |
// Saturating narrow: signed 32-bit -> unsigned 16-bit -> unsigned 8-bit.
// Negative lanes clamp to 0, large lanes clamp to 255.
template<> inline Sk4b SkNx_cast<uint8_t, int>(const Sk4i& src) {
    uint16x4_t halves  = vqmovun_s32(src.fVec);
    uint16x8_t doubled = vcombine_u16(halves, halves);
    return vqmovn_u16(doubled);
}
458 | 473 |
// Widen four u16 lanes to 32 bits (zero-extend), then view the result as
// signed.  Values 0..65535 are all representable, so this is lossless.
template<> inline Sk4i SkNx_cast<int, uint16_t>(const Sk4h& src) {
    uint32x4_t widened = vmovl_u16(src.fVec);
    return vreinterpretq_s32_u32(widened);
}
| 477 |
// Narrow each 32-bit lane to its low 16 bits.  Plain truncation (vmovn), not
// saturation -- matches C cast semantics for int -> uint16_t.
template<> inline Sk4h SkNx_cast<uint16_t, int>(const Sk4i& src) {
    uint32x4_t bits = vreinterpretq_u32_s32(src.fVec);
    return vmovn_u32(bits);
}
| 481 |
// Round each lane to the nearest integer (halves away from zero).
// The previous (x + 0.5f) form mis-rounded negative lanes toward zero --
// e.g. -1.2f gave 0 instead of -1 -- because vcvtq_s32_f32 truncates.
// Copying each lane's sign bit onto the 0.5 bias makes truncation round
// correctly in both directions.
static inline Sk4i Sk4f_round(const Sk4f& x) {
    uint32x4_t sign = vandq_u32(vreinterpretq_u32_f32(x.fVec),
                                vdupq_n_u32(0x80000000));
    uint32x4_t half = vorrq_u32(sign, vreinterpretq_u32_f32(vdupq_n_f32(0.5f)));
    return vcvtq_s32_f32(vaddq_f32(x.fVec, vreinterpretq_f32_u32(half)));
}
462 | 485 |
463 #endif//SkNx_neon_DEFINED | 486 #endif//SkNx_neon_DEFINED |
OLD | NEW |