OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkNx_neon_DEFINED | 8 #ifndef SkNx_neon_DEFINED |
9 #define SkNx_neon_DEFINED | 9 #define SkNx_neon_DEFINED |
10 | 10 |
(...skipping 368 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
379 int operator[](int k) const { | 379 int operator[](int k) const { |
380 SkASSERT(0 <= k && k < 4); | 380 SkASSERT(0 <= k && k < 4); |
381 union { int32x4_t v; int is[4]; } pun = {fVec}; | 381 union { int32x4_t v; int is[4]; } pun = {fVec}; |
382 return pun.is[k&3]; | 382 return pun.is[k&3]; |
383 } | 383 } |
384 | 384 |
385 SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); } | 385 SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); } |
386 SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); } | 386 SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); } |
387 SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); } | 387 SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); } |
388 | 388 |
| 389 SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); } |
| 390 |
389 SkNx operator << (int bits) const { SHIFT32(vshlq_n_s32, fVec, bits); } | 391 SkNx operator << (int bits) const { SHIFT32(vshlq_n_s32, fVec, bits); } |
390 SkNx operator >> (int bits) const { SHIFT32(vshrq_n_s32, fVec, bits); } | 392 SkNx operator >> (int bits) const { SHIFT32(vshrq_n_s32, fVec, bits); } |
391 | 393 |
392 static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.f
Vec); } | 394 static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.f
Vec); } |
393 // TODO as needed | 395 // TODO as needed |
394 | 396 |
395 int32x4_t fVec; | 397 int32x4_t fVec; |
396 }; | 398 }; |
397 | 399 |
398 #undef SHIFT32 | 400 #undef SHIFT32 |
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
442 | 444 |
template<> inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    // Zero-extend each 8-bit lane to 16 bits across all eight lanes,
    // then keep only the low four lanes we actually hold.
    uint16x8_t widened = vmovl_u8(src.fVec);
    return vget_low_u16(widened);
}
446 | 448 |
template<> inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    // vmovn_u16 narrows a full 8-lane vector, so duplicate our four lanes
    // to fill one; the half we keep carries the lanes we care about.
    uint16x8_t doubled = vcombine_u16(src.fVec, src.fVec);
    return vmovn_u16(doubled);
}
450 | 452 |
451 #endif//SkNx_neon_DEFINED | 453 #endif//SkNx_neon_DEFINED |
OLD | NEW |