OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkNx_neon_DEFINED | 8 #ifndef SkNx_neon_DEFINED |
9 #define SkNx_neon_DEFINED | 9 #define SkNx_neon_DEFINED |
10 | 10 |
(...skipping 311 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
322 | 322 |
    // Write all 8 16-bit lanes out to vals.
    void store(uint16_t vals[8]) const { vst1q_u16(vals, fVec); }
324 | 324 |
    // Lane-wise wrapping addition.
    SkNi operator + (const SkNi& o) const { return vaddq_u16(fVec, o.fVec); }
    // Lane-wise wrapping subtraction.
    SkNi operator - (const SkNi& o) const { return vsubq_u16(fVec, o.fVec); }
    // Lane-wise multiplication (low 16 bits of each product).
    SkNi operator * (const SkNi& o) const { return vmulq_u16(fVec, o.fVec); }
328 | 328 |
    // Lane-wise left shift.  SHIFT16 (defined earlier in this file, not visible
    // here) presumably expands the runtime `bits` into the compile-time
    // immediate vshlq_n_u16 requires — confirm against its definition.
    SkNi operator << (int bits) const { SHIFT16(vshlq_n_u16, fVec, bits); }
    // Lane-wise logical (unsigned) right shift, via the SHIFT16 dispatch macro.
    SkNi operator >> (int bits) const { SHIFT16(vshrq_n_u16, fVec, bits); }
331 | 331 |
    // Lane-wise unsigned minimum of two 8x16-bit vectors.
    static SkNi Min(const SkNi& a, const SkNi& b) { return vminq_u16(a.fVec, b.fVec); }
| 333 |
    // Extract lane k (0..7) as a scalar.  The k&7 mask keeps the lane index
    // in range for vgetq_lane_u16 even when SkASSERT compiles to nothing.
    template <int k> uint16_t kth() const {
        SkASSERT(0 <= k && k < 8);
        return vgetq_lane_u16(fVec, k&7);
    }
336 | 338 |
337 uint16x8_t fVec; | 339 uint16x8_t fVec; |
338 }; | 340 }; |
339 | 341 |
340 template <> | 342 template <> |
341 class SkNi<16, uint8_t> { | 343 class SkNi<16, uint8_t> { |
(...skipping 15 matching lines...) Expand all Loading... |
357 | 359 |
    // Lane-wise saturating addition: sums clamp at 255 instead of wrapping.
    SkNi saturatedAdd(const SkNi& o) const { return vqaddq_u8(fVec, o.fVec); }
359 | 361 |
    // Lane-wise wrapping addition.
    SkNi operator + (const SkNi& o) const { return vaddq_u8(fVec, o.fVec); }
    // Lane-wise wrapping subtraction.
    SkNi operator - (const SkNi& o) const { return vsubq_u8(fVec, o.fVec); }
    // Lane-wise multiplication (low 8 bits of each product).
    SkNi operator * (const SkNi& o) const { return vmulq_u8(fVec, o.fVec); }
363 | 365 |
    // Lane-wise left shift, via the SHIFT8 dispatch macro (defined earlier in
    // this file; presumably maps runtime `bits` to the immediate intrinsic).
    SkNi operator << (int bits) const { SHIFT8(vshlq_n_u8, fVec, bits); }
    // Lane-wise logical (unsigned) right shift, via the SHIFT8 dispatch macro.
    SkNi operator >> (int bits) const { SHIFT8(vshrq_n_u8, fVec, bits); }
366 | 368 |
    // Lane-wise unsigned minimum of two 16x8-bit vectors.
    static SkNi Min(const SkNi& a, const SkNi& b) { return vminq_u8(a.fVec, b.fVec); }
367 template <int k> uint8_t kth() const { | 371 template <int k> uint8_t kth() const { |
368 SkASSERT(0 <= k && k < 15); | 372 SkASSERT(0 <= k && k < 15); |
369 return vgetq_lane_u8(fVec, k&16); | 373 return vgetq_lane_u8(fVec, k&16); |
370 } | 374 } |
371 | 375 |
372 uint8x16_t fVec; | 376 uint8x16_t fVec; |
373 }; | 377 }; |
374 | 378 |
375 #undef SHIFT32 | 379 #undef SHIFT32 |
376 #undef SHIFT16 | 380 #undef SHIFT16 |
377 #undef SHIFT8 | 381 #undef SHIFT8 |
378 | 382 |
379 #endif//SkNx_neon_DEFINED | 383 #endif//SkNx_neon_DEFINED |
OLD | NEW |