| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef SkNx_sse_DEFINED | 8 #ifndef SkNx_sse_DEFINED |
| 9 #define SkNx_sse_DEFINED | 9 #define SkNx_sse_DEFINED |
| 10 | 10 |
| (...skipping 330 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 341 } | 341 } |
| 342 | 342 |
| 343 __m128i fVec; | 343 __m128i fVec; |
| 344 }; | 344 }; |
| 345 | 345 |
| 346 | 346 |
| 347 template<> inline Sk4i SkNx_cast<int, float, 4>(const Sk4f& src) { | 347 template<> inline Sk4i SkNx_cast<int, float, 4>(const Sk4f& src) { |
| 348 return _mm_cvttps_epi32(src.fVec); | 348 return _mm_cvttps_epi32(src.fVec); |
| 349 } | 349 } |
| 350 | 350 |
| 351 template<> inline Sk4h SkNx_cast<uint16_t, float, 4>(const Sk4f& src) { |
| 352 auto _32 = _mm_cvttps_epi32(src.fVec); |
| 353 return _mm_packus_epi16(_32, _32); |
| 354 } |
| 355 |
| 351 template<> inline Sk4b SkNx_cast<uint8_t, float, 4>(const Sk4f& src) { | 356 template<> inline Sk4b SkNx_cast<uint8_t, float, 4>(const Sk4f& src) { |
| 352 auto _32 = _mm_cvttps_epi32(src.fVec); | 357 auto _32 = _mm_cvttps_epi32(src.fVec); |
| 353 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 | 358 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 |
| 354 const int _ = ~0; | 359 const int _ = ~0; |
| 355 return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_
,_)); | 360 return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_
,_)); |
| 356 #else | 361 #else |
| 357 auto _16 = _mm_packus_epi16(_32, _32); | 362 auto _16 = _mm_packus_epi16(_32, _32); |
| 358 return _mm_packus_epi16(_16, _16); | 363 return _mm_packus_epi16(_16, _16); |
| 359 #endif | 364 #endif |
| 360 } | 365 } |
| 361 | 366 |
| 362 template<> inline Sk4f SkNx_cast<float, uint8_t, 4>(const Sk4b& src) { | 367 template<> inline Sk4f SkNx_cast<float, uint8_t, 4>(const Sk4b& src) { |
| 363 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 | 368 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 |
| 364 const int _ = ~0; | 369 const int _ = ~0; |
| 365 auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,
_, 3,_,_,_)); | 370 auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,
_, 3,_,_,_)); |
| 366 #else | 371 #else |
| 367 auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()), | 372 auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()), |
| 368 _32 = _mm_unpacklo_epi16(_16, _mm_setzero_si128()); | 373 _32 = _mm_unpacklo_epi16(_16, _mm_setzero_si128()); |
| 369 #endif | 374 #endif |
| 370 return _mm_cvtepi32_ps(_32); | 375 return _mm_cvtepi32_ps(_32); |
| 371 } | 376 } |
| 372 | 377 |
| 378 template<> inline Sk4f SkNx_cast<float, uint16_t, 4>(const Sk4h& src) { |
| 379 auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128()); |
| 380 return _mm_cvtepi32_ps(_32); |
| 381 } |
| 382 |
// Store the 16 floats of a,b,c,d as 16 bytes: a's lanes first, then b, c, d.
// NOTE(review): _mm_packus_epi16 reads its inputs as signed 16-bit lanes, so
// this is only exact for float values in [0,255] — presumably the caller's
// contract; confirm at call sites.  (Lanes above 32767 wrap rather than
// clamp; negative lanes clamp to 0.)
static inline void Sk4f_ToBytes(uint8_t bytes[16],
                                const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
    // Truncate each vector to int32, narrow 32->16->8 with unsigned
    // saturation, and write all 16 bytes with one unaligned store.
    _mm_storeu_si128((__m128i*)bytes,
                     _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
                                                       _mm_cvttps_epi32(b.fVec)),
                                      _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
                                                       _mm_cvttps_epi32(d.fVec))));
}
| 381 | 391 |
| 382 template<> inline Sk4h SkNx_cast<uint16_t, uint8_t, 4>(const Sk4b& src) { | 392 template<> inline Sk4h SkNx_cast<uint16_t, uint8_t, 4>(const Sk4b& src) { |
| 383 return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()); | 393 return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()); |
| 384 } | 394 } |
| 385 | 395 |
| 386 template<> inline Sk4b SkNx_cast<uint8_t, uint16_t, 4>(const Sk4h& src) { | 396 template<> inline Sk4b SkNx_cast<uint8_t, uint16_t, 4>(const Sk4h& src) { |
| 387 return _mm_packus_epi16(src.fVec, src.fVec); | 397 return _mm_packus_epi16(src.fVec, src.fVec); |
| 388 } | 398 } |
| 389 | 399 |
| 390 | 400 |
| 391 } // namespace | 401 } // namespace |
| 392 | 402 |
| 393 #endif//SkNx_sse_DEFINED | 403 #endif//SkNx_sse_DEFINED |
| OLD | NEW |