| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef SkNx_sse_DEFINED | 8 #ifndef SkNx_sse_DEFINED |
| 9 #define SkNx_sse_DEFINED | 9 #define SkNx_sse_DEFINED |
| 10 | 10 |
| (...skipping 277 matching lines...) |
| 288 | 288 |
| 289 SkNx thenElse(const SkNx& t, const SkNx& e) const { | 289 SkNx thenElse(const SkNx& t, const SkNx& e) const { |
| 290 return _mm_or_si128(_mm_and_si128 (fVec, t.fVec), | 290 return _mm_or_si128(_mm_and_si128 (fVec, t.fVec), |
| 291 _mm_andnot_si128(fVec, e.fVec)); | 291 _mm_andnot_si128(fVec, e.fVec)); |
| 292 } | 292 } |
| 293 | 293 |
| 294 __m128i fVec; | 294 __m128i fVec; |
| 295 }; | 295 }; |
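
Aside on thenElse above: with a mask whose lanes are all-1s or all-0s, (mask & t) | (~mask & e) picks t where the mask is set and e elsewhere, with no branches; _mm_and_si128, _mm_andnot_si128, and _mm_or_si128 do exactly that 128 bits at a time. A minimal scalar sketch of the same idiom (select_lane is a hypothetical name, not part of this CL):

    #include <cstdint>
    #include <cassert>

    // Branch-free per-lane select: mask must be all-1s or all-0s, mirroring
    // _mm_or_si128(_mm_and_si128(mask, t), _mm_andnot_si128(mask, e)).
    static uint32_t select_lane(uint32_t mask, uint32_t t, uint32_t e) {
        return (mask & t) | (~mask & e);
    }

    int main() {
        assert(select_lane(0xFFFFFFFFu, 7, 9) == 7);
        assert(select_lane(0x00000000u, 7, 9) == 9);
    }
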
| 296 | 296 |
| 297 | 297 |
| 298 template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) { | 298 template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) { |
| 299 return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()); | 299 auto _32 = _mm_cvttps_epi32(src.fVec); |
| 300 } | |
| 301 template<> /*static*/ inline Sk4i SkNx_cast< int, uint8_t>(const Sk4b& src) { | |
| 302 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 | |
| 303 const int _ = ~0; | |
| 304 return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_)); | |
| 305 #else | |
| 306 return _mm_unpacklo_epi16(SkNx_cast<uint16_t>(src).fVec, _mm_setzero_si128()); | |
| 307 #endif | |
| 308 } | |
| 309 | |
| 310 template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) { | |
| 311 return _mm_packus_epi16(src.fVec, src.fVec); | |
| 312 } | |
| 313 template<> /*static*/ inline Sk4i SkNx_cast< int, uint16_t>(const Sk4h& src) { | |
| 314 return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128()); | |
| 315 } | |
| 316 | |
| 317 template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, int>(const Sk4i& src) { | |
| 318 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 | |
| 319 const int _ = ~0; | |
| 320 return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_)); | |
| 321 #else | |
| 322 // We're on our way to 8-bit anyway, so we don't care that _mm_packs_epi32 clamps to int16_t. | |
| 323 Sk4h _16 = _mm_packs_epi32(src.fVec, src.fVec); | |
| 324 return SkNx_cast<uint8_t>(_16); | |
| 325 #endif | |
| 326 } | |
| 327 template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, int>(const Sk4i& src) { | |
| 328 auto _32 = src.fVec; | |
| 329 // Ideally we'd use _mm_packus_epi32 here. But that's SSE4.1+. | 300 // Ideally we'd use _mm_packus_epi32 here. But that's SSE4.1+. |
| 330 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 | 301 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 |
| 331 // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place. | 302 // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place. |
| 332 const int _ = ~0; | 303 const int _ = ~0; |
| 333 return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_)); | 304 return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_)); |
| 334 #else | 305 #else |
| 335 // With SSE2, we have to emulate _mm_packus_epi32 with _mm_packs_epi32: | 306 // With SSE2, we have to emulate _mm_packus_epi32 with _mm_packs_epi32: |
| 336 _32 = _mm_sub_epi32(_32, _mm_set1_epi32((int)0x00008000)); | 307 _32 = _mm_sub_epi32(_32, _mm_set1_epi32((int)0x00008000)); |
| 337 return _mm_add_epi16(_mm_packs_epi32(_32, _32), _mm_set1_epi16((short)0x8000)); | 308 return _mm_add_epi16(_mm_packs_epi32(_32, _32), _mm_set1_epi16((short)0x8000)); |
| 338 #endif | 309 #endif |
| 339 } | 310 } |
| 340 | 311 |
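
The SSE2 fallback above works by a bias trick: subtracting 0x8000 maps unsigned [0, 65535] into signed int16 range, _mm_packs_epi32 then saturates with the right cutoffs, and the wrapping 16-bit add of 0x8000 undoes the bias. A scalar sketch of that arithmetic (packus_scalar is a hypothetical name, not part of this CL):

    #include <cstdint>
    #include <cassert>

    // Unsigned-saturating 32->16 pack built from a signed-saturating one,
    // mirroring the _mm_sub_epi32 / _mm_packs_epi32 / _mm_add_epi16 sequence.
    static uint16_t packus_scalar(int32_t v) {
        int32_t biased = v - 0x8000;                        // _mm_sub_epi32
        int16_t packed = biased < -32768 ? int16_t(-32768)  // _mm_packs_epi32
                       : biased >  32767 ? int16_t( 32767)
                                         : int16_t(biased);
        return (uint16_t)(uint16_t(packed) + 0x8000);       // _mm_add_epi16 (wraps)
    }

    int main() {
        assert(packus_scalar(    0) ==     0);
        assert(packus_scalar(65535) == 65535);
        assert(packus_scalar(70000) == 65535);  // saturates high
        assert(packus_scalar(   -1) ==     0);  // saturates low
    }
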
| 341 template<> /*static*/ inline Sk4f SkNx_cast<float, int>(const Sk4i& src) { | 312 template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) { |
| 342 return _mm_cvtepi32_ps(src.fVec); | 313 auto _32 = _mm_cvttps_epi32(src.fVec); |
| 343 } | 314 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 |
| 344 template<> /*static*/ inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) { | 315 const int _ = ~0; |
| 345 return SkNx_cast<float>(SkNx_cast<int>(src)); | 316 return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_)); |
| 346 } | 317 #else |
| 347 template<> /*static*/ inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) { | 318 auto _16 = _mm_packus_epi16(_32, _32); |
| 348 return SkNx_cast<float>(SkNx_cast<int>(src)); | 319 return _mm_packus_epi16(_16, _16); |
| | 320 #endif |
| 349 } | 321 } |
| 350 | 322 |
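
Why the new SSE2 path of SkNx_cast<uint8_t, float> works: after _mm_cvttps_epi32, each 32-bit lane holds 0x000000vv, i.e. the int16 pair (vv, 0). The first _mm_packus_epi16 therefore emits bytes vv,0,vv,0,..., which reread as int16 lanes are just the values again, so a second pack drops them into the low four bytes. A standalone sketch, assuming values already in [0, 255] (illustrative only, not part of this CL):

    #include <emmintrin.h>  // SSE2
    #include <cstdint>
    #include <cstdio>

    int main() {
        __m128  f   = _mm_setr_ps(10.f, 20.f, 30.f, 250.f);
        __m128i _32 = _mm_cvttps_epi32(f);         // int16 lanes: 10,0, 20,0, 30,0, 250,0
        __m128i _16 = _mm_packus_epi16(_32, _32);  // int16 lanes: 10,20,30,250, 10,20,30,250
        __m128i _8  = _mm_packus_epi16(_16, _16);  // bytes: 10,20,30,250, ...
        uint8_t out[16];
        _mm_storeu_si128((__m128i*)out, _8);
        printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // 10 20 30 250
    }
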
| 351 template<> /*static*/ inline Sk4i SkNx_cast< int, float>(const Sk4f& src) { | 323 template<> /*static*/ inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) { |
| 352 return _mm_cvttps_epi32(src.fVec); | 324 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3 |
| | 325 const int _ = ~0; |
| | 326 auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_)); |
| | 327 #else |
| | 328 auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()), |
| | 329 _32 = _mm_unpacklo_epi16(_16, _mm_setzero_si128()); |
| | 330 #endif |
| | 331 return _mm_cvtepi32_ps(_32); |
| 353 } | 332 } |
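
A detail the SSSE3 paths above rely on: _mm_shuffle_epi8 writes a zero byte wherever its control byte has the high bit set, so `const int _ = ~0` doubles as an insert-a-zero index, and one shuffle both scatters and zero-extends the four source bytes. The SSE2 path gets the same widening with two unpacks against zero; a standalone sketch (illustrative only, not part of this CL):

    #include <emmintrin.h>  // SSE2
    #include <cstdio>

    int main() {
        // Four packed bytes in the low 32 bits, as Sk4b stores them.
        __m128i src = _mm_cvtsi32_si128((int)0xFA030201);  // bytes 0x01,0x02,0x03,0xFA
        __m128i _16 = _mm_unpacklo_epi8 (src, _mm_setzero_si128());  // u8  -> u16
        __m128i _32 = _mm_unpacklo_epi16(_16, _mm_setzero_si128());  // u16 -> u32
        float out[4];
        _mm_storeu_ps(out, _mm_cvtepi32_ps(_32));
        printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 1 2 3 250
    }
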
| 354 template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) { | 333 |
| 355 return SkNx_cast<uint16_t>(SkNx_cast<int>(src)); | 334 template<> /*static*/ inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) { |
| 356 } | 335 auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128()); |
| 357 template<> /*static*/ inline Sk4b SkNx_cast< uint8_t, float>(const Sk4f& src) { | 336 return _mm_cvtepi32_ps(_32); |
| 358 return SkNx_cast<uint8_t>(SkNx_cast<int>(src)); | |
| 359 } | 337 } |
| 360 | 338 |
| 361 static inline void Sk4f_ToBytes(uint8_t bytes[16], | 339 static inline void Sk4f_ToBytes(uint8_t bytes[16], |
| 362 const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) { | 340 const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) { |
| 363 // We're on our way to 8-bit anyway, so we don't care that _mm_packs_epi32 clamps to int16_t. | |
| 364 _mm_storeu_si128((__m128i*)bytes, | 341 _mm_storeu_si128((__m128i*)bytes, |
| 365 _mm_packus_epi16(_mm_packs_epi32(_mm_cvttps_epi32(a.fVec), | 342 _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec), |
| 366 _mm_cvttps_epi32(b.fVec)), | 343 _mm_cvttps_epi32(b.fVec)), |
| 367 _mm_packs_epi32(_mm_cvttps_epi32(c.fVec), | 344 _mm_packus_epi16(_mm_cvttps_epi32(c.fVec), |
| 368 _mm_cvttps_epi32(d.fVec))); | 345 _mm_cvttps_epi32(d.fVec))); |
| | 346 } |
| | 347 |
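
How the new Sk4f_ToBytes tree works: each inner _mm_packus_epi16 funnels two vectors of four int32s into one vector of eight 16-bit values (the same reinterpretation trick as in SkNx_cast<uint8_t, float> above), and the outer pack then produces 16 consecutive bytes in a,b,c,d order. A standalone sketch of that call tree, assuming all values land in [0, 255] (illustrative only, not part of this CL):

    #include <emmintrin.h>  // SSE2
    #include <cstdint>
    #include <cstdio>

    int main() {
        __m128 a = _mm_setr_ps( 1,  2,  3,  4);
        __m128 b = _mm_setr_ps( 5,  6,  7,  8);
        __m128 c = _mm_setr_ps( 9, 10, 11, 12);
        __m128 d = _mm_setr_ps(13, 14, 15, 16);
        uint8_t bytes[16];
        _mm_storeu_si128((__m128i*)bytes,
                         _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a),
                                                           _mm_cvttps_epi32(b)),
                                          _mm_packus_epi16(_mm_cvttps_epi32(c),
                                                           _mm_cvttps_epi32(d))));
        for (int i = 0; i < 16; i++) { printf("%d ", bytes[i]); }  // 1 2 ... 16
        printf("\n");
    }
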
| | 348 template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) { |
| | 349 return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()); |
| | 350 } |
| | 351 |
| | 352 template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) { |
| | 353 return _mm_packus_epi16(src.fVec, src.fVec); |
| 369 } | 354 } |
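
The last two casts are exact inverses for in-range values: _mm_unpacklo_epi8 against zero zero-extends uint8 to uint16, and _mm_packus_epi16 saturates back down. A round-trip sketch (illustrative only, not part of this CL):

    #include <emmintrin.h>  // SSE2
    #include <cstdio>

    int main() {
        __m128i  u8  = _mm_cvtsi32_si128((int)0xFF050301);          // bytes 1,3,5,255
        __m128i u16  = _mm_unpacklo_epi8(u8, _mm_setzero_si128());  // widen, zero-fill
        __m128i back = _mm_packus_epi16(u16, u16);                  // saturate back
        printf("%d\n", _mm_cvtsi128_si32(back) == (int)0xFF050301); // 1: lossless
    }
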
| 370 | 355 |
| 371 #endif//SkNx_sse_DEFINED | 356 #endif//SkNx_sse_DEFINED |