OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkNx_sse_DEFINED | 8 #ifndef SkNx_sse_DEFINED |
9 #define SkNx_sse_DEFINED | 9 #define SkNx_sse_DEFINED |
10 | 10 |
11 // This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything mo
re recent. | 11 // This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything mo
re recent. |
12 // If you do, make sure this is in a static inline function... anywhere else ris
ks violating ODR. | 12 // If you do, make sure this is in a static inline function... anywhere else ris
ks violating ODR. |
13 | 13 |
14 #define SKNX_IS_FAST | 14 #define SKNX_IS_FAST |
15 | 15 |
// SSE 4.1 has _mm_floor_ps to floor 4 floats.  We emulate it:
//   - roundtrip through integers via truncation
//   - subtract 1 if that's too big (possible for negative values).
// This restricts the domain of our inputs to a maximum somewhere around 2^31.  Seems plenty big.
static inline __m128 sse2_mm_floor_ps(__m128 v) {
    // _mm_cvttps_epi32 truncates toward zero; converting back yields an integral float per lane.
    __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
    // For negative non-integers, truncation rounds toward zero (i.e. up), so roundtrip > v there.
    __m128 too_big = _mm_cmpgt_ps(roundtrip, v);
    // cmpgt yields an all-ones mask per lane; AND with 1.0f selects 1.0f exactly where we must
    // step down to reach the floor, and 0.0f elsewhere.
    return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
}
26 | 25 |
27 template <> | 26 template <> |
28 class SkNx<2, float> { | 27 class SkNx<2, float> { |
29 public: | 28 public: |
30 SkNx(const __m128& vec) : fVec(vec) {} | 29 SkNx(const __m128& vec) : fVec(vec) {} |
31 | 30 |
32 SkNx() {} | 31 SkNx() {} |
33 SkNx(float val) : fVec(_mm_set1_ps(val)) {} | 32 SkNx(float val) : fVec(_mm_set1_ps(val)) {} |
34 static SkNx Load(const void* ptr) { | 33 static SkNx Load(const void* ptr) { |
(...skipping 313 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
348 | 347 |
349 template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src)
{ | 348 template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src)
{ |
350 return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()); | 349 return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()); |
351 } | 350 } |
352 | 351 |
353 template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src)
{ | 352 template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src)
{ |
354 return _mm_packus_epi16(src.fVec, src.fVec); | 353 return _mm_packus_epi16(src.fVec, src.fVec); |
355 } | 354 } |
356 | 355 |
357 #endif//SkNx_sse_DEFINED | 356 #endif//SkNx_sse_DEFINED |
OLD | NEW |