Index: src/opts/SkNx_sse.h
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
index c0e48287b4cc5049d4fa0b3cc9bc2ab607f210f5..3881b54e3a9dcf76edff2379fc2313b6435cbf87 100644
--- a/src/opts/SkNx_sse.h
+++ b/src/opts/SkNx_sse.h
@@ -403,6 +403,19 @@ static inline Sk4i Sk4f_round(const Sk4f& x) {
     return _mm_cvtps_epi32(x.fVec);
 }
+static inline void Sk4h_load4(const void* ptr, Sk4h* r, Sk4h* g, Sk4h* b, Sk4h* a) {
  [Inline review comment on the line above]
  msarett 2016/07/26 14:25:07:
  The fact that we are doing a transpose on loads an
+    __m128i lo = _mm_loadu_si128(((__m128i*)ptr) + 0),
+            hi = _mm_loadu_si128(((__m128i*)ptr) + 1);
+    __m128i even = _mm_unpacklo_epi16(lo, hi),   // r0 r2 g0 g2 b0 b2 a0 a2
+             odd = _mm_unpackhi_epi16(lo, hi);   // r1 r3 ...
+    __m128i rg = _mm_unpacklo_epi16(even, odd),  // r0 r1 r2 r3 g0 g1 g2 g3
+            ba = _mm_unpackhi_epi16(even, odd);  // b0 b1 ...  a0 a1 ...
+    *r = rg;
+    *g = _mm_srli_si128(rg, 8);
+    *b = ba;
+    *a = _mm_srli_si128(ba, 8);
+}
+
 static inline void Sk4h_store4(void* dst, const Sk4h& r, const Sk4h& g, const Sk4h& b,
                                const Sk4h& a) {
     __m128i rg = _mm_unpacklo_epi16(r.fVec, g.fVec);
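For readers less familiar with the unpack trick: Sk4h_load4 reads four interleaved RGBA pixels with 16-bit channels and deinterleaves them into per-channel vectors using two rounds of _mm_unpacklo/hi_epi16, i.e. a 4x4 transpose of 16-bit lane groups. Below is a minimal standalone sketch, not part of this patch, that runs the same shuffle sequence outside of Skia's Sk4h wrapper so the lane movement can be checked directly; the array names and the printf harness are illustrative assumptions.

// Standalone illustration of the unpack-based deinterleave (assumed harness,
// not Skia code). Compile with any SSE2-capable compiler, e.g. g++ -msse2.
#include <emmintrin.h>   // SSE2 intrinsics
#include <cstdint>
#include <cstdio>

int main() {
    // Four interleaved RGBA pixels, 16 bits per channel:
    // r0 g0 b0 a0  r1 g1 b1 a1  r2 g2 b2 a2  r3 g3 b3 a3
    alignas(16) uint16_t pixels[16];
    for (int i = 0; i < 16; i++) {
        pixels[i] = (uint16_t)(i * 100);
    }

    __m128i lo = _mm_loadu_si128((const __m128i*)pixels + 0);  // pixels 0 and 1
    __m128i hi = _mm_loadu_si128((const __m128i*)pixels + 1);  // pixels 2 and 3

    // First pass: interleave 16-bit lanes of pixels 0/1 with pixels 2/3.
    __m128i even = _mm_unpacklo_epi16(lo, hi);  // r0 r2 g0 g2 b0 b2 a0 a2
    __m128i odd  = _mm_unpackhi_epi16(lo, hi);  // r1 r3 g1 g3 b1 b3 a1 a3

    // Second pass completes the transpose: each channel is now contiguous.
    __m128i rg = _mm_unpacklo_epi16(even, odd); // r0 r1 r2 r3 g0 g1 g2 g3
    __m128i ba = _mm_unpackhi_epi16(even, odd); // b0 b1 b2 b3 a0 a1 a2 a3

    alignas(16) uint16_t out[16];
    _mm_store_si128((__m128i*)out + 0, rg);
    _mm_store_si128((__m128i*)out + 1, ba);

    // Expected: r = 0 400 800 1200, g = 100 500 900 1300, and so on.
    printf("r: %d %d %d %d\n", out[0],  out[1],  out[2],  out[3]);
    printf("g: %d %d %d %d\n", out[4],  out[5],  out[6],  out[7]);
    printf("b: %d %d %d %d\n", out[8],  out[9],  out[10], out[11]);
    printf("a: %d %d %d %d\n", out[12], out[13], out[14], out[15]);
    return 0;
}

One observation on the patch itself: after the transpose, r/g and b/a each share one __m128i, so the g and a results are produced by shifting the upper 64 bits down with _mm_srli_si128. That works because Sk4h only uses the low four 16-bit lanes of its underlying register.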