Index: src/opts/SkPx_neon.h
diff --git a/src/opts/SkPx_neon.h b/src/opts/SkPx_neon.h
deleted file mode 100644
index d026d4de8c27e3d7b802600f61ab2176a44aab9c..0000000000000000000000000000000000000000
--- a/src/opts/SkPx_neon.h
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef SkPx_neon_DEFINED
-#define SkPx_neon_DEFINED
-
-// When we have NEON, we like to work 8 pixels at a time.
-// This lets us exploit vld4/vst4 and represent SkPx as planar uint8x8x4_t,
-// Wide as planar uint16x8x4_t, and Alpha as a single uint8x8_t plane.
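-//
-// Concretely, vld4_u8 reads 32 contiguous bytes (8 pixels) and deinterleaves
-// them as it loads, so each val[i] of the resulting uint8x8x4_t holds the
-// 8 bytes of one channel; this header keeps alpha in val[3].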
-
-struct SkPx_neon {
-    static const int N = 8;
-
-    uint8x8x4_t fVec;
-    SkPx_neon(uint8x8x4_t vec) : fVec(vec) {}
-
-    static SkPx_neon Dup(uint32_t px) { return vld4_dup_u8((const uint8_t*)&px); }
-    static SkPx_neon LoadN(const uint32_t* px) { return vld4_u8((const uint8_t*)px); }
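-    // Load(n, px) handles a 1..7 pixel tail with a descending fall-through
-    // switch: e.g. n == 3 enters at case 3 and falls through case 2, so lanes
-    // 2 and 1 are overwritten with pixels 2 and 1 while all other lanes keep
-    // pixel 0 from the initial vld4_dup_u8.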
-    static SkPx_neon Load(int n, const uint32_t* px) {
-        SkASSERT(0 < n && n < 8);
-        uint8x8x4_t v = vld4_dup_u8((const uint8_t*)px);  // n>=1, so start all lanes with pixel 0.
-        switch (n) {
-            case 7: v = vld4_lane_u8((const uint8_t*)(px+6), v, 6);  // fall through
-            case 6: v = vld4_lane_u8((const uint8_t*)(px+5), v, 5);  // fall through
-            case 5: v = vld4_lane_u8((const uint8_t*)(px+4), v, 4);  // fall through
-            case 4: v = vld4_lane_u8((const uint8_t*)(px+3), v, 3);  // fall through
-            case 3: v = vld4_lane_u8((const uint8_t*)(px+2), v, 2);  // fall through
-            case 2: v = vld4_lane_u8((const uint8_t*)(px+1), v, 1);
-        }
-        return v;
-    }
-
-    void storeN(uint32_t* px) const { vst4_u8((uint8_t*)px, fVec); }
-    void store(int n, uint32_t* px) const {
-        SkASSERT(0 < n && n < 8);
-        switch (n) {
-            case 7: vst4_lane_u8((uint8_t*)(px+6), fVec, 6);  // fall through
-            case 6: vst4_lane_u8((uint8_t*)(px+5), fVec, 5);  // fall through
-            case 5: vst4_lane_u8((uint8_t*)(px+4), fVec, 4);  // fall through
-            case 4: vst4_lane_u8((uint8_t*)(px+3), fVec, 3);  // fall through
-            case 3: vst4_lane_u8((uint8_t*)(px+2), fVec, 2);  // fall through
-            case 2: vst4_lane_u8((uint8_t*)(px+1), fVec, 1);  // fall through
-            case 1: vst4_lane_u8((uint8_t*)(px+0), fVec, 0);
-        }
-    }
-
-    struct Alpha {
-        uint8x8_t fA;
-        Alpha(uint8x8_t a) : fA(a) {}
-
-        static Alpha Dup(uint8_t a) { return vdup_n_u8(a); }
-        static Alpha LoadN(const uint8_t* a) { return vld1_u8(a); }
-        static Alpha Load(int n, const uint8_t* a) {
-            SkASSERT(0 < n && n < 8);
-            uint8x8_t v = vld1_dup_u8(a);  // n>=1, so start all lanes with alpha 0.
-            switch (n) {
-                case 7: v = vld1_lane_u8(a+6, v, 6);  // fall through
-                case 6: v = vld1_lane_u8(a+5, v, 5);  // fall through
-                case 5: v = vld1_lane_u8(a+4, v, 4);  // fall through
-                case 4: v = vld1_lane_u8(a+3, v, 3);  // fall through
-                case 3: v = vld1_lane_u8(a+2, v, 2);  // fall through
-                case 2: v = vld1_lane_u8(a+1, v, 1);
-            }
-            return v;
-        }
-        Alpha inv() const { return vsub_u8(vdup_n_u8(255), fA); }
-    };
-
-    struct Wide {
-        uint16x8x4_t fVec;
-        Wide(uint16x8x4_t vec) : fVec(vec) {}
-
-        Wide operator+(const Wide& o) const {
-            return (uint16x8x4_t) {{
-                vaddq_u16(fVec.val[0], o.fVec.val[0]),
-                vaddq_u16(fVec.val[1], o.fVec.val[1]),
-                vaddq_u16(fVec.val[2], o.fVec.val[2]),
-                vaddq_u16(fVec.val[3], o.fVec.val[3]),
-            }};
-        }
-        Wide operator-(const Wide& o) const {
-            return (uint16x8x4_t) {{
-                vsubq_u16(fVec.val[0], o.fVec.val[0]),
-                vsubq_u16(fVec.val[1], o.fVec.val[1]),
-                vsubq_u16(fVec.val[2], o.fVec.val[2]),
-                vsubq_u16(fVec.val[3], o.fVec.val[3]),
-            }};
-        }
-        SK_ALWAYS_INLINE Wide operator<<(int bits) const {
-            return (uint16x8x4_t) {{
-                vshlq_n_u16(fVec.val[0], bits),
-                vshlq_n_u16(fVec.val[1], bits),
-                vshlq_n_u16(fVec.val[2], bits),
-                vshlq_n_u16(fVec.val[3], bits),
-            }};
-        }
-        SK_ALWAYS_INLINE Wide operator>>(int bits) const {
-            return (uint16x8x4_t) {{
-                vshrq_n_u16(fVec.val[0], bits),
-                vshrq_n_u16(fVec.val[1], bits),
-                vshrq_n_u16(fVec.val[2], bits),
-                vshrq_n_u16(fVec.val[3], bits),
-            }};
-        }
-
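-        // Per channel: narrow((*this + widen(o)) >> 8). vaddw_u8 adds o's
-        // 8-bit lanes to our 16-bit lanes; vshrn_n_u16 shifts right by 8 and
-        // narrows back to 8 bits.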
-        SkPx_neon addNarrowHi(const SkPx_neon& o) const {
-            return (uint8x8x4_t) {{
-                vshrn_n_u16(vaddw_u8(fVec.val[0], o.fVec.val[0]), 8),
-                vshrn_n_u16(vaddw_u8(fVec.val[1], o.fVec.val[1]), 8),
-                vshrn_n_u16(vaddw_u8(fVec.val[2], o.fVec.val[2]), 8),
-                vshrn_n_u16(vaddw_u8(fVec.val[3], o.fVec.val[3]), 8),
-            }};
-        }
-    };
-
-    Alpha alpha() const { return fVec.val[3]; }
-
-    Wide widenLo() const {
-        return (uint16x8x4_t) {{
-            vmovl_u8(fVec.val[0]),
-            vmovl_u8(fVec.val[1]),
-            vmovl_u8(fVec.val[2]),
-            vmovl_u8(fVec.val[3]),
-        }};
-    }
-    // TODO: these two can probably be done faster.
-    Wide widenHi() const { return this->widenLo() << 8; }
-    Wide widenLoHi() const { return this->widenLo() + this->widenHi(); }
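-    // widenLo() zero-extends each byte; widenHi() puts it in the high byte;
-    // widenLoHi() is their sum, i.e. x * 0x101, so 0xAB widens to 0xABAB.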
-
-    SkPx_neon operator+(const SkPx_neon& o) const {
-        return (uint8x8x4_t) {{
-            vadd_u8(fVec.val[0], o.fVec.val[0]),
-            vadd_u8(fVec.val[1], o.fVec.val[1]),
-            vadd_u8(fVec.val[2], o.fVec.val[2]),
-            vadd_u8(fVec.val[3], o.fVec.val[3]),
-        }};
-    }
-    SkPx_neon operator-(const SkPx_neon& o) const {
-        return (uint8x8x4_t) {{
-            vsub_u8(fVec.val[0], o.fVec.val[0]),
-            vsub_u8(fVec.val[1], o.fVec.val[1]),
-            vsub_u8(fVec.val[2], o.fVec.val[2]),
-            vsub_u8(fVec.val[3], o.fVec.val[3]),
-        }};
-    }
-    SkPx_neon saturatedAdd(const SkPx_neon& o) const {
-        return (uint8x8x4_t) {{
-            vqadd_u8(fVec.val[0], o.fVec.val[0]),
-            vqadd_u8(fVec.val[1], o.fVec.val[1]),
-            vqadd_u8(fVec.val[2], o.fVec.val[2]),
-            vqadd_u8(fVec.val[3], o.fVec.val[3]),
-        }};
-    }
-
-    Wide operator*(const Alpha& a) const {
-        return (uint16x8x4_t) {{
-            vmull_u8(fVec.val[0], a.fA),
-            vmull_u8(fVec.val[1], a.fA),
-            vmull_u8(fVec.val[2], a.fA),
-            vmull_u8(fVec.val[3], a.fA),
-        }};
-    }
-    SkPx_neon approxMulDiv255(const Alpha& a) const {
-        return (*this * a).addNarrowHi(*this);
-    }
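-    // Per channel this is (x*a + x) >> 8 == (x*(a+1)) >> 8, a standard cheap
-    // approximation of x*a/255 (exact when a is 0 or 255).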
-
-    SkPx_neon addAlpha(const Alpha& a) const {
-        return (uint8x8x4_t) {{
-            fVec.val[0],
-            fVec.val[1],
-            fVec.val[2],
-            vadd_u8(fVec.val[3], a.fA),
-        }};
-    }
-};
-typedef SkPx_neon SkPx;
-
-#endif//SkPx_neon_DEFINED