-rw-r--r--  src/core/Sk4px.h           |  92
-rw-r--r--  src/core/SkBlitRow_D32.cpp |  96
-rw-r--r--  src/core/SkNx.h            |  31
-rw-r--r--  src/opts/Sk4px_NEON.h      |  50
-rw-r--r--  src/opts/Sk4px_SSE2.h      |  39
-rw-r--r--  src/opts/Sk4px_none.h      |  57
-rw-r--r--  src/opts/SkNx_neon.h       | 120
-rw-r--r--  src/opts/SkNx_sse.h        |  46
8 files changed, 397 insertions, 134 deletions
diff --git a/src/core/Sk4px.h b/src/core/Sk4px.h
new file mode 100644
index 0000000000..3d2a8e3362
--- /dev/null
+++ b/src/core/Sk4px.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef Sk4px_DEFINED
+#define Sk4px_DEFINED
+
+#include "SkNx.h"
+#include "SkColor.h"
+
+// 1, 2 or 4 SkPMColors, generally vectorized.
+class Sk4px : public Sk16b {
+public:
+    Sk4px(SkPMColor);  // Duplicate 4x.
+    Sk4px(const Sk16b& v) : Sk16b(v) {}
+
+    // When loading or storing fewer than 4 SkPMColors, we use the low lanes.
+    static Sk4px Load4(const SkPMColor[4]);
+    static Sk4px Load2(const SkPMColor[2]);
+    static Sk4px Load1(const SkPMColor[1]);
+
+    void store4(SkPMColor[4]) const;
+    void store2(SkPMColor[2]) const;
+    void store1(SkPMColor[1]) const;
+
+    // 1, 2, or 4 SkPMColors with 16-bit components.
+    // This is most useful as the result of a multiply, e.g. from mulWiden().
+    class Wide : public Sk16h {
+    public:
+        Wide(const Sk16h& v) : Sk16h(v) {}
+
+        // Pack the top byte of each component back down into 4 SkPMColors.
+        Sk4px addNarrowHi(const Sk16h&) const;
+    private:
+        typedef Sk16h INHERITED;
+    };
+
+    Wide widenLo() const;               // ARGB -> 0A 0R 0G 0B
+    Wide widenHi() const;               // ARGB -> A0 R0 G0 B0
+    Wide mulWiden(const Sk16b&) const;  // 8-bit x 8-bit -> 16-bit components.
+
+    // A generic driver that maps fn over a src array into a dst array.
+    // fn should take an Sk4px (4 src pixels) and return an Sk4px (4 dst pixels).
+    template <typename Fn>
+    static void MapSrc(int count, SkPMColor* dst, const SkPMColor* src, Fn fn) {
+        // This looks a bit odd, but it helps loop-invariant hoisting across different calls to fn.
+        // Basically, we need to make sure we keep things inside a single loop.
+        while (count > 0) {
+            if (count >= 8) {
+                Sk4px dst0 = fn(Load4(src+0)),
+                      dst4 = fn(Load4(src+4));
+                dst0.store4(dst+0);
+                dst4.store4(dst+4);
+                dst += 8; src += 8; count -= 8;
+                continue;  // Keep our stride at 8 pixels as long as possible.
+            }
+            SkASSERT(count <= 7);
+            if (count >= 4) {
+                fn(Load4(src)).store4(dst);
+                dst += 4; src += 4; count -= 4;
+            }
+            if (count >= 2) {
+                fn(Load2(src)).store2(dst);
+                dst += 2; src += 2; count -= 2;
+            }
+            if (count >= 1) {
+                fn(Load1(src)).store1(dst);
+            }
+            break;
+        }
+    }
+
+private:
+    typedef Sk16b INHERITED;
+};
+
+#ifdef SKNX_NO_SIMD
+    #include "../opts/Sk4px_none.h"
+#else
+    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+        #include "../opts/Sk4px_SSE2.h"
+    #elif defined(SK_ARM_HAS_NEON)
+        #include "../opts/Sk4px_NEON.h"
+    #else
+        #include "../opts/Sk4px_none.h"
+    #endif
+#endif
+
+#endif//Sk4px_DEFINED
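MapSrc is what makes further ports of this pattern cheap: the 8/4/2/1-pixel striding lives in one place, and each blit supplies only the per-4-pixel kernel. A minimal sketch of a blit written against this API, assuming a hypothetical invert_row helper that is not part of this change:

    #include "Sk4px.h"

    // Hypothetical example: invert every byte of a row (alpha included).
    // The lambda sees 4 pixels at a time; MapSrc handles the striding.
    static void invert_row(SkPMColor dst[], const SkPMColor src[], int count) {
        Sk4px::MapSrc(count, dst, src, [](const Sk4px& px4) -> Sk4px {
            return Sk4px(Sk16b(0xFF)) - px4;  // 255 - b for each of the 16 bytes.
        });
    }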
diff --git a/src/core/SkBlitRow_D32.cpp b/src/core/SkBlitRow_D32.cpp
index de99894282..f5cb45d0fa 100644
--- a/src/core/SkBlitRow_D32.cpp
+++ b/src/core/SkBlitRow_D32.cpp
@@ -131,6 +131,8 @@ SkBlitRow::Proc32 SkBlitRow::Factory32(unsigned flags) {
     return proc;
 }
 
+#include "Sk4px.h"
+
 // Color32 uses the blend_256_round_alt algorithm from tests/BlendTest.cpp.
 // It's not quite perfect, but it's never wrong in the interesting edge cases,
 // and it's quite a bit faster than blend_perfect.
@@ -146,94 +148,10 @@ void SkBlitRow::Color32(SkPMColor dst[], const SkPMColor src[], int count, SkPMC
     invA += invA >> 7;
     SkASSERT(invA < 256);  // We've already handled alpha == 0 above.
 
-#if defined(SK_ARM_HAS_NEON)
-    uint16x8_t colorHigh = vshll_n_u8((uint8x8_t)vdup_n_u32(color), 8);
-    uint16x8_t colorAndRound = vaddq_u16(colorHigh, vdupq_n_u16(128));
-    uint8x8_t invA8 = vdup_n_u8(invA);
-
-    // Does the core work of blending color onto 4 pixels, returning the resulting 4 pixels.
-    auto kernel = [&](const uint32x4_t& src4) -> uint32x4_t {
-        uint16x8_t lo = vmull_u8(vget_low_u8( (uint8x16_t)src4), invA8),
-                   hi = vmull_u8(vget_high_u8((uint8x16_t)src4), invA8);
-        return (uint32x4_t)
-            vcombine_u8(vaddhn_u16(colorAndRound, lo), vaddhn_u16(colorAndRound, hi));
-    };
-
-    while (count >= 8) {
-        uint32x4_t dst0 = kernel(vld1q_u32(src+0)),
-                   dst4 = kernel(vld1q_u32(src+4));
-        vst1q_u32(dst+0, dst0);
-        vst1q_u32(dst+4, dst4);
-        src += 8;
-        dst += 8;
-        count -= 8;
-    }
-    if (count >= 4) {
-        vst1q_u32(dst, kernel(vld1q_u32(src)));
-        src += 4;
-        dst += 4;
-        count -= 4;
-    }
-    if (count >= 2) {
-        uint32x2_t src2 = vld1_u32(src);
-        vst1_u32(dst, vget_low_u32(kernel(vcombine_u32(src2, src2))));
-        src += 2;
-        dst += 2;
-        count -= 2;
-    }
-    if (count >= 1) {
-        vst1q_lane_u32(dst, kernel(vdupq_n_u32(*src)), 0);
-    }
+    Sk16h colorHighAndRound = Sk4px(color).widenHi() + Sk16h(128);
+    Sk16b invA_16x(invA);
 
-#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
-    __m128i colorHigh = _mm_unpacklo_epi8(_mm_setzero_si128(), _mm_set1_epi32(color));
-    __m128i colorAndRound = _mm_add_epi16(colorHigh, _mm_set1_epi16(128));
-    __m128i invA16 = _mm_set1_epi16(invA);
-
-    // Does the core work of blending color onto 4 pixels, returning the resulting 4 pixels.
-    auto kernel = [&](const __m128i& src4) -> __m128i {
-        __m128i lo = _mm_mullo_epi16(invA16, _mm_unpacklo_epi8(src4, _mm_setzero_si128())),
-                hi = _mm_mullo_epi16(invA16, _mm_unpackhi_epi8(src4, _mm_setzero_si128()));
-        return _mm_packus_epi16(_mm_srli_epi16(_mm_add_epi16(colorAndRound, lo), 8),
-                                _mm_srli_epi16(_mm_add_epi16(colorAndRound, hi), 8));
-    };
-
-    while (count >= 8) {
-        __m128i dst0 = kernel(_mm_loadu_si128((const __m128i*)(src+0))),
-                dst4 = kernel(_mm_loadu_si128((const __m128i*)(src+4)));
-        _mm_storeu_si128((__m128i*)(dst+0), dst0);
-        _mm_storeu_si128((__m128i*)(dst+4), dst4);
-        src += 8;
-        dst += 8;
-        count -= 8;
-    }
-    if (count >= 4) {
-        _mm_storeu_si128((__m128i*)dst, kernel(_mm_loadu_si128((const __m128i*)src)));
-        src += 4;
-        dst += 4;
-        count -= 4;
-    }
-    if (count >= 2) {
-        _mm_storel_epi64((__m128i*)dst, kernel(_mm_loadl_epi64((const __m128i*)src)));
-        src += 2;
-        dst += 2;
-        count -= 2;
-    }
-    if (count >= 1) {
-        *dst = _mm_cvtsi128_si32(kernel(_mm_cvtsi32_si128(*src)));
-    }
-#else  // Neither NEON nor SSE2.
-    unsigned round = (128 << 16) + (128 << 0);
-
-    while (count --> 0) {
-        // Our math is 16-bit, so we can do a little bit of SIMD in 32-bit registers.
-        const uint32_t mask = 0x00FF00FF;
-        uint32_t rb = (((*src >> 0) & mask) * invA + round) >> 8,  // _r_b
-                 ag = (((*src >> 8) & mask) * invA + round) >> 0;  // a_g_
-        *dst = color + ((rb & mask) | (ag & ~mask));
-        src++;
-        dst++;
-    }
-#endif
+    Sk4px::MapSrc(count, dst, src, [&](const Sk4px& src4) -> Sk4px {
+        return src4.mulWiden(invA_16x).addNarrowHi(colorHighAndRound);
+    });
 }
-
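Per 8-bit channel, the new code computes exactly what this scalar sketch does: widenHi supplies the c*256 term, Sk16h(128) the rounding bias, mulWiden the s*invA product, and addNarrowHi the final sum and >>8. Because alpha == 0 was handled earlier, the 16-bit sum never overflows, which is what makes taking just the high byte safe. A scalar restatement (a sketch; the real reference lives in tests/BlendTest.cpp):

    // One channel of the blend above: c = color channel (premultiplied),
    // s = src channel, invA in [0, 256) after the invA += invA >> 7 bias.
    static inline uint8_t blend_256_round_alt(uint8_t c, uint8_t s, unsigned invA) {
        return (uint8_t)((c*256 + 128 + s*invA) >> 8);
    }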
diff --git a/src/core/SkNx.h b/src/core/SkNx.h
index 4cfc6e3d9a..9d319623a6 100644
--- a/src/core/SkNx.h
+++ b/src/core/SkNx.h
@@ -30,7 +30,7 @@ public:
     bool allTrue() const { return fLo.allTrue() && fHi.allTrue(); }
     bool anyTrue() const { return fLo.anyTrue() || fHi.anyTrue(); }
 
-private:
+protected:
    REQUIRE(0 == (N & (N-1)));
    SkNb<N/2, Bytes> fLo, fHi;
 };
@@ -45,9 +45,12 @@ public:
         return SkNi(SkNi<N/2,T>::Load(vals), SkNi<N/2,T>::Load(vals+N/2));
     }
 
-    SkNi(T a, T b)                                : fLo(a),       fHi(b)       { REQUIRE(N==2); }
-    SkNi(T a, T b, T c, T d)                      : fLo(a,b),     fHi(c,d)     { REQUIRE(N==4); }
-    SkNi(T a, T b, T c, T d, T e, T f, T g, T h)  : fLo(a,b,c,d), fHi(e,f,g,h) { REQUIRE(N==8); }
+    SkNi(T a, T b)                               : fLo(a),       fHi(b)       { REQUIRE(N==2); }
+    SkNi(T a, T b, T c, T d)                     : fLo(a,b),     fHi(c,d)     { REQUIRE(N==4); }
+    SkNi(T a, T b, T c, T d, T e, T f, T g, T h) : fLo(a,b,c,d), fHi(e,f,g,h) { REQUIRE(N==8); }
+    SkNi(T a, T b, T c, T d, T e, T f, T g, T h,
+         T i, T j, T k, T l, T m, T n, T o, T p)
+        : fLo(a,b,c,d, e,f,g,h), fHi(i,j,k,l, m,n,o,p) { REQUIRE(N==16); }
 
     void store(T vals[N]) const {
         fLo.store(vals);
@@ -68,7 +71,7 @@ public:
         return k < N/2 ? fLo.template kth<k>() : fHi.template kth<k-N/2>();
     }
 
-private:
+protected:
     REQUIRE(0 == (N & (N-1)));
 
     SkNi<N/2, T> fLo, fHi;
@@ -133,7 +136,7 @@ public:
         return k < N/2 ? fLo.template kth<k>() : fHi.template kth<k-N/2>();
     }
 
-private:
+protected:
     REQUIRE(0 == (N & (N-1)));
     SkNf(const SkNf<N/2, T>& lo, const SkNf<N/2, T>& hi) : fLo(lo), fHi(hi) {}
@@ -150,7 +153,7 @@ public:
     explicit SkNb(bool val) : fVal(val) {}
     bool allTrue() const { return fVal; }
     bool anyTrue() const { return fVal; }
-private:
+protected:
     bool fVal;
 };
@@ -175,7 +178,7 @@ public:
         return fVal;
     }
 
-private:
+protected:
     T fVal;
 };
@@ -223,7 +226,7 @@ public:
         return fVal;
     }
 
-private:
+protected:
     // We do double sqrts natively, or via floats for any other type.
     template <typename U> static U Sqrt(U val) { return (U) ::sqrtf((float)val); }
@@ -263,9 +266,13 @@ typedef SkNf<4, float> Sk4f;
 typedef SkNf<4, double> Sk4d;
 typedef SkNf<4, SkScalar> Sk4s;
 
-typedef SkNi<4, uint16_t> Sk4h;
-typedef SkNi<8, uint16_t> Sk8h;
+typedef SkNi<4,  uint16_t> Sk4h;
+typedef SkNi<8,  uint16_t> Sk8h;
+typedef SkNi<16, uint16_t> Sk16h;
 
-typedef SkNi<4, int> Sk4i;
+typedef SkNi<16, uint8_t> Sk16b;
+
+typedef SkNi<4, int32_t>  Sk4i;
+typedef SkNi<4, uint32_t> Sk4u;
 
 #endif//SkNx_DEFINED
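The new Sk16b and Sk16h typedefs slot into SkNx's recursive scheme: any SkNi<N,T> without a platform specialization is just a pair of SkNi<N/2,T> halves, so a 16-lane vector automatically becomes two 8-lane SIMD vectors where only the 8-lane type is specialized (the fLo/fHi members that private->protected exposes here). A standalone toy model of that idea, with hypothetical names:

    // Toy model of SkNx's recursive halving (hypothetical VecN, not Skia code).
    #include <cstdint>

    template <int N, typename T>
    struct VecN {
        VecN<N/2, T> lo, hi;  // A vector of N lanes is a pair of N/2-lane vectors...
        VecN operator+(const VecN& o) const { return { lo + o.lo, hi + o.hi }; }
    };

    template <typename T>
    struct VecN<1, T> {       // ...bottoming out at a scalar.
        T val;
        VecN operator+(const VecN& o) const { return { T(val + o.val) }; }
    };

    // A platform can specialize, say, VecN<8, uint16_t> with real SIMD, and the
    // generic VecN<16, uint16_t> then runs as two SIMD halves for free.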
diff --git a/src/opts/Sk4px_NEON.h b/src/opts/Sk4px_NEON.h
new file mode 100644
index 0000000000..ede5f2cd8e
--- /dev/null
+++ b/src/opts/Sk4px_NEON.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+inline Sk4px::Sk4px(SkPMColor px) : INHERITED((uint8x16_t)vdupq_n_u32(px)) {}
+
+inline Sk4px Sk4px::Load4(const SkPMColor px[4]) {
+    return Sk16b((uint8x16_t)vld1q_u32(px));
+}
+inline Sk4px Sk4px::Load2(const SkPMColor px[2]) {
+    uint32x2_t px2 = vld1_u32(px);
+    return Sk16b((uint8x16_t)vcombine_u32(px2, px2));
+}
+inline Sk4px Sk4px::Load1(const SkPMColor px[1]) {
+    return Sk16b((uint8x16_t)vdupq_n_u32(*px));
+}
+
+inline void Sk4px::store4(SkPMColor px[4]) const {
+    vst1q_u32(px, (uint32x4_t)this->fVec);
+}
+inline void Sk4px::store2(SkPMColor px[2]) const {
+    vst1_u32(px, (uint32x2_t)vget_low_u8(this->fVec));
+}
+inline void Sk4px::store1(SkPMColor px[1]) const {
+    vst1q_lane_u32(px, (uint32x4_t)this->fVec, 0);
+}
+
+inline Sk4px::Wide Sk4px::widenLo() const {
+    return Sk16h(vmovl_u8(vget_low_u8 (this->fVec)),
+                 vmovl_u8(vget_high_u8(this->fVec)));
+}
+
+inline Sk4px::Wide Sk4px::widenHi() const {
+    return Sk16h(vshll_n_u8(vget_low_u8 (this->fVec), 8),
+                 vshll_n_u8(vget_high_u8(this->fVec), 8));
+}
+
+inline Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const {
+    return Sk16h(vmull_u8(vget_low_u8 (this->fVec), vget_low_u8 (other.fVec)),
+                 vmull_u8(vget_high_u8(this->fVec), vget_high_u8(other.fVec)));
+}
+
+inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
+    const Sk4px::Wide o(other);  // Should be no code, but allows us to access fLo, fHi.
+    return Sk16b(vcombine_u8(vaddhn_u16(this->fLo.fVec, o.fLo.fVec),
+                             vaddhn_u16(this->fHi.fVec, o.fHi.fVec)));
+}
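addNarrowHi leans on vaddhn_u16, which adds two uint16x8 vectors and keeps only the high byte of each 16-bit sum, i.e. add-then->>8 in a single instruction. A per-lane scalar model (the addition is modular, as the cast makes explicit):

    #include <cstdint>

    // What vaddhn_u16 does to each lane: add, then take the high half.
    static inline uint8_t addhn_lane(uint16_t a, uint16_t b) {
        return (uint8_t)((uint16_t)(a + b) >> 8);
    }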
diff --git a/src/opts/Sk4px_SSE2.h b/src/opts/Sk4px_SSE2.h
new file mode 100644
index 0000000000..d036328c14
--- /dev/null
+++ b/src/opts/Sk4px_SSE2.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+inline Sk4px::Sk4px(SkPMColor px) : INHERITED(_mm_set1_epi32(px)) {}
+
+inline Sk4px Sk4px::Load4(const SkPMColor px[4]) {
+    return Sk16b(_mm_loadu_si128((const __m128i*)px));
+}
+inline Sk4px Sk4px::Load2(const SkPMColor px[2]) {
+    return Sk16b(_mm_loadl_epi64((const __m128i*)px));
+}
+inline Sk4px Sk4px::Load1(const SkPMColor px[1]) { return Sk16b(_mm_cvtsi32_si128(*px)); }
+
+inline void Sk4px::store4(SkPMColor px[4]) const { _mm_storeu_si128((__m128i*)px, this->fVec); }
+inline void Sk4px::store2(SkPMColor px[2]) const { _mm_storel_epi64((__m128i*)px, this->fVec); }
+inline void Sk4px::store1(SkPMColor px[1]) const { *px = _mm_cvtsi128_si32(this->fVec); }
+
+inline Sk4px::Wide Sk4px::widenLo() const {
+    return Sk16h(_mm_unpacklo_epi8(this->fVec, _mm_setzero_si128()),
+                 _mm_unpackhi_epi8(this->fVec, _mm_setzero_si128()));
+}
+
+inline Sk4px::Wide Sk4px::widenHi() const {
+    return Sk16h(_mm_unpacklo_epi8(_mm_setzero_si128(), this->fVec),
+                 _mm_unpackhi_epi8(_mm_setzero_si128(), this->fVec));
+}
+
+inline Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const {
+    return this->widenLo() * Sk4px(other).widenLo();
+}
+
+inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
+    Sk4px::Wide r = (*this + other) >> 8;
+    return Sk4px(_mm_packus_epi16(r.fLo.fVec, r.fHi.fVec));
+}
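widenHi gets its <<8 for free here: unpacking a zero byte *below* each source byte produces 16-bit lanes equal to byte<<8, with no shift instruction. A small, self-contained check of that identity:

    #include <emmintrin.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
        __m128i v    = _mm_set1_epi8(0x5A);
        // Zero lands in the low byte of each 16-bit lane, v in the high byte.
        __m128i wide = _mm_unpacklo_epi8(_mm_setzero_si128(), v);
        uint16_t lanes[8];
        _mm_storeu_si128((__m128i*)lanes, wide);
        printf("0x%04X\n", lanes[0]);  // 0x5A00 == 0x5A << 8
        return 0;
    }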
diff --git a/src/opts/Sk4px_none.h b/src/opts/Sk4px_none.h
new file mode 100644
index 0000000000..c8c33a0d16
--- /dev/null
+++ b/src/opts/Sk4px_none.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "SkUtils.h"
+
+static_assert(sizeof(Sk4px) == 16, "This file uses memcpy / sk_memset32, so exact size matters.");
+
+inline Sk4px::Sk4px(SkPMColor px) {
+    sk_memset32((uint32_t*)this, px, 4);
+}
+
+inline Sk4px Sk4px::Load4(const SkPMColor px[4]) {
+    Sk4px px4 = Sk16b();
+    memcpy(&px4, px, 16);
+    return px4;
+}
+
+inline Sk4px Sk4px::Load2(const SkPMColor px[2]) {
+    Sk4px px2 = Sk16b();
+    memcpy(&px2, px, 8);
+    return px2;
+}
+
+inline Sk4px Sk4px::Load1(const SkPMColor px[1]) {
+    Sk4px px1 = Sk16b();
+    memcpy(&px1, px, 4);
+    return px1;
+}
+
+inline void Sk4px::store4(SkPMColor px[4]) const { memcpy(px, this, 16); }
+inline void Sk4px::store2(SkPMColor px[2]) const { memcpy(px, this,  8); }
+inline void Sk4px::store1(SkPMColor px[1]) const { memcpy(px, this,  4); }
+
+inline Sk4px::Wide Sk4px::widenLo() const {
+    return Sk16h(this->kth< 0>(), this->kth< 1>(), this->kth< 2>(), this->kth< 3>(),
+                 this->kth< 4>(), this->kth< 5>(), this->kth< 6>(), this->kth< 7>(),
+                 this->kth< 8>(), this->kth< 9>(), this->kth<10>(), this->kth<11>(),
+                 this->kth<12>(), this->kth<13>(), this->kth<14>(), this->kth<15>());
+}
+
+inline Sk4px::Wide Sk4px::widenHi() const { return this->widenLo() << 8; }
+
+inline Sk4px::Wide Sk4px::mulWiden(const Sk16b& other) const {
+    return this->widenLo() * Sk4px(other).widenLo();
+}
+
+inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
+    Sk4px::Wide r = (*this + other) >> 8;
+    return Sk16b(r.kth< 0>(), r.kth< 1>(), r.kth< 2>(), r.kth< 3>(),
+                 r.kth< 4>(), r.kth< 5>(), r.kth< 6>(), r.kth< 7>(),
+                 r.kth< 8>(), r.kth< 9>(), r.kth<10>(), r.kth<11>(),
+                 r.kth<12>(), r.kth<13>(), r.kth<14>(), r.kth<15>());
+}
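The portable path depends on Sk4px being exactly 16 bytes (hence the static_assert) so that memcpy in and out of the object is a plain 16-byte copy; memcpy is also the standards-blessed way to reinterpret bytes without aliasing violations. The same idiom in isolation, for a single pixel:

    #include <cstdint>
    #include <cstring>

    // Alias-safe load/store of a 32-bit pixel from raw bytes; compilers
    // lower these memcpys to single moves.
    static inline uint32_t load_px(const void* p) {
        uint32_t v;
        std::memcpy(&v, p, sizeof(v));
        return v;
    }
    static inline void store_px(void* p, uint32_t v) {
        std::memcpy(p, &v, sizeof(v));
    }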
diff --git a/src/opts/SkNx_neon.h b/src/opts/SkNx_neon.h
index f1deabc5fe..b9d4357e5a 100644
--- a/src/opts/SkNx_neon.h
+++ b/src/opts/SkNx_neon.h
@@ -10,6 +10,28 @@
 
 #include <arm_neon.h>
 
+// Well, this is absurd. The shifts require compile-time constant arguments.
+
+#define SHIFT8(op, v, bits) switch(bits) { \
+    case 1: return op(v, 1); case 2: return op(v, 2); case 3: return op(v, 3); \
+    case 4: return op(v, 4); case 5: return op(v, 5); case 6: return op(v, 6); \
+    case 7: return op(v, 7); \
+    } return fVec
+
+#define SHIFT16(op, v, bits) if (bits < 8) { SHIFT8(op, v, bits); } switch(bits) { \
+    case  8: return op(v,  8); case  9: return op(v,  9); \
+    case 10: return op(v, 10); case 11: return op(v, 11); case 12: return op(v, 12); \
+    case 13: return op(v, 13); case 14: return op(v, 14); case 15: return op(v, 15); \
+    } return fVec
+
+#define SHIFT32(op, v, bits) if (bits < 16) { SHIFT16(op, v, bits); } switch(bits) { \
+    case 16: return op(v, 16); case 17: return op(v, 17); case 18: return op(v, 18); \
+    case 19: return op(v, 19); case 20: return op(v, 20); case 21: return op(v, 21); \
+    case 22: return op(v, 22); case 23: return op(v, 23); case 24: return op(v, 24); \
+    case 25: return op(v, 25); case 26: return op(v, 26); case 27: return op(v, 27); \
+    case 28: return op(v, 28); case 29: return op(v, 29); case 30: return op(v, 30); \
+    case 31: return op(v, 31); } return fVec
+
 template <>
 class SkNb<2, 4> {
 public:
@@ -18,7 +40,7 @@ public:
     SkNb() {}
     bool allTrue() const { return vget_lane_u32(fVec, 0) && vget_lane_u32(fVec, 1); }
     bool anyTrue() const { return vget_lane_u32(fVec, 0) || vget_lane_u32(fVec, 1); }
-private:
+
     uint32x2_t fVec;
 };
@@ -32,7 +54,7 @@ public:
                                  && vgetq_lane_u32(fVec, 2) && vgetq_lane_u32(fVec, 3); }
     bool anyTrue() const { return vgetq_lane_u32(fVec, 0) || vgetq_lane_u32(fVec, 1)
                                  || vgetq_lane_u32(fVec, 2) || vgetq_lane_u32(fVec, 3); }
-private:
+
     uint32x4_t fVec;
 };
@@ -104,7 +126,6 @@ public:
         return vget_lane_f32(fVec, k&1);
     }
 
-private:
     float32x2_t fVec;
 };
@@ -117,7 +138,7 @@ public:
     SkNb() {}
     bool allTrue() const { return vgetq_lane_u64(fVec, 0) && vgetq_lane_u64(fVec, 1); }
     bool anyTrue() const { return vgetq_lane_u64(fVec, 0) || vgetq_lane_u64(fVec, 1); }
-private:
+
     uint64x2_t fVec;
 };
@@ -181,7 +202,6 @@ public:
         return vgetq_lane_f64(fVec, k&1);
     }
 
-private:
     float64x2_t fVec;
 };
 #endif//defined(SK_CPU_ARM64)
@@ -202,29 +222,14 @@ public:
     SkNi operator - (const SkNi& o) const { return vsubq_s32(fVec, o.fVec); }
     SkNi operator * (const SkNi& o) const { return vmulq_s32(fVec, o.fVec); }
 
-    // Well, this is absurd. The shifts require compile-time constant arguments.
-#define SHIFT(op, v, bits) switch(bits) { \
-    case 1: return op(v, 1); case 2: return op(v, 2); case 3: return op(v, 3); \
-    case 4: return op(v, 4); case 5: return op(v, 5); case 6: return op(v, 6); \
-    case 7: return op(v, 7); case 8: return op(v, 8); case 9: return op(v, 9); \
-    case 10: return op(v, 10); case 11: return op(v, 11); case 12: return op(v, 12); \
-    case 13: return op(v, 13); case 14: return op(v, 14); case 15: return op(v, 15); \
-    case 16: return op(v, 16); case 17: return op(v, 17); case 18: return op(v, 18); \
-    case 19: return op(v, 19); case 20: return op(v, 20); case 21: return op(v, 21); \
-    case 22: return op(v, 22); case 23: return op(v, 23); case 24: return op(v, 24); \
-    case 25: return op(v, 25); case 26: return op(v, 26); case 27: return op(v, 27); \
-    case 28: return op(v, 28); case 29: return op(v, 29); case 30: return op(v, 30); \
-    case 31: return op(v, 31); } return fVec
-
-    SkNi operator << (int bits) const { SHIFT(vshlq_n_s32, fVec, bits); }
-    SkNi operator >> (int bits) const { SHIFT(vshrq_n_s32, fVec, bits); }
-#undef SHIFT
+    SkNi operator << (int bits) const { SHIFT32(vshlq_n_s32, fVec, bits); }
+    SkNi operator >> (int bits) const { SHIFT32(vshrq_n_s32, fVec, bits); }
 
     template <int k> int kth() const {
         SkASSERT(0 <= k && k < 4);
         return vgetq_lane_s32(fVec, k&3);
     }
-protected:
+
     int32x4_t fVec;
 };
@@ -298,8 +303,75 @@ public:
         return vgetq_lane_f32(fVec, k&3);
     }
 
-protected:
     float32x4_t fVec;
 };
 
+template <>
+class SkNi<8, uint16_t> {
+public:
+    SkNi(const uint16x8_t& vec) : fVec(vec) {}
+
+    SkNi() {}
+    explicit SkNi(uint16_t val) : fVec(vdupq_n_u16(val)) {}
+    static SkNi Load(const uint16_t vals[8]) { return vld1q_u16(vals); }
+
+    SkNi(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
+         uint16_t e, uint16_t f, uint16_t g, uint16_t h) {
+        fVec = (uint16x8_t) { a,b,c,d, e,f,g,h };
+    }
+
+    void store(uint16_t vals[8]) const { vst1q_u16(vals, fVec); }
+
+    SkNi operator + (const SkNi& o) const { return vaddq_u16(fVec, o.fVec); }
+    SkNi operator - (const SkNi& o) const { return vsubq_u16(fVec, o.fVec); }
+    SkNi operator * (const SkNi& o) const { return vmulq_u16(fVec, o.fVec); }
+
+    SkNi operator << (int bits) const { SHIFT16(vshlq_n_u16, fVec, bits); }
+    SkNi operator >> (int bits) const { SHIFT16(vshrq_n_u16, fVec, bits); }
+
+    template <int k> uint16_t kth() const {
+        SkASSERT(0 <= k && k < 8);
+        return vgetq_lane_u16(fVec, k&7);
+    }
+
+    uint16x8_t fVec;
+};
+
+template <>
+class SkNi<16, uint8_t> {
+public:
+    SkNi(const uint8x16_t& vec) : fVec(vec) {}
+
+    SkNi() {}
+    explicit SkNi(uint8_t val) : fVec(vdupq_n_u8(val)) {}
+    static SkNi Load(const uint8_t vals[16]) { return vld1q_u8(vals); }
+
+    SkNi(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
+         uint8_t e, uint8_t f, uint8_t g, uint8_t h,
+         uint8_t i, uint8_t j, uint8_t k, uint8_t l,
+         uint8_t m, uint8_t n, uint8_t o, uint8_t p) {
+        fVec = (uint8x16_t) { a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p };
+    }
+
+    void store(uint8_t vals[16]) const { vst1q_u8(vals, fVec); }
+
+    SkNi operator + (const SkNi& o) const { return vaddq_u8(fVec, o.fVec); }
+    SkNi operator - (const SkNi& o) const { return vsubq_u8(fVec, o.fVec); }
+    SkNi operator * (const SkNi& o) const { return vmulq_u8(fVec, o.fVec); }
+
+    SkNi operator << (int bits) const { SHIFT8(vshlq_n_u8, fVec, bits); }
+    SkNi operator >> (int bits) const { SHIFT8(vshrq_n_u8, fVec, bits); }
+
+    template <int k> uint8_t kth() const {
+        SkASSERT(0 <= k && k < 16);
+        return vgetq_lane_u8(fVec, k&15);
+    }
+
+    uint8x16_t fVec;
+};
+
+#undef SHIFT32
+#undef SHIFT16
+#undef SHIFT8
+
 #endif//SkNx_neon_DEFINED
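The SHIFT8/16/32 macros exist because vshlq_n_* and vshrq_n_* demand an immediate, so a runtime count has to be dispatched through a switch where each case carries a literal. (Note the uint8_t kth() above also fixes what would otherwise be out-of-range lane math: the assert must allow k up to 15 and the mask must be k&15.) For what it's worth, NEON also offers register-count shifts, so an alternative sketch (not what this CL does) is vshlq_u16 with a per-lane signed count, negative to shift right:

    #include <arm_neon.h>

    // Alternative to the switch-based SHIFT macros: vshlq_u16 takes a signed
    // per-lane shift count; negate it to shift right. Trades the switch for a dup.
    static inline uint16x8_t shl_u16_runtime(uint16x8_t v, int bits) {
        return vshlq_u16(v, vdupq_n_s16((int16_t)bits));
    }
    static inline uint16x8_t shr_u16_runtime(uint16x8_t v, int bits) {
        return vshlq_u16(v, vdupq_n_s16((int16_t)-bits));
    }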
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
index cbe624ba2d..b3339f9957 100644
--- a/src/opts/SkNx_sse.h
+++ b/src/opts/SkNx_sse.h
@@ -20,7 +20,6 @@ public:
     bool allTrue() const { return 0xff == (_mm_movemask_epi8(fVec) & 0xff); }
     bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(fVec) & 0xff); }
 
-private:
     __m128i fVec;
 };
@@ -33,7 +32,6 @@ public:
     bool allTrue() const { return 0xffff == _mm_movemask_epi8(fVec); }
     bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(fVec); }
 
-private:
     __m128i fVec;
 };
@@ -46,7 +44,6 @@ public:
     bool allTrue() const { return 0xffff == _mm_movemask_epi8(fVec); }
    bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(fVec); }
 
-private:
     __m128i fVec;
 };
@@ -95,7 +92,6 @@ public:
         return pun.fs[k&1];
     }
 
-private:
     __m128 fVec;
 };
@@ -141,7 +137,6 @@ public:
         return pun.ds[k&1];
     }
 
-private:
     __m128d fVec;
 };
@@ -179,7 +174,7 @@ public:
             default: SkASSERT(false); return 0;
         }
     }
-protected:
+
     __m128i fVec;
 };
@@ -227,7 +222,6 @@ public:
         return pun.fs[k&3];
     }
 
-protected:
     __m128 fVec;
 };
@@ -254,7 +248,7 @@ public:
         SkASSERT(0 <= k && k < 4);
         return _mm_extract_epi16(fVec, k);
     }
-protected:
+
     __m128i fVec;
 };
@@ -282,7 +276,41 @@ public:
         SkASSERT(0 <= k && k < 8);
         return _mm_extract_epi16(fVec, k);
     }
-protected:
+
+    __m128i fVec;
+};
+
+template <>
+class SkNi<16, uint8_t> {
+public:
+    SkNi(const __m128i& vec) : fVec(vec) {}
+
+    SkNi() {}
+    explicit SkNi(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
+    static SkNi Load(const uint8_t vals[16]) { return _mm_loadu_si128((const __m128i*)vals); }
+    SkNi(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
+         uint8_t e, uint8_t f, uint8_t g, uint8_t h,
+         uint8_t i, uint8_t j, uint8_t k, uint8_t l,
+         uint8_t m, uint8_t n, uint8_t o, uint8_t p)
+        : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p)) {}
+
+    void store(uint8_t vals[16]) const { _mm_storeu_si128((__m128i*)vals, fVec); }
+
+    SkNi operator + (const SkNi& o) const { return _mm_add_epi8(fVec, o.fVec); }
+    SkNi operator - (const SkNi& o) const { return _mm_sub_epi8(fVec, o.fVec); }
+
+    // SSE cannot multiply or shift vectors of uint8_t.
+    SkNi operator * (const SkNi& o) const { SkASSERT(false); return fVec; }
+    SkNi operator << (int bits) const { SkASSERT(false); return fVec; }
+    SkNi operator >> (int bits) const { SkASSERT(false); return fVec; }
+
+    template <int k> uint8_t kth() const {
+        SkASSERT(0 <= k && k < 16);
+        // SSE4.1 would just `return _mm_extract_epi8(fVec, k)`. We have to read 16-bits instead.
+        int pair = _mm_extract_epi16(fVec, k/2);
+        return k % 2 == 0 ? pair : (pair >> 8);
+    }
+
+    __m128i fVec;
+};
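SSE2 genuinely has no byte-granularity multiply or shift, which is why those operators assert; callers like Sk4px get products by widening to 16-bit lanes first, which is exactly what mulWiden does on this path. A self-contained sketch of that workaround for the low eight bytes:

    #include <emmintrin.h>
    #include <cstdint>
    #include <cstdio>

    // SSE2 byte multiply via widening: unpack u8 -> u16, multiply, keep it wide.
    // This mirrors what Sk4px::mulWiden does on the SSE2 path.
    static __m128i mul_u8_lo_widen(__m128i a, __m128i b) {
        __m128i z = _mm_setzero_si128();
        return _mm_mullo_epi16(_mm_unpacklo_epi8(a, z),
                               _mm_unpacklo_epi8(b, z));
    }

    int main() {
        uint16_t out[8];
        _mm_storeu_si128((__m128i*)out,
                         mul_u8_lo_widen(_mm_set1_epi8(20), _mm_set1_epi8(10)));
        printf("%u\n", out[0]);  // 200
        return 0;
    }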