author    | mtklein <mtklein@chromium.org>       | 2015-12-14 11:25:18 -0800
committer | Commit bot <commit-bot@chromium.org> | 2015-12-14 11:25:18 -0800
commit    | 6f37b4a4757ea3eb00c76162cc37f8a56c3b8bdb (patch)
tree      | 1cf540ae66132e8ba6e59ce5d6f859cd096c1bf3 /src/opts
parent    | 4e4155df100b77d11bd14591c7716743369fde9b (diff)
Unify some SkNx code
- one base case and one N=1 case instead of two each (or three with doubles)
- use SkNx_cast instead of FromBytes/toBytes
- 4-at-a-time Sk4f::ToBytes becomes a special standalone Sk4f_ToBytes
If I did everything right, this'll be perf- and pixel-neutral. (A sketch of the call-site change follows below.)
https://gold.skia.org/search2?issue=1526523003&unt=true&query=source_type%3Dgm&master=false
BUG=skia:
CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot
Review URL: https://codereview.chromium.org/1526523003
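
At call sites the migration reads roughly as in the following minimal sketch. This is not code from this CL: the helper names (pmcolor_to_floats, floats_to_pmcolor) and the include path are hypothetical assumptions; only the Sk4b::Load / SkNx_cast / store calls mirror the diff below.

```cpp
// Minimal sketch of the FromBytes/toBytes -> SkNx_cast migration.
// Helper names are hypothetical; the SkNx calls match the patterns in this CL.
#include "SkNx.h"  // assumed include path for Sk4f, Sk4b, SkNx_cast

static Sk4f pmcolor_to_floats(SkPMColor c) {
    // Old: Sk4f::FromBytes((const uint8_t*)&c)
    // New: load the four bytes as a Sk4b, then widen to floats with SkNx_cast.
    return SkNx_cast<float>(Sk4b::Load((const uint8_t*)&c));
}

static SkPMColor floats_to_pmcolor(const Sk4f& f) {
    // Old: f.toBytes((uint8_t*)&c)
    // New: narrow to a Sk4b with SkNx_cast, then store its four bytes.
    SkPMColor c;
    SkNx_cast<uint8_t>(f).store((uint8_t*)&c);
    return c;
}
```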
Diffstat (limited to 'src/opts')
-rw-r--r-- | src/opts/SkColorCubeFilter_opts.h | 10
-rw-r--r-- | src/opts/SkNx_avx.h               | 36
-rw-r--r-- | src/opts/SkNx_neon.h              | 63
-rw-r--r-- | src/opts/SkNx_sse.h               | 91
-rw-r--r-- | src/opts/SkXfermode_opts.h        |  4
5 files changed, 126 insertions, 78 deletions
diff --git a/src/opts/SkColorCubeFilter_opts.h b/src/opts/SkColorCubeFilter_opts.h
index 4c394051bb..ab8d1d4c9b 100644
--- a/src/opts/SkColorCubeFilter_opts.h
+++ b/src/opts/SkColorCubeFilter_opts.h
@@ -59,10 +59,10 @@ void color_cube_filter_span(const SkPMColor src[],
             const SkColor lutColor10 = colorCube[ix + i10];
             const SkColor lutColor11 = colorCube[ix + i11];
-            Sk4f sum = Sk4f::FromBytes((const uint8_t*)&lutColor00) * g0b0;
-            sum = sum + Sk4f::FromBytes((const uint8_t*)&lutColor01) * g0b1;
-            sum = sum + Sk4f::FromBytes((const uint8_t*)&lutColor10) * g1b0;
-            sum = sum + Sk4f::FromBytes((const uint8_t*)&lutColor11) * g1b1;
+            Sk4f sum = SkNx_cast<float>(Sk4b::Load((const uint8_t*)&lutColor00)) * g0b0;
+            sum = sum + SkNx_cast<float>(Sk4b::Load((const uint8_t*)&lutColor01)) * g0b1;
+            sum = sum + SkNx_cast<float>(Sk4b::Load((const uint8_t*)&lutColor10)) * g1b0;
+            sum = sum + SkNx_cast<float>(Sk4b::Load((const uint8_t*)&lutColor11)) * g1b1;
             color = color + sum * Sk4f((float)colorToFactors[x][r]);
         }
         if (a != 255) {
@@ -74,7 +74,7 @@ void color_cube_filter_span(const SkPMColor src[],
         color = SkNx_shuffle<2,1,0,3>(color);
 #endif
         uint8_t* dstBytes = (uint8_t*)(dst+i);
-        color.toBytes(dstBytes);
+        SkNx_cast<uint8_t>(color).store(dstBytes);
         dstBytes[SK_A32_SHIFT/8] = a;
     }
 }
diff --git a/src/opts/SkNx_avx.h b/src/opts/SkNx_avx.h
index 6236769652..f635181a92 100644
--- a/src/opts/SkNx_avx.h
+++ b/src/opts/SkNx_avx.h
@@ -26,27 +26,10 @@ public:
     SkNx(float val) : fVec(_mm256_set1_ps(val)) {}
     static SkNx Load(const float vals[8]) { return _mm256_loadu_ps(vals); }
-    static SkNx FromBytes(const uint8_t bytes[8]) {
-        __m128i fix8  = _mm_loadl_epi64((const __m128i*)bytes),
-                fix16 = _mm_unpacklo_epi8 (fix8 , _mm_setzero_si128()),
-                lo32  = _mm_unpacklo_epi16(fix16, _mm_setzero_si128()),
-                hi32  = _mm_unpackhi_epi16(fix16, _mm_setzero_si128());
-        __m256i fix32 = _mm256_insertf128_si256(_mm256_castsi128_si256(lo32), hi32, 1);
-        return _mm256_cvtepi32_ps(fix32);
-    }
-
     SkNx(float a, float b, float c, float d, float e, float f, float g, float h)
         : fVec(_mm256_setr_ps(a,b,c,d,e,f,g,h)) {}
 
     void store(float vals[8]) const { _mm256_storeu_ps(vals, fVec); }
-    void toBytes(uint8_t bytes[8]) const {
-        __m256i fix32 = _mm256_cvttps_epi32(fVec);
-        __m128i lo32  = _mm256_extractf128_si256(fix32, 0),
-                hi32  = _mm256_extractf128_si256(fix32, 1),
-                fix16 = _mm_packus_epi32(lo32, hi32),
-                fix8  = _mm_packus_epi16(fix16, fix16);
-        _mm_storel_epi64((__m128i*)bytes, fix8);
-    }
 
     SkNx operator + (const SkNx& o) const { return _mm256_add_ps(fVec, o.fVec); }
     SkNx operator - (const SkNx& o) const { return _mm256_sub_ps(fVec, o.fVec); }
@@ -87,6 +70,25 @@ public:
     __m256 fVec;
 };
 
+template<> inline Sk8b SkNx_cast<uint8_t, float, 8>(const Sk8f& src) {
+    __m256i _32 = _mm256_cvttps_epi32(src.fVec);
+    __m128i lo  = _mm256_extractf128_si256(_32, 0),
+            hi  = _mm256_extractf128_si256(_32, 1),
+            _16 = _mm_packus_epi32(lo, hi);
+    return _mm_packus_epi16(_16, _16);
+}
+
+template<> inline Sk8f SkNx_cast<float, uint8_t, 8>(const Sk8b& src) {
+    /* TODO: lo = _mm_cvtepu8_epi32(src.fVec),
+     *       hi = _mm_cvtepu8_epi32(_mm_srli_si128(src.fVec, 4))
+     */
+    __m128i _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()),
+            lo  = _mm_unpacklo_epi16(_16, _mm_setzero_si128()),
+            hi  = _mm_unpackhi_epi16(_16, _mm_setzero_si128());
+    __m256i _32 = _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
+    return _mm256_cvtepi32_ps(_32);
+}
+
 }  // namespace
 
 #endif//SkNx_avx_DEFINED
diff --git a/src/opts/SkNx_neon.h b/src/opts/SkNx_neon.h
index 43409bee44..0955cb2b5b 100644
--- a/src/opts/SkNx_neon.h
+++ b/src/opts/SkNx_neon.h
@@ -150,31 +150,9 @@ public:
     SkNx() {}
     SkNx(float val) : fVec(vdupq_n_f32(val)) {}
     static SkNx Load(const float vals[4]) { return vld1q_f32(vals); }
-    static SkNx FromBytes(const uint8_t vals[4]) {
-        uint8x8_t  fix8    = (uint8x8_t)vld1_dup_u32((const uint32_t*)vals);
-        uint16x8_t fix8_16 = vmovl_u8(fix8);
-        uint32x4_t fix8_32 = vmovl_u16(vget_low_u16(fix8_16));
-        return SkNx(vcvtq_f32_u32(fix8_32));
-    }
-
     SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }
 
     void store(float vals[4]) const { vst1q_f32(vals, fVec); }
-    void toBytes(uint8_t bytes[4]) const {
-        uint32x4_t fix8_32 = vcvtq_u32_f32(fVec);
-        uint16x4_t fix8_16 = vqmovn_u32(fix8_32);
-        uint8x8_t  fix8    = vqmovn_u16(vcombine_u16(fix8_16, vdup_n_u16(0)));
-        vst1_lane_u32((uint32_t*)bytes, (uint32x2_t)fix8, 0);
-    }
-
-    static void ToBytes(uint8_t bytes[16],
-                        const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
-        vst1q_u8(bytes, vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec),
-                                          (uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0],
-                                 vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec),
-                                          (uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0]);
-    }
-
     SkNx approxInvert() const {
         float32x4_t est0 = vrecpeq_f32(fVec),
                     est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
@@ -288,6 +266,24 @@ public:
 };
 
 template <>
+class SkNx<4, uint8_t> {
+public:
+    SkNx(const uint8x8_t& vec) : fVec(vec) {}
+
+    SkNx() {}
+    static SkNx Load(const uint8_t vals[4]) {
+        return (uint8x8_t)vld1_dup_u32((const uint32_t*)vals);
+    }
+    void store(uint8_t vals[4]) const {
+        return vst1_lane_u32((uint32_t*)vals, (uint32x2_t)fVec, 0);
+    }
+
+    // TODO as needed
+
+    uint8x8_t fVec;
+};
+
+template <>
 class SkNx<16, uint8_t> {
 public:
     SkNx(const uint8x16_t& vec) : fVec(vec) {}
@@ -329,11 +325,30 @@ public:
 #undef SHIFT16
 #undef SHIFT8
 
-template<>
-inline SkNx<4, int> SkNx_cast<int, float, 4>(const SkNx<4, float>& src) {
+template<> inline Sk4i SkNx_cast<int, float, 4>(const Sk4f& src) {
     return vcvtq_s32_f32(src.fVec);
 }
 
+template<> inline Sk4b SkNx_cast<uint8_t, float, 4>(const Sk4f& src) {
+    uint32x4_t _32 = vcvtq_u32_f32(src.fVec);
+    uint16x4_t _16 = vqmovn_u32(_32);
+    return vqmovn_u16(vcombine_u16(_16, _16));
+}
+
+template<> inline Sk4f SkNx_cast<float, uint8_t, 4>(const Sk4b& src) {
+    uint16x8_t _16 = vmovl_u8(src.fVec);
+    uint32x4_t _32 = vmovl_u16(vget_low_u16(_16));
+    return vcvtq_f32_u32(_32);
+}
+
+static inline void Sk4f_ToBytes(uint8_t bytes[16],
+                                const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
+    vst1q_u8(bytes, vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec),
+                                      (uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0],
+                             vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec),
+                                      (uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0]);
+}
+
 }  // namespace
 
 #endif//SkNx_neon_DEFINED
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
index 9092f44480..f3952a9768 100644
--- a/src/opts/SkNx_sse.h
+++ b/src/opts/SkNx_sse.h
@@ -111,37 +111,9 @@ public:
     SkNx(float val) : fVec( _mm_set1_ps(val) ) {}
     static SkNx Load(const float vals[4]) { return _mm_loadu_ps(vals); }
-    static SkNx FromBytes(const uint8_t bytes[4]) {
-        __m128i fix8 = _mm_cvtsi32_si128(*(const int*)bytes);
-    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
-        const char _ = ~0;  // Zero these bytes.
-        __m128i fix8_32 = _mm_shuffle_epi8(fix8, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
-    #else
-        __m128i fix8_16 = _mm_unpacklo_epi8 (fix8,    _mm_setzero_si128()),
-                fix8_32 = _mm_unpacklo_epi16(fix8_16, _mm_setzero_si128());
-    #endif
-        return SkNx(_mm_cvtepi32_ps(fix8_32));
-        // TODO: use _mm_cvtepu8_epi32 w/SSE4.1?
-    }
-
     SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}
 
     void store(float vals[4]) const { _mm_storeu_ps(vals, fVec); }
-    void toBytes(uint8_t bytes[4]) const {
-        __m128i fix8_32 = _mm_cvttps_epi32(fVec),
-                fix8_16 = _mm_packus_epi16(fix8_32, fix8_32),
-                fix8    = _mm_packus_epi16(fix8_16, fix8_16);
-        *(int*)bytes = _mm_cvtsi128_si32(fix8);
-    }
-
-    static void ToBytes(uint8_t bytes[16],
-                        const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
-        _mm_storeu_si128((__m128i*)bytes,
-                         _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
-                                                           _mm_cvttps_epi32(b.fVec)),
-                                          _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
-                                                           _mm_cvttps_epi32(d.fVec))));
-    }
 
     SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
     SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
@@ -253,6 +225,34 @@ public:
 };
 
 template <>
+class SkNx<4, uint8_t> {
+public:
+    SkNx(const __m128i& vec) : fVec(vec) {}
+
+    SkNx() {}
+    static SkNx Load(const uint8_t vals[4]) { return _mm_cvtsi32_si128(*(const int*)vals); }
+    void store(uint8_t vals[4]) const { *(int*)vals = _mm_cvtsi128_si32(fVec); }
+
+    // TODO as needed
+
+    __m128i fVec;
+};
+
+template <>
+class SkNx<8, uint8_t> {
+public:
+    SkNx(const __m128i& vec) : fVec(vec) {}
+
+    SkNx() {}
+    static SkNx Load(const uint8_t vals[8]) { return _mm_loadl_epi64((const __m128i*)vals); }
+    void store(uint8_t vals[8]) const { _mm_storel_epi64((__m128i*)vals, fVec); }
+
+    // TODO as needed
+
+    __m128i fVec;
+};
+
+template <>
 class SkNx<16, uint8_t> {
 public:
     SkNx(const __m128i& vec) : fVec(vec) {}
@@ -296,11 +296,42 @@ public:
 };
 
-template<>
-inline SkNx<4, int> SkNx_cast<int, float, 4>(const SkNx<4, float>& src) {
+template<> inline Sk4i SkNx_cast<int, float, 4>(const Sk4f& src) {
     return _mm_cvttps_epi32(src.fVec);
 }
 
+template<> inline Sk4b SkNx_cast<uint8_t, float, 4>(const Sk4f& src) {
+    auto _32 = _mm_cvttps_epi32(src.fVec);
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+    const int _ = ~0;
+    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
+#else
+    auto _16 = _mm_packus_epi16(_32, _32);
+    return _mm_packus_epi16(_16, _16);
+#endif
+}
+
+template<> inline Sk4f SkNx_cast<float, uint8_t, 4>(const Sk4b& src) {
+#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+    const int _ = ~0;
+    auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
+#else
+    auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()),
+         _32 = _mm_unpacklo_epi16(_16, _mm_setzero_si128());
+#endif
+    return _mm_cvtepi32_ps(_32);
+}
+
+static inline void Sk4f_ToBytes(uint8_t bytes[16],
+                                const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
+    _mm_storeu_si128((__m128i*)bytes,
+                     _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
+                                                       _mm_cvttps_epi32(b.fVec)),
+                                      _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
+                                                       _mm_cvttps_epi32(d.fVec))));
+}
+
 }  // namespace
 
 #endif//SkNx_sse_DEFINED
diff --git a/src/opts/SkXfermode_opts.h b/src/opts/SkXfermode_opts.h
index 69f2b420f5..93559ab8eb 100644
--- a/src/opts/SkXfermode_opts.h
+++ b/src/opts/SkXfermode_opts.h
@@ -265,11 +265,11 @@ public:
 private:
     static Sk4f Load(SkPMColor c) {
-        return Sk4f::FromBytes((uint8_t*)&c) * Sk4f(1.0f/255);
+        return SkNx_cast<float>(Sk4b::Load((uint8_t*)&c)) * Sk4f(1.0f/255);
     }
     static SkPMColor Round(const Sk4f& f) {
         SkPMColor c;
-        (f * Sk4f(255) + Sk4f(0.5f)).toBytes((uint8_t*)&c);
+        SkNx_cast<uint8_t>(f * Sk4f(255) + Sk4f(0.5f)).store((uint8_t*)&c);
         return c;
     }
     inline SkPMColor xfer32(SkPMColor dst, SkPMColor src) const {
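
The 4-at-a-time path survives as the standalone Sk4f_ToBytes defined above for NEON and SSE. A hedged usage sketch follows: the store4 wrapper and the 0..1 float convention are illustrative assumptions, not part of this CL; the +0.5f rounding mirrors the Round() pattern in SkXfermode_opts.h.

```cpp
// Illustrative only: pack four Sk4f pixels (assumed to hold 0..1 floats)
// into 16 bytes with a single call to the new standalone Sk4f_ToBytes.
static void store4(uint8_t dst[16],
                   const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
    const Sk4f scale(255.0f), half(0.5f);  // scale up, add 0.5 so truncation rounds
    Sk4f_ToBytes(dst, a * scale + half,
                      b * scale + half,
                      c * scale + half,
                      d * scale + half);
}
```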