| author | mtklein <mtklein@google.com> | 2016-02-12 14:19:06 -0800 |
|---|---|---|
| committer | Commit bot <commit-bot@chromium.org> | 2016-02-12 14:19:06 -0800 |
| commit | 97120a7ed193b2e081c9503a7c58657e0c6f4920 (patch) | |
| tree | caf73648c4e3c23f60aea6062cc51412f2cf44ed | |
| parent | fed90d4712422e5f417169083c59ca749daeae72 (diff) | |
Revert of SkNx refactoring (patchset #4 id:60001 of https://codereview.chromium.org/1690633003/ )
Reason for revert:
Precautionary revert for chromium:586487
Original issue's description:
> SkNx refactoring
>
> - add back Sk4i typedef
> - define SSE casts in terms of Sk4i
> * uint8 <-> float becomes uint8 <-> int <-> float
> * uint16 <-> float becomes uint16 <-> int <-> float
>
> This has the nice side effect of specializing uint8 <-> int
> and uint16 <-> int, which are useful in their own right.
>
> There are many cast specializations now, some of which call each other.
> I have tried to arrange them in some sort of sensible order, subject to
> the constraint that those called must precede those who call.
>
> BUG=skia:
> GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1690633003
> CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot
>
> Committed: https://skia.googlesource.com/skia/+/c1eb311f4e98934476f1b2ad5d6de772cf140d60
TBR=herb@google.com,mtklein@chromium.org
# Not skipping CQ checks because original CL landed more than 1 day ago.
BUG=chromium:586487
Review URL: https://codereview.chromium.org/1696903002
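
Below is the file-level summary followed by the full diff. As background for the code being reverted: the refactor described above defined the uint8/uint16 <-> float SSE casts by composing two simpler legs through a 4-lane int vector (the Sk4i typedef that this revert removes again). The following is a minimal, standalone sketch of that composition using raw SSE2 intrinsics, not Skia's actual SkNx code; the helper names are illustrative only.

```cpp
// Sketch of the reverted CL's idea: express uint8 -> float as
// uint8 -> int -> float, so each leg is a small reusable conversion.
#include <emmintrin.h>   // SSE2
#include <cstdint>
#include <cstring>
#include <cstdio>

// uint8 -> int32: zero-extend four bytes into four 32-bit lanes.
static __m128i four_bytes_to_int32(__m128i b) {
    __m128i zero = _mm_setzero_si128();
    return _mm_unpacklo_epi16(_mm_unpacklo_epi8(b, zero), zero);
}

// int32 -> float is one instruction; composing the legs gives uint8 -> float.
static __m128 four_bytes_to_float(__m128i b) {
    return _mm_cvtepi32_ps(four_bytes_to_int32(b));
}

int main() {
    uint8_t px[4] = {0, 1, 128, 255};
    int bits;
    std::memcpy(&bits, px, sizeof(bits));               // load 4 bytes
    __m128 f = four_bytes_to_float(_mm_cvtsi32_si128(bits));

    float out[4];
    _mm_storeu_ps(out, f);
    std::printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 0 1 128 255
}
```

The same zero-extend-then-convert pattern is what the restored SkNx_cast<float, uint8_t> uses on its SSE2 path in the diff below.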
| | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/core/SkNx.h | 2 |
| -rw-r--r-- | src/opts/SkNx_sse.h | 81 |

2 files changed, 33 insertions, 50 deletions
diff --git a/src/core/SkNx.h b/src/core/SkNx.h
index 8722bf6dfa..986c133415 100644
--- a/src/core/SkNx.h
+++ b/src/core/SkNx.h
@@ -199,8 +199,6 @@ typedef SkNx<16, uint8_t> Sk16b;
 typedef SkNx<4, uint16_t> Sk4h;
 typedef SkNx<16, uint16_t> Sk16h;
 
-typedef SkNx<4, int> Sk4i;
-
 // Include platform specific specializations if available.
 #if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
     #include "../opts/SkNx_sse.h"
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
index 6c72dbbc45..7d00ddf9ba 100644
--- a/src/opts/SkNx_sse.h
+++ b/src/opts/SkNx_sse.h
@@ -295,77 +295,62 @@ public:
 };
 
-template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
-    return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
-}
-template<> /*static*/ inline Sk4i SkNx_cast< int, uint8_t>(const Sk4b& src) {
+template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
+    auto _32 = _mm_cvttps_epi32(src.fVec);
+    // Ideally we'd use _mm_packus_epi32 here.  But that's SSE4.1+.
 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+    // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place.
     const int _ = ~0;
-    return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
+    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_));
 #else
-    return _mm_unpacklo_epi16(SkNx_cast<uint16_t>(src).fVec, _mm_setzero_si128());
+    // With SSE2, we have to emulate _mm_packus_epi32 with _mm_packs_epi32:
+    _32 = _mm_sub_epi32(_32, _mm_set1_epi32((int)0x00008000));
+    return _mm_add_epi16(_mm_packs_epi32(_32, _32), _mm_set1_epi16((short)0x8000));
 #endif
 }
 
-template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
-    return _mm_packus_epi16(src.fVec, src.fVec);
-}
-template<> /*static*/ inline Sk4i SkNx_cast< int, uint16_t>(const Sk4h& src) {
-    return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
-}
-
-template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, int>(const Sk4i& src) {
+template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
+    auto _32 = _mm_cvttps_epi32(src.fVec);
 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
     const int _ = ~0;
-    return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
+    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
 #else
-    // We're on our way to 8-bit anyway, so we don't care that _mm_packs_epi32 clamps to int16_t.
-    Sk4h _16 = _mm_packs_epi32(src.fVec, src.fVec);
-    return SkNx_cast<uint8_t>(_16);
+    auto _16 = _mm_packus_epi16(_32, _32);
+    return _mm_packus_epi16(_16, _16);
 #endif
 }
-template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, int>(const Sk4i& src) {
-    auto _32 = src.fVec;
-    // Ideally we'd use _mm_packus_epi32 here.  But that's SSE4.1+.
+
+template<> /*static*/ inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
 #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
-    // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place.
     const int _ = ~0;
-    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_));
+    auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
 #else
-    // With SSE2, we have to emulate _mm_packus_epi32 with _mm_packs_epi32:
-    _32 = _mm_sub_epi32(_32, _mm_set1_epi32((int)0x00008000));
-    return _mm_add_epi16(_mm_packs_epi32(_32, _32), _mm_set1_epi16((short)0x8000));
+    auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()),
+         _32 = _mm_unpacklo_epi16(_16, _mm_setzero_si128());
 #endif
+    return _mm_cvtepi32_ps(_32);
 }
 
-template<> /*static*/ inline Sk4f SkNx_cast<float, int>(const Sk4i& src) {
-    return _mm_cvtepi32_ps(src.fVec);
-}
 template<> /*static*/ inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
-    return SkNx_cast<float>(SkNx_cast<int>(src));
-}
-template<> /*static*/ inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
-    return SkNx_cast<float>(SkNx_cast<int>(src));
-}
-
-template<> /*static*/ inline Sk4i SkNx_cast< int, float>(const Sk4f& src) {
-    return _mm_cvttps_epi32(src.fVec);
-}
-template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
-    return SkNx_cast<uint16_t>(SkNx_cast<int>(src));
-}
-template<> /*static*/ inline Sk4b SkNx_cast< uint8_t, float>(const Sk4f& src) {
-    return SkNx_cast<uint8_t>(SkNx_cast<int>(src));
+    auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
+    return _mm_cvtepi32_ps(_32);
 }
 
 static inline void Sk4f_ToBytes(uint8_t bytes[16],
                                 const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
-    // We're on our way to 8-bit anyway, so we don't care that _mm_packs_epi32 clamps to int16_t.
     _mm_storeu_si128((__m128i*)bytes,
-                     _mm_packus_epi16(_mm_packs_epi32(_mm_cvttps_epi32(a.fVec),
-                                                      _mm_cvttps_epi32(b.fVec)),
-                                      _mm_packs_epi32(_mm_cvttps_epi32(c.fVec),
-                                                      _mm_cvttps_epi32(d.fVec))));
+                     _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
+                                                       _mm_cvttps_epi32(b.fVec)),
+                                      _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
+                                                       _mm_cvttps_epi32(d.fVec))));
+}
+
+template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
+    return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
+}
+
+template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
+    return _mm_packus_epi16(src.fVec, src.fVec);
 }
 
 #endif//SkNx_sse_DEFINED
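
One detail worth calling out in the restored SkNx_cast<uint16_t, float>: _mm_packus_epi32 (pack int32 to uint16 with unsigned saturation) only exists from SSE4.1, so the SSE2 path emulates it by biasing by 0x8000, packing with the signed _mm_packs_epi32, and un-biasing with a 16-bit add, exactly as in the diff above. A self-contained sketch of that trick (the wrapper name below is made up for illustration):

```cpp
// SSE2 emulation of _mm_packus_epi32 via _mm_packs_epi32 plus a 0x8000 bias.
#include <emmintrin.h>   // SSE2
#include <cstdint>
#include <cstdio>

static __m128i emulated_packus_epi32(__m128i v32) {
    // Bias into signed range: [0, 65535] -> [-32768, 32767].
    v32 = _mm_sub_epi32(v32, _mm_set1_epi32(0x00008000));
    // The signed saturating pack now keeps every biased value exactly.
    __m128i packed = _mm_packs_epi32(v32, v32);
    // Un-bias with a wrapping 16-bit add to recover the unsigned result.
    return _mm_add_epi16(packed, _mm_set1_epi16((short)0x8000));
}

int main() {
    __m128i v = _mm_setr_epi32(0, 1, 32768, 65535);
    uint16_t out[8];
    _mm_storeu_si128((__m128i*)out, emulated_packus_epi32(v));
    std::printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // 0 1 32768 65535
}
```

Because the 16-bit add wraps modulo 65536, the final +0x8000 exactly cancels the earlier -0x8000 for every value the signed pack kept in range, giving the same result _mm_packus_epi32 would for the value ranges these casts care about.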