path: root/src/opts/Sk4px_SSE2.h
author    mtklein <mtklein@chromium.org>  2016-01-08 11:45:21 -0800
committer Commit bot <commit-bot@chromium.org>  2016-01-08 11:45:21 -0800
commit    defa0daa6a0f4e97a3527a522ae602c6771a7c80 (patch)
tree      1d47593806af59cc46dd58a40e99e3b27e2c55ad /src/opts/Sk4px_SSE2.h
parent    1a1efeacf7cc94a8c2977114dfe230fed3efc105 (diff)
Clean up SkXfermode_opts.h
It seems that MSVC + __vectorcall don't play well together, so back ourselves out into a situation where we don't need it.

- Inline transfermode functions. This removes the need for SK_VECTORCALL.
- Remove 565 destination specializations. Blending into 565 is not speed-critical enough to merit the code bloat.
- Removing 565 specializations means a bunch of Sk4px code is now dead.

8888 xfermodes generally speed up a bit from inlining, smoothly ranging from no change down to 0.65x for the fastest functions like Plus or Modulate. 565 xfermodes generally slow down because we're doing the 565 -> 8888 and 8888 -> 565 conversions serially [1] and using the stack, smoothly ranging from no change up to 2x slower for the fastest functions like Plus and Modulate.

[1] The 565 -> 8888 conversion is actually being autovectorized.

BUG=skia:4765,skia:4776
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1565223002
CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot

No public API changes.
TBR=reed@google.com

Review URL: https://codereview.chromium.org/1565223002
Diffstat (limited to 'src/opts/Sk4px_SSE2.h')
-rw-r--r--  src/opts/Sk4px_SSE2.h | 75
1 file changed, 0 insertions, 75 deletions
diff --git a/src/opts/Sk4px_SSE2.h b/src/opts/Sk4px_SSE2.h
index 96f21db399..dc0c8ace70 100644
--- a/src/opts/Sk4px_SSE2.h
+++ b/src/opts/Sk4px_SSE2.h
@@ -101,79 +101,4 @@ inline Sk4px Sk4px::zeroAlphas() const {
return Sk16b(_mm_andnot_si128(_mm_set1_epi32(0xFF << SK_A32_SHIFT), this->fVec));
}
-static inline __m128i widen_low_half_to_8888(__m128i v) {
- // RGB565 format: |R....|G.....|B....|
- // Bit: 16 11 5 0
-
- // First get each pixel into its own 32-bit lane.
- // v == ____ ____ ____ ____ rgb3 rgb2 rgb1 rgb0
- // spread == 0000 rgb3 0000 rgb2 0000 rgb1 0000 rgb0
- auto spread = _mm_unpacklo_epi16(v, _mm_setzero_si128());
-
- // Get each color independently, still in 565 precision but down at bit 0.
- auto r5 = _mm_srli_epi32(spread, 11),
- g6 = _mm_and_si128(_mm_set1_epi32(63), _mm_srli_epi32(spread, 5)),
- b5 = _mm_and_si128(_mm_set1_epi32(31), spread);
-
- // Scale 565 precision up to 8-bit each, filling the low 3/2/3 bits with the high bits of each component.
- auto r8 = _mm_or_si128(_mm_slli_epi32(r5, 3), _mm_srli_epi32(r5, 2)),
- g8 = _mm_or_si128(_mm_slli_epi32(g6, 2), _mm_srli_epi32(g6, 4)),
- b8 = _mm_or_si128(_mm_slli_epi32(b5, 3), _mm_srli_epi32(b5, 2));
-
- // Now put all the 8-bit components into SkPMColor order.
- return _mm_or_si128(_mm_slli_epi32(r8, SK_R32_SHIFT), // TODO: one of these shifts is zero...
- _mm_or_si128(_mm_slli_epi32(g8, SK_G32_SHIFT),
- _mm_or_si128(_mm_slli_epi32(b8, SK_B32_SHIFT),
- _mm_set1_epi32(0xFF << SK_A32_SHIFT))));
-}
-
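
For reference, here is a scalar sketch of the same 565 -> 8888 widening, one pixel at a time. This is a hypothetical helper for illustration, not part of the deleted code; it assumes the same SK_R32_SHIFT/SK_G32_SHIFT/SK_B32_SHIFT/SK_A32_SHIFT channel macros used above:

    // Hypothetical scalar equivalent of widen_low_half_to_8888, one pixel at a time.
    static inline uint32_t widen_565_to_8888_scalar(uint16_t rgb) {
        uint32_t r5 = (rgb >> 11) & 31,   // 5 bits of red
                 g6 = (rgb >>  5) & 63,   // 6 bits of green
                 b5 =  rgb        & 31;   // 5 bits of blue
        // Replicating the top bits into the low bits maps the endpoints exactly:
        // 0 stays 0x00, and 31 (or 63 for green) becomes 0xFF.
        uint32_t r8 = (r5 << 3) | (r5 >> 2),
                 g8 = (g6 << 2) | (g6 >> 4),
                 b8 = (b5 << 3) | (b5 >> 2);
        return (r8 << SK_R32_SHIFT)
             | (g8 << SK_G32_SHIFT)
             | (b8 << SK_B32_SHIFT)
             | (0xFFu << SK_A32_SHIFT);   // 565 has no alpha, so force opaque.
    }
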
-static inline __m128i narrow_to_565(__m128i w) {
- // Extract out top RGB 565 bits of each pixel, with no rounding.
- auto r5 = _mm_and_si128(_mm_set1_epi32(31), _mm_srli_epi32(w, SK_R32_SHIFT + 3)),
- g6 = _mm_and_si128(_mm_set1_epi32(63), _mm_srli_epi32(w, SK_G32_SHIFT + 2)),
- b5 = _mm_and_si128(_mm_set1_epi32(31), _mm_srli_epi32(w, SK_B32_SHIFT + 3));
-
- // Now put the bits in place in the low 16-bits of each 32-bit lane.
- auto spread = _mm_or_si128(_mm_slli_epi32(r5, 11),
- _mm_or_si128(_mm_slli_epi32(g6, 5),
- b5));
-
- // We want to pack the bottom 16-bits of spread down into the low half of the register, v.
- // spread == 0000 rgb3 0000 rgb2 0000 rgb1 0000 rgb0
- // v == ____ ____ ____ ____ rgb3 rgb2 rgb1 rgb0
-
- // Ideally now we'd use _mm_packus_epi32(spread, <anything>) to pack v. But that's from SSE4.1.
- // With only SSE2, we need to use _mm_packs_epi32. That does signed saturation, and
- // we need to preserve all 16 bits. So we pretend our data is signed by sign-extending first.
- // TODO: is it faster to just _mm_shuffle_epi8 this when we have SSSE3?
- auto signExtended = _mm_srai_epi32(_mm_slli_epi32(spread, 16), 16);
- auto v = _mm_packs_epi32(signExtended, signExtended);
- return v;
-}
-
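
On the TODO above: with SSSE3 available, the sign-extend + pack pair could plausibly be replaced by a single byte shuffle. A minimal sketch of that alternative, assuming SSSE3 (<tmmintrin.h>); this is the speculation the comment invites, not what the deleted code did:

    // Hypothetical SSSE3 version of the pack step: gather bytes 0,1 / 4,5 / 8,9 /
    // 12,13 (the low 16 bits of each 32-bit lane) into the low 8 bytes.
    // Shuffle-control bytes with the high bit set (-1 here) produce zero, so the
    // high half comes out zeroed instead of duplicated; the stores above only
    // read the low half anyway.
    static inline __m128i pack_low16_ssse3(__m128i spread) {
        const __m128i kMask = _mm_setr_epi8( 0,  1,  4,  5,  8,  9, 12, 13,
                                            -1, -1, -1, -1, -1, -1, -1, -1);
        return _mm_shuffle_epi8(spread, kMask);
    }
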
-inline Sk4px Sk4px::Load4(const SkPMColor16 src[4]) {
- return Sk16b(widen_low_half_to_8888(_mm_loadl_epi64((const __m128i*)src)));
-}
-inline Sk4px Sk4px::Load2(const SkPMColor16 src[2]) {
- auto src2 = ((uint32_t)src[0] )
- | ((uint32_t)src[1] << 16);
- return Sk16b(widen_low_half_to_8888(_mm_cvtsi32_si128(src2)));
-}
-inline Sk4px Sk4px::Load1(const SkPMColor16 src[1]) {
- return Sk16b(widen_low_half_to_8888(_mm_insert_epi16(_mm_setzero_si128(), src[0], 0)));
-}
-
-inline void Sk4px::store4(SkPMColor16 dst[4]) const {
- _mm_storel_epi64((__m128i*)dst, narrow_to_565(this->fVec));
-}
-inline void Sk4px::store2(SkPMColor16 dst[2]) const {
- uint32_t dst2 = _mm_cvtsi128_si32(narrow_to_565(this->fVec));
- dst[0] = dst2;
- dst[1] = dst2 >> 16;
-}
-inline void Sk4px::store1(SkPMColor16 dst[1]) const {
- uint32_t dst2 = _mm_cvtsi128_si32(narrow_to_565(this->fVec));
- dst[0] = dst2;
-}
-
} // namespace
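
One property worth noting about the deleted pair: widening with bit replication and then narrowing by dropping the low 3/2/3 bits is an exact round trip, so loading 565 pixels and storing them back is lossless. A self-contained scalar check of that claim (hypothetical code, with illustrative channel shifts standing in for Skia's configurable SK_*32_SHIFT values):

    // Hypothetical standalone check that 565 -> 8888 -> 565 is lossless.
    #include <assert.h>
    #include <stdint.h>

    // Illustrative channel shifts; Skia's real SK_*32_SHIFT values are configurable.
    enum { R_SHIFT = 16, G_SHIFT = 8, B_SHIFT = 0, A_SHIFT = 24 };

    static uint32_t widen(uint16_t rgb) {
        uint32_t r5 = (rgb >> 11) & 31, g6 = (rgb >> 5) & 63, b5 = rgb & 31;
        return (((r5 << 3) | (r5 >> 2)) << R_SHIFT)
             | (((g6 << 2) | (g6 >> 4)) << G_SHIFT)
             | (((b5 << 3) | (b5 >> 2)) << B_SHIFT)
             | (0xFFu << A_SHIFT);
    }

    static uint16_t narrow(uint32_t w) {
        return (uint16_t)((((w >> (R_SHIFT + 3)) & 31) << 11)
                        | (((w >> (G_SHIFT + 2)) & 63) <<  5)
                        |  ((w >> (B_SHIFT + 3)) & 31));
    }

    int main() {
        for (uint32_t v = 0; v <= 0xFFFF; v++) {
            assert(narrow(widen((uint16_t)v)) == v);  // Exact for every 565 value.
        }
        return 0;
    }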