-rw-r--r--  src/core/SkPx.h             |  89
-rw-r--r--  src/opts/SkBlitMask_opts.h  | 213
-rw-r--r--  src/opts/SkPx_neon.h        | 214
-rw-r--r--  src/opts/SkPx_none.h        | 106
-rw-r--r--  src/opts/SkPx_sse.h         | 150
5 files changed, 173 insertions, 599 deletions
diff --git a/src/core/SkPx.h b/src/core/SkPx.h
deleted file mode 100644
index 129fc07fbc..0000000000
--- a/src/core/SkPx.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef SkPx_DEFINED
-#define SkPx_DEFINED
-
-#include "SkTypes.h"
-#include "SkColorPriv.h"
-
-// We'll include one of src/opts/SkPx_{sse,neon,none}.h to define a type SkPx.
-//
-// SkPx represents up to SkPx::N 8888 pixels.  It's agnostic to whether these
-// are SkColors or SkPMColors; it only assumes that alpha is the high byte.
-static_assert(SK_A32_SHIFT == 24, "For both SkColor and SkPMColor, alpha is always the high byte.");
-//
-// SkPx::Alpha represents up to SkPx::N 8-bit values, usually coverage or alpha.
-// SkPx::Wide represents up to SkPx::N pixels with 16 bits per component.
-//
-// SkPx supports the following methods:
-//    static SkPx Dup(uint32_t);
-//    static SkPx Load(const uint32_t*);
-//    static SkPx Load(const uint32_t*, int n);  // where 0<n<SkPx::N
-//    void store(uint32_t*) const;
-//    void store(uint32_t*, int n) const;        // where 0<n<SkPx::N
-//
-//    Alpha alpha() const;     // argb -> a
-//    Wide widenLo() const;    // argb -> 0a0r0g0b
-//    Wide widenHi() const;    // argb -> a0r0g0b0
-//    Wide widenLoHi() const;  // argb -> aarrggbb
-//
-//    SkPx operator+(const SkPx&) const;
-//    SkPx operator-(const SkPx&) const;
-//    SkPx saturatedAdd(const SkPx&) const;
-//
-//    Wide operator*(const Alpha&) const;  // argb * A -> (a*A)(r*A)(g*A)(b*A)
-//
-//    // Fast approximate (px*a+127)/255.
-//    // Never off by more than 1, and always correct when px or a is 0 or 255.
-//    // We use the approximation (px*a+px)/256.
-//    SkPx approxMulDiv255(const Alpha&) const;
-//
-//    SkPx addAlpha(const Alpha&) const;  // argb + A -> (a+A)rgb
-//
-// SkPx::Alpha supports the following methods:
-//    static Alpha Dup(uint8_t);
-//    static Alpha Load(const uint8_t*);
-//    static Alpha Load(const uint8_t*, int n);  // where 0<n<SkPx::N
-//
-//    Alpha inv() const;  // a -> 255-a
-//
-// SkPx::Wide supports the following methods:
-//    Wide operator+(const Wide&);
-//    Wide operator-(const Wide&);
-//    Wide operator<<(int bits);
-//    Wide operator>>(int bits);
-//
-//    // Return the high byte of each component of (*this + o.widenLo()).
-//    SkPx addNarrowHi(const SkPx& o);
-//
-// Methods left unwritten, but certainly to come:
-//    SkPx SkPx::operator<(const SkPx&) const;
-//    SkPx SkPx::thenElse(const SkPx& then, const SkPx& else) const;
-//    Wide Wide::operator<(const Wide&) const;
-//    Wide Wide::thenElse(const Wide& then, const Wide& else) const;
-//
-//    SkPx Wide::div255() const;  // Rounds, think (*this + 127) / 255.
-//
-// The different implementations of SkPx have complete freedom to choose
-// SkPx::N and how they represent SkPx, SkPx::Alpha, and SkPx::Wide.
-//
-// All observable math must remain identical.
-
-#if defined(SKNX_NO_SIMD)
-    #include "../opts/SkPx_none.h"
-#else
-    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
-        #include "../opts/SkPx_sse.h"
-    #elif defined(SK_ARM_HAS_NEON)
-        #include "../opts/SkPx_neon.h"
-    #else
-        #include "../opts/SkPx_none.h"
-    #endif
-#endif
-
-#endif//SkPx_DEFINED
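Note: the header above specifies approxMulDiv255() as the approximation (px*a+px)/256 for the rounded (px*a+127)/255, never off by more than 1 and exact whenever px or a is 0 or 255. That contract is small enough to check exhaustively; a standalone sketch (illustrative, not Skia code):

    #include <cassert>

    // Exhaustively verify the approxMulDiv255() contract documented above.
    int main() {
        for (int px = 0; px < 256; px++) {
            for (int a = 0; a < 256; a++) {
                int approx = (px*a + px) >> 8,       // The SkPx approximation.
                    exact  = (px*a + 127) / 255;     // Correctly-rounded reference.
                assert(approx - exact <= 1 && exact - approx <= 1);
                if (px == 0 || px == 255 || a == 0 || a == 255) {
                    assert(approx == exact);
                }
            }
        }
    }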
diff --git a/src/opts/SkBlitMask_opts.h b/src/opts/SkBlitMask_opts.h
index f4d7e7973f..2f4fe6ffb8 100644
--- a/src/opts/SkBlitMask_opts.h
+++ b/src/opts/SkBlitMask_opts.h
@@ -9,62 +9,195 @@
 #define SkBlitMask_opts_DEFINED
 
 #include "Sk4px.h"
-#include "SkPx.h"
 
 namespace SK_OPTS_NS {
 
-template <typename Fn>
-static void blit_mask_d32_a8(const Fn& fn, SkPMColor* dst, size_t dstRB,
-                             const SkAlpha* mask, size_t maskRB,
-                             int w, int h) {
-    while (h --> 0) {
-        int n = w;
-        while (n >= SkPx::N) {
-            fn(SkPx::Load(dst), SkPx::Alpha::Load(mask)).store(dst);
-            dst += SkPx::N; mask += SkPx::N; n -= SkPx::N;
+#if defined(SK_ARM_HAS_NEON)
+    // The Sk4px versions below will work fine with NEON, but we have had many indications
+    // that it doesn't perform as well as this NEON-specific code.  TODO(mtklein): why?
+    #include "SkColor_opts_neon.h"
+
+    template <bool isColor>
+    static void D32_A8_Opaque_Color_neon(void* SK_RESTRICT dst, size_t dstRB,
+                                         const void* SK_RESTRICT maskPtr, size_t maskRB,
+                                         SkColor color, int width, int height) {
+        SkPMColor pmc = SkPreMultiplyColor(color);
+        SkPMColor* SK_RESTRICT device = (SkPMColor*)dst;
+        const uint8_t* SK_RESTRICT mask = (const uint8_t*)maskPtr;
+        uint8x8x4_t vpmc;
+
+        maskRB -= width;
+        dstRB -= (width << 2);
+
+        if (width >= 8) {
+            vpmc.val[NEON_A] = vdup_n_u8(SkGetPackedA32(pmc));
+            vpmc.val[NEON_R] = vdup_n_u8(SkGetPackedR32(pmc));
+            vpmc.val[NEON_G] = vdup_n_u8(SkGetPackedG32(pmc));
+            vpmc.val[NEON_B] = vdup_n_u8(SkGetPackedB32(pmc));
         }
-        if (n > 0) {
-            fn(SkPx::Load(dst, n), SkPx::Alpha::Load(mask, n)).store(dst, n);
-            dst += n; mask += n;
+        do {
+            int w = width;
+            while (w >= 8) {
+                uint8x8_t vmask = vld1_u8(mask);
+                uint16x8_t vscale, vmask256 = SkAlpha255To256_neon8(vmask);
+                if (isColor) {
+                    vscale = vsubw_u8(vdupq_n_u16(256),
+                                      SkAlphaMul_neon8(vpmc.val[NEON_A], vmask256));
+                } else {
+                    vscale = vsubw_u8(vdupq_n_u16(256), vmask);
+                }
+                uint8x8x4_t vdev = vld4_u8((uint8_t*)device);
+
+                vdev.val[NEON_A] = SkAlphaMul_neon8(vpmc.val[NEON_A], vmask256)
+                                 + SkAlphaMul_neon8(vdev.val[NEON_A], vscale);
+                vdev.val[NEON_R] = SkAlphaMul_neon8(vpmc.val[NEON_R], vmask256)
+                                 + SkAlphaMul_neon8(vdev.val[NEON_R], vscale);
+                vdev.val[NEON_G] = SkAlphaMul_neon8(vpmc.val[NEON_G], vmask256)
+                                 + SkAlphaMul_neon8(vdev.val[NEON_G], vscale);
+                vdev.val[NEON_B] = SkAlphaMul_neon8(vpmc.val[NEON_B], vmask256)
+                                 + SkAlphaMul_neon8(vdev.val[NEON_B], vscale);
+
+                vst4_u8((uint8_t*)device, vdev);
+
+                mask += 8;
+                device += 8;
+                w -= 8;
+            }
+
+            while (w--) {
+                unsigned aa = *mask++;
+                if (isColor) {
+                    *device = SkBlendARGB32(pmc, *device, aa);
+                } else {
+                    *device = SkAlphaMulQ(pmc, SkAlpha255To256(aa))
+                            + SkAlphaMulQ(*device, SkAlpha255To256(255 - aa));
+                }
+                device += 1;
+            };
+
+            device = (uint32_t*)((char*)device + dstRB);
+            mask += maskRB;
+
+        } while (--height != 0);
+    }
+
+    static void blit_mask_d32_a8_general(SkPMColor* dst, size_t dstRB,
+                                         const SkAlpha* mask, size_t maskRB,
+                                         SkColor color, int w, int h) {
+        D32_A8_Opaque_Color_neon<true>(dst, dstRB, mask, maskRB, color, w, h);
+    }
+
+    // As above, but made slightly simpler by requiring that color is opaque.
+    static void blit_mask_d32_a8_opaque(SkPMColor* dst, size_t dstRB,
+                                        const SkAlpha* mask, size_t maskRB,
+                                        SkColor color, int w, int h) {
+        D32_A8_Opaque_Color_neon<false>(dst, dstRB, mask, maskRB, color, w, h);
+    }
+
+    // Same as _opaque, but assumes color == SK_ColorBLACK, a very common and even simpler case.
+    static void blit_mask_d32_a8_black(SkPMColor* dst, size_t dstRB,
+                                       const SkAlpha* maskPtr, size_t maskRB,
+                                       int width, int height) {
+        SkPMColor* SK_RESTRICT device = (SkPMColor*)dst;
+        const uint8_t* SK_RESTRICT mask = (const uint8_t*)maskPtr;
+
+        maskRB -= width;
+        dstRB -= (width << 2);
+        do {
+            int w = width;
+            while (w >= 8) {
+                uint8x8_t vmask = vld1_u8(mask);
+                uint16x8_t vscale = vsubw_u8(vdupq_n_u16(256), vmask);
+                uint8x8x4_t vdevice = vld4_u8((uint8_t*)device);
+
+                vdevice = SkAlphaMulQ_neon8(vdevice, vscale);
+                vdevice.val[NEON_A] += vmask;
+
+                vst4_u8((uint8_t*)device, vdevice);
+
+                mask += 8;
+                device += 8;
+                w -= 8;
+            }
+            while (w-- > 0) {
+                unsigned aa = *mask++;
+                *device = (aa << SK_A32_SHIFT)
+                        + SkAlphaMulQ(*device, SkAlpha255To256(255 - aa));
+                device += 1;
+            };
+            device = (uint32_t*)((char*)device + dstRB);
+            mask += maskRB;
+        } while (--height != 0);
+    }
+
+#else
+    static void blit_mask_d32_a8_general(SkPMColor* dst, size_t dstRB,
+                                         const SkAlpha* mask, size_t maskRB,
+                                         SkColor color, int w, int h) {
+        auto s = Sk4px::DupPMColor(SkPreMultiplyColor(color));
+        auto fn = [&](const Sk4px& d, const Sk4px& aa) {
+            //  = (s + d(1-sa))aa + d(1-aa)
+            //  = s*aa + d(1-sa*aa)
+            auto left  = s.approxMulDiv255(aa),
+                 right = d.approxMulDiv255(left.alphas().inv());
+            return left + right;  // This does not overflow (exhaustively checked).
+        };
+        while (h --> 0) {
+            Sk4px::MapDstAlpha(w, dst, mask, fn);
+            dst  += dstRB  / sizeof(*dst);
+            mask += maskRB / sizeof(*mask);
         }
-        dst  += dstRB  / sizeof(*dst)  - w;
-        mask += maskRB / sizeof(*mask) - w;
     }
-}
 
-static void blit_mask_d32_a8(SkPMColor* dst, size_t dstRB,
-                             const SkAlpha* mask, size_t maskRB,
-                             SkColor color, int w, int h) {
-    auto s = SkPx::Dup(SkPreMultiplyColor(color));
+    // As above, but made slightly simpler by requiring that color is opaque.
+    static void blit_mask_d32_a8_opaque(SkPMColor* dst, size_t dstRB,
+                                        const SkAlpha* mask, size_t maskRB,
+                                        SkColor color, int w, int h) {
+        SkASSERT(SkColorGetA(color) == 0xFF);
+        auto s = Sk4px::DupPMColor(SkPreMultiplyColor(color));
+        auto fn = [&](const Sk4px& d, const Sk4px& aa) {
+            //  = (s + d(1-sa))aa + d(1-aa)
+            //  = s*aa + d(1-sa*aa)
+            //  ~~~>
+            //  = s*aa + d(1-aa)
+            return s.approxMulDiv255(aa) + d.approxMulDiv255(aa.inv());
+        };
+        while (h --> 0) {
+            Sk4px::MapDstAlpha(w, dst, mask, fn);
+            dst  += dstRB  / sizeof(*dst);
+            mask += maskRB / sizeof(*mask);
+        }
+    }
 
-    if (color == SK_ColorBLACK) {
-        auto fn = [](const SkPx& d, const SkPx::Alpha& aa) {
+    // Same as _opaque, but assumes color == SK_ColorBLACK, a very common and even simpler case.
+    static void blit_mask_d32_a8_black(SkPMColor* dst, size_t dstRB,
+                                       const SkAlpha* mask, size_t maskRB,
+                                       int w, int h) {
+        auto fn = [](const Sk4px& d, const Sk4px& aa) {
            //  = (s + d(1-sa))aa + d(1-aa)
            //  = s*aa + d(1-sa*aa)
            //  ~~~>
            //  a = 1*aa + d(1-1*aa) = aa + d(1-aa)
            //  c = 0*aa + d(1-1*aa) =      d(1-aa)
-            return d.approxMulDiv255(aa.inv()).addAlpha(aa);
+            return aa.zeroColors() + d.approxMulDiv255(aa.inv());
        };
-        blit_mask_d32_a8(fn, dst, dstRB, mask, maskRB, w, h);
+        while (h --> 0) {
+            Sk4px::MapDstAlpha(w, dst, mask, fn);
+            dst  += dstRB  / sizeof(*dst);
+            mask += maskRB / sizeof(*mask);
+        }
+    }
+#endif
+
+static void blit_mask_d32_a8(SkPMColor* dst, size_t dstRB,
+                             const SkAlpha* mask, size_t maskRB,
+                             SkColor color, int w, int h) {
+    if (color == SK_ColorBLACK) {
+        blit_mask_d32_a8_black(dst, dstRB, mask, maskRB, w, h);
     } else if (SkColorGetA(color) == 0xFF) {
-        auto fn = [&](const SkPx& d, const SkPx::Alpha& aa) {
-            //  = (s + d(1-sa))aa + d(1-aa)
-            //  = s*aa + d(1-sa*aa)
-            //  ~~~>
-            //  = s*aa + d(1-aa)
-            return s.approxMulDiv255(aa) + d.approxMulDiv255(aa.inv());
-        };
-        blit_mask_d32_a8(fn, dst, dstRB, mask, maskRB, w, h);
+        blit_mask_d32_a8_opaque(dst, dstRB, mask, maskRB, color, w, h);
     } else {
-        auto fn = [&](const SkPx& d, const SkPx::Alpha& aa) {
-            //  = (s + d(1-sa))aa + d(1-aa)
-            //  = s*aa + d(1-sa*aa)
-            auto left  = s.approxMulDiv255(aa),
-                 right = d.approxMulDiv255(left.alpha().inv());
-            return left + right;  // This does not overflow (exhaustively checked).
-        };
-        blit_mask_d32_a8(fn, dst, dstRB, mask, maskRB, w, h);
+        blit_mask_d32_a8_general(dst, dstRB, mask, maskRB, color, w, h);
    }
 }
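The lambdas in the diff above all lean on the same algebra: source-over with coverage aa folds to dst' = s*aa + d*(1 - sa*aa). A scalar reference of the general case makes the identity easy to test against the SIMD paths; div255() and blend_channel() here are local stand-ins for illustration, not Skia API:

    #include <cstdint>

    // Correctly-rounded divide by 255.
    static uint32_t div255(uint32_t x) { return (x + 127) / 255; }

    // One channel of: (s + d*(1-sa))*aa + d*(1-aa)  ==  s*aa + d*(1 - sa*aa).
    static uint32_t blend_channel(uint32_t s, uint32_t d, uint32_t sa, uint32_t aa) {
        return div255(s * aa) + div255(d * (255 - div255(sa * aa)));
    }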
diff --git a/src/opts/SkPx_neon.h b/src/opts/SkPx_neon.h
deleted file mode 100644
index 8daa5297f1..0000000000
--- a/src/opts/SkPx_neon.h
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef SkPx_neon_DEFINED
-#define SkPx_neon_DEFINED
-
-// When we have NEON, we like to work 8 pixels at a time.
-// This lets us exploit vld4/vst4 and represent SkPx as planar uint8x8x4_t,
-// Wide as planar uint16x8x4_t, and Alpha as a single uint8x8_t plane.
-
-struct SkPx_neon {
-    static const int N = 8;
-
-    uint8x8x4_t fVec;
-    SkPx_neon(uint8x8x4_t vec) : fVec(vec) {}
-
-    static SkPx_neon Dup(uint32_t px) { return vld4_dup_u8((const uint8_t*)&px); }
-    static SkPx_neon Load(const uint32_t* px) { return vld4_u8((const uint8_t*)px); }
-    static SkPx_neon Load(const uint32_t* px, int n) {
-        SkASSERT(0 < n && n < 8);
-        uint8x8x4_t v = vld4_dup_u8((const uint8_t*)px);  // n>=1, so start all lanes with pixel 0.
-        switch (n) {
-            case 7: v = vld4_lane_u8((const uint8_t*)(px+6), v, 6);  // fall through
-            case 6: v = vld4_lane_u8((const uint8_t*)(px+5), v, 5);  // fall through
-            case 5: v = vld4_lane_u8((const uint8_t*)(px+4), v, 4);  // fall through
-            case 4: v = vld4_lane_u8((const uint8_t*)(px+3), v, 3);  // fall through
-            case 3: v = vld4_lane_u8((const uint8_t*)(px+2), v, 2);  // fall through
-            case 2: v = vld4_lane_u8((const uint8_t*)(px+1), v, 1);
-        }
-        return v;
-    }
-
-    void store(uint32_t* px) const { vst4_u8((uint8_t*)px, fVec); }
-    void store(uint32_t* px, int n) const {
-        SkASSERT(0 < n && n < 8);
-        switch (n) {
-            case 7: vst4_lane_u8((uint8_t*)(px+6), fVec, 6);
-            case 6: vst4_lane_u8((uint8_t*)(px+5), fVec, 5);
-            case 5: vst4_lane_u8((uint8_t*)(px+4), fVec, 4);
-            case 4: vst4_lane_u8((uint8_t*)(px+3), fVec, 3);
-            case 3: vst4_lane_u8((uint8_t*)(px+2), fVec, 2);
-            case 2: vst4_lane_u8((uint8_t*)(px+1), fVec, 1);
-            case 1: vst4_lane_u8((uint8_t*)(px+0), fVec, 0);
-        }
-    }
-
-    struct Alpha {
-        uint8x8_t fA;
-        Alpha(uint8x8_t a) : fA(a) {}
-
-        static Alpha Dup(uint8_t a) { return vdup_n_u8(a); }
-        static Alpha Load(const uint8_t* a) { return vld1_u8(a); }
-        static Alpha Load(const uint8_t* a, int n) {
-            SkASSERT(0 < n && n < 8);
-            uint8x8_t v = vld1_dup_u8(a);  // n>=1, so start all lanes with alpha 0.
-            switch (n) {
-                case 7: v = vld1_lane_u8(a+6, v, 6);  // fall through
-                case 6: v = vld1_lane_u8(a+5, v, 5);  // fall through
-                case 5: v = vld1_lane_u8(a+4, v, 4);  // fall through
-                case 4: v = vld1_lane_u8(a+3, v, 3);  // fall through
-                case 3: v = vld1_lane_u8(a+2, v, 2);  // fall through
-                case 2: v = vld1_lane_u8(a+1, v, 1);
-            }
-            return v;
-        }
-        Alpha inv() const { return vsub_u8(vdup_n_u8(255), fA); }
-    };
-
-    struct Wide {
-        uint16x8x4_t fVec;
-        Wide(uint16x8x4_t vec) : fVec(vec) {}
-
-        Wide operator+(const Wide& o) const {
-            return (uint16x8x4_t) {{
-                vaddq_u16(fVec.val[0], o.fVec.val[0]),
-                vaddq_u16(fVec.val[1], o.fVec.val[1]),
-                vaddq_u16(fVec.val[2], o.fVec.val[2]),
-                vaddq_u16(fVec.val[3], o.fVec.val[3]),
-            }};
-        }
-        Wide operator-(const Wide& o) const {
-            return (uint16x8x4_t) {{
-                vsubq_u16(fVec.val[0], o.fVec.val[0]),
-                vsubq_u16(fVec.val[1], o.fVec.val[1]),
-                vsubq_u16(fVec.val[2], o.fVec.val[2]),
-                vsubq_u16(fVec.val[3], o.fVec.val[3]),
-            }};
-        }
-        Wide operator<<(int bits) const {
-        #if defined(SK_DEBUG)
-            return (uint16x8x4_t) {{
-                shift_slow(fVec.val[0], -bits),
-                shift_slow(fVec.val[1], -bits),
-                shift_slow(fVec.val[2], -bits),
-                shift_slow(fVec.val[3], -bits),
-            }};
-        #else
-            return (uint16x8x4_t) {{
-                vshlq_n_u16(fVec.val[0], bits),
-                vshlq_n_u16(fVec.val[1], bits),
-                vshlq_n_u16(fVec.val[2], bits),
-                vshlq_n_u16(fVec.val[3], bits),
-            }};
-        #endif
-        }
-        Wide operator>>(int bits) const {
-        #if defined(SK_DEBUG)
-            return (uint16x8x4_t) {{
-                shift_slow(fVec.val[0], bits),
-                shift_slow(fVec.val[1], bits),
-                shift_slow(fVec.val[2], bits),
-                shift_slow(fVec.val[3], bits),
-            }};
-        #else
-            return (uint16x8x4_t) {{
-                vshrq_n_u16(fVec.val[0], bits),
-                vshrq_n_u16(fVec.val[1], bits),
-                vshrq_n_u16(fVec.val[2], bits),
-                vshrq_n_u16(fVec.val[3], bits),
-            }};
-        #endif
-        }
-
-        // v >> bits, for bits in [-15, 16].
-        static uint16x8_t shift_slow(uint16x8_t v, int bits) {
-            SkASSERT(bits >= -16 && bits <= 16);
-            switch (bits) {
-            #define L(n) case -n: return vshlq_n_u16(v, n);
-            #define R(n) case  n: return vshrq_n_u16(v, n);
-                L(15) L(14) L(13) L(12) L(11) L(10) L(9) L(8) L(7) L(6) L(5) L(4) L(3) L(2) L(1)
-                R(16) R(15) R(14) R(13) R(12) R(11) R(10) R(9) R(8) R(7) R(6) R(5) R(4) R(3) R(2) R(1)
-            #undef L
-            #undef R
-            }
-            return v;
-        }
-
-        SkPx_neon addNarrowHi(const SkPx_neon& o) const {
-            return (uint8x8x4_t) {{
-                vshrn_n_u16(vaddw_u8(fVec.val[0], o.fVec.val[0]), 8),
-                vshrn_n_u16(vaddw_u8(fVec.val[1], o.fVec.val[1]), 8),
-                vshrn_n_u16(vaddw_u8(fVec.val[2], o.fVec.val[2]), 8),
-                vshrn_n_u16(vaddw_u8(fVec.val[3], o.fVec.val[3]), 8),
-            }};
-        }
-    };
-
-    Alpha alpha() const { return fVec.val[3]; }
-
-    Wide widenLo() const {
-        return (uint16x8x4_t) {{
-            vmovl_u8(fVec.val[0]),
-            vmovl_u8(fVec.val[1]),
-            vmovl_u8(fVec.val[2]),
-            vmovl_u8(fVec.val[3]),
-        }};
-    }
-    // TODO: these two can probably be done faster.
-    Wide widenHi() const { return this->widenLo() << 8; }
-    Wide widenLoHi() const { return this->widenLo() + this->widenHi(); }
-
-    SkPx_neon operator+(const SkPx_neon& o) const {
-        return (uint8x8x4_t) {{
-            vadd_u8(fVec.val[0], o.fVec.val[0]),
-            vadd_u8(fVec.val[1], o.fVec.val[1]),
-            vadd_u8(fVec.val[2], o.fVec.val[2]),
-            vadd_u8(fVec.val[3], o.fVec.val[3]),
-        }};
-    }
-    SkPx_neon operator-(const SkPx_neon& o) const {
-        return (uint8x8x4_t) {{
-            vsub_u8(fVec.val[0], o.fVec.val[0]),
-            vsub_u8(fVec.val[1], o.fVec.val[1]),
-            vsub_u8(fVec.val[2], o.fVec.val[2]),
-            vsub_u8(fVec.val[3], o.fVec.val[3]),
-        }};
-    }
-    SkPx_neon saturatedAdd(const SkPx_neon& o) const {
-        return (uint8x8x4_t) {{
-            vqadd_u8(fVec.val[0], o.fVec.val[0]),
-            vqadd_u8(fVec.val[1], o.fVec.val[1]),
-            vqadd_u8(fVec.val[2], o.fVec.val[2]),
-            vqadd_u8(fVec.val[3], o.fVec.val[3]),
-        }};
-    }
-
-    Wide operator*(const Alpha& a) const {
-        return (uint16x8x4_t) {{
-            vmull_u8(fVec.val[0], a.fA),
-            vmull_u8(fVec.val[1], a.fA),
-            vmull_u8(fVec.val[2], a.fA),
-            vmull_u8(fVec.val[3], a.fA),
-        }};
-    }
-    SkPx_neon approxMulDiv255(const Alpha& a) const {
-        return (*this * a).addNarrowHi(*this);
-    }
-
-    SkPx_neon addAlpha(const Alpha& a) const {
-        return (uint8x8x4_t) {{
-            fVec.val[0],
-            fVec.val[1],
-            fVec.val[2],
-            vadd_u8(fVec.val[3], a.fA),
-        }};
-    }
-};
-typedef SkPx_neon SkPx;
-
-#endif//SkPx_neon_DEFINED
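In SkPx_neon above, approxMulDiv255() is operator*(Alpha) followed by addNarrowHi(): vmull_u8 widens px*a to 16 bits, vaddw_u8 adds the original pixels back, and vshrn_n_u16 takes the high byte. One plane of that instruction sequence as a standalone sketch (the function name is illustrative, not Skia's):

    #include <arm_neon.h>

    // One 8-lane plane of (px*a + px) >> 8.
    uint8x8_t approx_mul_div_255(uint8x8_t px, uint8x8_t a) {
        uint16x8_t wide = vmull_u8(px, a);   // px*a, widened to 16 bits.
        wide = vaddw_u8(wide, px);           // + px.
        return vshrn_n_u16(wide, 8);         // >> 8, narrowed back to 8 bits.
    }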
diff --git a/src/opts/SkPx_none.h b/src/opts/SkPx_none.h
deleted file mode 100644
index 3825f03dde..0000000000
--- a/src/opts/SkPx_none.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef SkPx_none_DEFINED
-#define SkPx_none_DEFINED
-
-// Nothing fancy here.  We're the backup _none case after all.
-// Our declared sweet spot is simply a single pixel at a time.
-
-struct SkPx_none {
-    static const int N = 1;
-    uint8_t f8[4];
-
-    SkPx_none(uint32_t px) { memcpy(f8, &px, 4); }
-    SkPx_none(uint8_t x, uint8_t y, uint8_t z, uint8_t a) {
-        f8[0] = x; f8[1] = y; f8[2] = z; f8[3] = a;
-    }
-
-    static SkPx_none Dup(uint32_t px) { return px; }
-    static SkPx_none Load(const uint32_t* px) { return *px; }
-    static SkPx_none Load(const uint32_t* px, int n) {
-        SkASSERT(false);  // There are no 0<n<1.
-        return 0;
-    }
-
-    void store(uint32_t* px) const { memcpy(px, f8, 4); }
-    void store(uint32_t* px, int n) const {
-        SkASSERT(false);  // There are no 0<n<1.
-    }
-
-    struct Alpha {
-        uint8_t fA;
-        Alpha(uint8_t a) : fA(a) {}
-
-        static Alpha Dup(uint8_t a) { return a; }
-        static Alpha Load(const uint8_t* a) { return *a; }
-        static Alpha Load(const uint8_t* a, int n) {
-            SkASSERT(false);  // There are no 0<n<1.
-            return 0;
-        }
-        Alpha inv() const { return 255 - fA; }
-    };
-
-    struct Wide {
-        uint16_t f16[4];
-
-        Wide(uint16_t x, uint16_t y, uint16_t z, uint16_t a) {
-            f16[0] = x; f16[1] = y; f16[2] = z; f16[3] = a;
-        }
-
-        Wide operator+(const Wide& o) const {
-            return Wide(f16[0]+o.f16[0], f16[1]+o.f16[1], f16[2]+o.f16[2], f16[3]+o.f16[3]);
-        }
-        Wide operator-(const Wide& o) const {
-            return Wide(f16[0]-o.f16[0], f16[1]-o.f16[1], f16[2]-o.f16[2], f16[3]-o.f16[3]);
-        }
-        Wide operator<<(int bits) const {
-            return Wide(f16[0]<<bits, f16[1]<<bits, f16[2]<<bits, f16[3]<<bits);
-        }
-        Wide operator>>(int bits) const {
-            return Wide(f16[0]>>bits, f16[1]>>bits, f16[2]>>bits, f16[3]>>bits);
-        }
-
-        SkPx_none addNarrowHi(const SkPx_none& o) const {
-            Wide sum = (*this + o.widenLo()) >> 8;
-            return SkPx_none(sum.f16[0], sum.f16[1], sum.f16[2], sum.f16[3]);
-        }
-    };
-
-    Alpha alpha() const { return f8[3]; }
-
-    Wide widenLo() const { return Wide(f8[0], f8[1], f8[2], f8[3]); }
-    Wide widenHi() const { return this->widenLo() << 8; }
-    Wide widenLoHi() const { return this->widenLo() + this->widenHi(); }
-
-    SkPx_none operator+(const SkPx_none& o) const {
-        return SkPx_none(f8[0]+o.f8[0], f8[1]+o.f8[1], f8[2]+o.f8[2], f8[3]+o.f8[3]);
-    }
-    SkPx_none operator-(const SkPx_none& o) const {
-        return SkPx_none(f8[0]-o.f8[0], f8[1]-o.f8[1], f8[2]-o.f8[2], f8[3]-o.f8[3]);
-    }
-    SkPx_none saturatedAdd(const SkPx_none& o) const {
-        return SkPx_none(SkTMax(0, SkTMin(255, f8[0]+o.f8[0])),
-                         SkTMax(0, SkTMin(255, f8[1]+o.f8[1])),
-                         SkTMax(0, SkTMin(255, f8[2]+o.f8[2])),
-                         SkTMax(0, SkTMin(255, f8[3]+o.f8[3])));
-    }
-
-    Wide operator*(const Alpha& a) const {
-        return Wide(f8[0]*a.fA, f8[1]*a.fA, f8[2]*a.fA, f8[3]*a.fA);
-    }
-    SkPx_none approxMulDiv255(const Alpha& a) const {
-        return (*this * a).addNarrowHi(*this);
-    }
-
-    SkPx_none addAlpha(const Alpha& a) const {
-        return SkPx_none(f8[0], f8[1], f8[2], f8[3]+a.fA);
-    }
-};
-typedef SkPx_none SkPx;
-
-#endif//SkPx_none_DEFINED
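A small aside on the widen methods shared by all three backends: widenLoHi() is widenLo() + widenHi(), i.e. x + (x<<8) = x*257 per channel, which maps 0..255 onto 0..65535 exactly (255*257 == 65535). A trivial standalone check (illustrative, not Skia code):

    #include <cassert>
    #include <cstdint>

    // Per-channel widenLoHi(): x + (x << 8) == x * 257, exact at both ends.
    int main() {
        for (uint32_t x = 0; x < 256; x++) {
            assert(x + (x << 8) == x * 257);
        }
        assert(255u * 257u == 65535u);
    }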
diff --git a/src/opts/SkPx_sse.h b/src/opts/SkPx_sse.h
deleted file mode 100644
index b82d4e5f93..0000000000
--- a/src/opts/SkPx_sse.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef SkPx_sse_DEFINED
-#define SkPx_sse_DEFINED
-
-// SkPx_sse's sweet spot is to work with 4 pixels at a time,
-// stored interlaced, just as they sit in memory: rgba rgba rgba rgba.
-
-// SkPx_sse's best way to work with alphas is similar,
-// replicating the 4 alphas 4 times each across the pixel: aaaa aaaa aaaa aaaa.
-
-// When working with fewer than 4 pixels, we load the pixels in the low lanes,
-// usually filling the top lanes with zeros (but who cares, might be junk).
-
-struct SkPx_sse {
-    static const int N = 4;
-
-    __m128i fVec;
-    SkPx_sse(__m128i vec) : fVec(vec) {}
-
-    static SkPx_sse Dup(uint32_t px) { return _mm_set1_epi32(px); }
-    static SkPx_sse Load(const uint32_t* px) { return _mm_loadu_si128((const __m128i*)px); }
-    static SkPx_sse Load(const uint32_t* px, int n) {
-        SkASSERT(n > 0 && n < 4);
-        switch (n) {
-            case 1: return _mm_cvtsi32_si128(px[0]);
-            case 2: return _mm_loadl_epi64((const __m128i*)px);
-            case 3: return _mm_or_si128(_mm_loadl_epi64((const __m128i*)px),
-                                        _mm_slli_si128(_mm_cvtsi32_si128(px[2]), 8));
-        }
-        return _mm_setzero_si128();  // Not actually reachable.
-    }
-
-    void store(uint32_t* px) const { _mm_storeu_si128((__m128i*)px, fVec); }
-    void store(uint32_t* px, int n) const {
-        SkASSERT(n > 0 && n < 4);
-        __m128i v = fVec;
-        if (n & 1) {
-            *px++ = _mm_cvtsi128_si32(v);
-            v = _mm_srli_si128(v, 4);
-        }
-        if (n & 2) {
-            _mm_storel_epi64((__m128i*)px, v);
-        }
-    }
-
-    struct Alpha {
-        __m128i fVec;
-        Alpha(__m128i vec) : fVec(vec) {}
-
-        static Alpha Dup(uint8_t a) { return _mm_set1_epi8(a); }
-        static Alpha Load(const uint8_t* a) {
-            __m128i as = _mm_cvtsi32_si128(*(const uint32_t*)a);  // ____ ____ ____ 3210
-        #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
-            return _mm_shuffle_epi8(as, _mm_set_epi8(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0));
-        #else
-            as = _mm_unpacklo_epi8 (as, _mm_setzero_si128());  // ____ ____ _3_2 _1_0
-            as = _mm_unpacklo_epi16(as, _mm_setzero_si128());  // ___3 ___2 ___1 ___0
-            as = _mm_or_si128(as, _mm_slli_si128(as, 1));      // __33 __22 __11 __00
-            return _mm_or_si128(as, _mm_slli_si128(as, 2));    // 3333 2222 1111 0000
-        #endif
-        }
-        static Alpha Load(const uint8_t* a, int n) {
-            SkASSERT(n > 0 && n < 4);
-            uint8_t a4[] = { 0,0,0,0 };
-            switch (n) {
-                case 3: a4[2] = a[2];  // fall through
-                case 2: a4[1] = a[1];  // fall through
-                case 1: a4[0] = a[0];
-            }
-            return Load(a4);
-        }
-
-        Alpha inv() const { return _mm_sub_epi8(_mm_set1_epi8(~0), fVec); }
-    };
-
-    struct Wide {
-        __m128i fLo, fHi;
-        Wide(__m128i lo, __m128i hi) : fLo(lo), fHi(hi) {}
-
-        Wide operator+(const Wide& o) const {
-            return Wide(_mm_add_epi16(fLo, o.fLo), _mm_add_epi16(fHi, o.fHi));
-        }
-        Wide operator-(const Wide& o) const {
-            return Wide(_mm_sub_epi16(fLo, o.fLo), _mm_sub_epi16(fHi, o.fHi));
-        }
-        Wide operator<<(int bits) const {
-            return Wide(_mm_slli_epi16(fLo, bits), _mm_slli_epi16(fHi, bits));
-        }
-        Wide operator>>(int bits) const {
-            return Wide(_mm_srli_epi16(fLo, bits), _mm_srli_epi16(fHi, bits));
-        }
-
-        SkPx_sse addNarrowHi(const SkPx_sse& o) const {
-            Wide sum = (*this + o.widenLo()) >> 8;
-            return _mm_packus_epi16(sum.fLo, sum.fHi);
-        }
-    };
-
-    Alpha alpha() const {
-    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
-        return _mm_shuffle_epi8(fVec, _mm_set_epi8(15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3));
-    #else
-        __m128i as = _mm_srli_epi32(fVec, 24);          // ___3 ___2 ___1 ___0
-        as = _mm_or_si128(as, _mm_slli_si128(as, 1));   // __33 __22 __11 __00
-        return _mm_or_si128(as, _mm_slli_si128(as, 2)); // 3333 2222 1111 0000
-    #endif
-    }
-
-    Wide widenLo() const {
-        return Wide(_mm_unpacklo_epi8(fVec, _mm_setzero_si128()),
-                    _mm_unpackhi_epi8(fVec, _mm_setzero_si128()));
-    }
-    Wide widenHi() const {
-        return Wide(_mm_unpacklo_epi8(_mm_setzero_si128(), fVec),
-                    _mm_unpackhi_epi8(_mm_setzero_si128(), fVec));
-    }
-    Wide widenLoHi() const {
-        return Wide(_mm_unpacklo_epi8(fVec, fVec),
-                    _mm_unpackhi_epi8(fVec, fVec));
-    }
-
-    SkPx_sse operator+(const SkPx_sse& o) const { return _mm_add_epi8(fVec, o.fVec); }
-    SkPx_sse operator-(const SkPx_sse& o) const { return _mm_sub_epi8(fVec, o.fVec); }
-    SkPx_sse saturatedAdd(const SkPx_sse& o) const { return _mm_adds_epi8(fVec, o.fVec); }
-
-    Wide operator*(const Alpha& a) const {
-        __m128i pLo = _mm_unpacklo_epi8( fVec, _mm_setzero_si128()),
-                aLo = _mm_unpacklo_epi8(a.fVec, _mm_setzero_si128()),
-                pHi = _mm_unpackhi_epi8( fVec, _mm_setzero_si128()),
-                aHi = _mm_unpackhi_epi8(a.fVec, _mm_setzero_si128());
-        return Wide(_mm_mullo_epi16(pLo, aLo), _mm_mullo_epi16(pHi, aHi));
-    }
-    SkPx_sse approxMulDiv255(const Alpha& a) const {
-        return (*this * a).addNarrowHi(*this);
-    }
-
-    SkPx_sse addAlpha(const Alpha& a) const {
-        return _mm_add_epi8(fVec, _mm_and_si128(a.fVec, _mm_set1_epi32(0xFF000000)));
-    }
-};
-
-typedef SkPx_sse SkPx;
-
-#endif//SkPx_sse_DEFINED
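For context, here is a hypothetical call site for the new blit_mask_d32_a8() dispatcher. It assumes Skia's usual typedefs (SkPMColor is uint32_t, SkAlpha is uint8_t), the SK_OPTS_NS namespace macro, and tightly packed rows; none of this is part of the CL, and real blitters pass the device's row-byte strides:

    #include <cstdint>
    #include <vector>

    // Blend 50% coverage of opaque gray over an opaque white 5x4 destination.
    void example_blit() {
        const int w = 5, h = 4;
        std::vector<uint32_t> dst(w * h, 0xFFFFFFFF);  // Opaque white, 8888.
        std::vector<uint8_t>  mask(w * h, 0x80);       // ~50% coverage everywhere.

        SK_OPTS_NS::blit_mask_d32_a8(dst.data(),  w * sizeof(uint32_t),  // dstRB
                                     mask.data(), w * sizeof(uint8_t),   // maskRB
                                     0xFF808080 /* opaque gray SkColor */, w, h);
    }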