author    mtklein <mtklein@chromium.org>        2015-09-14 12:43:20 -0700
committer Commit bot <commit-bot@chromium.org>  2015-09-14 12:43:20 -0700
commit    82c93b45ed6ac0b628adb8375389c202d1f586f9 (patch)
tree      bdb517b2f2a05fe22dda1f84f5f5eafda87efa87 /src
parent    b5b603241aaa99e07dc4e12ca9f2661aa85e5f74 (diff)
SkPx: new approach to fixed-point SIMD
SkPx is like Sk4px, except each platform implementation of SkPx can declare a
different sweet spot of N pixels, with extra loads and stores to handle the
ragged edge of 0<n<N pixels.

In this case, _sse's sweet spot remains 4 pixels. _neon jumps up to 8 so we can
now use NEON's transposing loads and stores, and _none is just 1. This makes
operations involving alpha considerably more efficient on NEON, as alpha is its
own distinct 8x8 bit plane that's easy to toss around.

This incorporates a few other improvements I've been wanting:
  - no requirement that we're dealing with SkPMColor. SkColor works too.
  - no anonymous namespace hack to differentiate implementations.

Codegen and perf look good on Clang/x86-64 and GCC/ARMv7. The NEON code looks
very similar to the old NEON code, as intended. No .skp or GM diffs on my
laptop. Don't expect any.

I intend this to replace Sk4px. Plan after landing:
  - port SkXfermode_opts.h
  - port Color32 in SkBlitRow_D32.cpp (and move to SkBlitRow_opts.h like other SkOpts code)
  - delete all Sk4px-related code
  - clean up evolutionary dead ends in SkNx (Sk16b, Sk16h, Sk4i, Sk4d, etc.),
    leaving Sk2f, Sk4f (and Sk2s, Sk4s)
  - find a machine with AVX2 to work on, write SkPx_avx2.h handling 8 pixels at a time

In the end we'll have Sk4f for float pixels, SkPx for fixed-point pixels.

BUG=skia:4117

Review URL: https://codereview.chromium.org/1317233005
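
The sweet-spot-plus-ragged-edge pattern is easiest to read as a generic per-row
loop. The sketch below is illustrative only, not part of the commit; it mirrors
the blit_mask_d32_a8 helper added to SkBlitMask_opts.h further down, under the
hypothetical name for_each_px:

// Sketch only. Assumes SkPx.h (and SkTypes.h for SkPMColor/SkAlpha) are included.
// Process SkPx::N pixels at a time, then finish the 0 < n < SkPx::N ragged edge
// with the partial Load/store.
template <typename Fn>
static void for_each_px(const Fn& fn, SkPMColor* dst, const SkAlpha* mask, int w) {
    int n = w;
    while (n >= SkPx::N) {
        fn(SkPx::LoadN(dst), SkPx::Alpha::LoadN(mask)).storeN(dst);
        dst += SkPx::N; mask += SkPx::N; n -= SkPx::N;
    }
    if (n > 0) {  // ragged edge
        fn(SkPx::Load(n, dst), SkPx::Alpha::Load(n, mask)).store(n, dst);
    }
}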
Diffstat (limited to 'src')
-rw-r--r--  src/core/SkPx.h             |  89
-rw-r--r--  src/opts/SkBlitMask_opts.h  | 213
-rw-r--r--  src/opts/SkPx_neon.h        | 182
-rw-r--r--  src/opts/SkPx_none.h        | 106
-rw-r--r--  src/opts/SkPx_sse.h         | 150
5 files changed, 567 insertions(+), 173 deletions(-)
diff --git a/src/core/SkPx.h b/src/core/SkPx.h
new file mode 100644
index 0000000000..a4b1978e63
--- /dev/null
+++ b/src/core/SkPx.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPx_DEFINED
+#define SkPx_DEFINED
+
+#include "SkTypes.h"
+#include "SkColorPriv.h"
+
+// We'll include one of src/opts/SkPx_{sse,neon,none}.h to define a type SkPx.
+//
+// SkPx represents up to SkPx::N 8888 pixels. It's agnostic to whether these
+// are SkColors or SkPMColors; it only assumes that alpha is the high byte.
+static_assert(SK_A32_SHIFT == 24, "For both SkColor and SkPMColor, alpha is always the high byte.");
+//
+// SkPx::Alpha represents up to SkPx::N 8-bit values, usually coverage or alpha.
+// SkPx::Wide represents up to SkPx::N pixels with 16 bits per component.
+//
+// SkPx supports the following methods:
+// static SkPx Dup(uint32_t);
+// static SkPx LoadN(const uint32_t*);
+// static SkPx Load(int n, const uint32_t*); // where 0<n<SkPx::N
+// void storeN(uint32_t*) const;
+// void store(int n, uint32_t*) const; // where 0<n<SkPx::N
+//
+// Alpha alpha() const; // argb -> a
+// Wide widenLo() const; // argb -> 0a0r0g0b
+// Wide widenHi() const; // argb -> a0r0g0b0
+// Wide widenLoHi() const; // argb -> aarrggbb
+//
+// SkPx operator+(const SkPx&) const;
+// SkPx operator-(const SkPx&) const;
+// SkPx saturatedAdd(const SkPx&) const;
+//
+// Wide operator*(const Alpha&) const; // argb * A -> (a*A)(r*A)(g*A)(b*A)
+//
+// // Fast approximate (px*a+127)/255.
+// // Never off by more than 1, and always correct when px or a is 0 or 255.
+// // We use the approximation (px*a+px)/256.
+// SkPx approxMulDiv255(const Alpha&) const;
+//
+// SkPx addAlpha(const Alpha&) const; // argb + A -> (a+A)rgb
+//
+// SkPx::Alpha supports the following methods:
+// static Alpha Dup(uint8_t);
+// static Alpha LoadN(const uint8_t*);
+// static Alpha Load(int n, const uint8_t*); // where 0<n<SkPx::N
+//
+// Alpha inv() const; // a -> 255-a
+//
+// SkPx::Wide supports the following methods:
+// Wide operator+(const Wide&);
+// Wide operator-(const Wide&);
+// Wide operator<<(int bits);
+// Wide operator>>(int bits);
+//
+// // Return the high byte of each component of (*this + o.widenLo()).
+// SkPx addNarrowHi(const SkPx& o);
+//
+// Methods left unwritten, but certainly to come:
+// SkPx SkPx::operator<(const SkPx&) const;
+// SkPx SkPx::thenElse(const SkPx& then, const SkPx& else) const;
+// Wide Wide::operator<(const Wide&) const;
+// Wide Wide::thenElse(const Wide& then, const Wide& else) const;
+//
+// SkPx Wide::div255() const; // Rounds, think (*this + 127) / 255.
+//
+// The different implementations of SkPx have complete freedom to choose
+// SkPx::N and how they represent SkPx, SkPx::Alpha, and SkPx::Wide.
+//
+// All observable math must remain identical.
+
+#if defined(SKNX_NO_SIMD)
+ #include "../opts/SkPx_none.h"
+#else
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include "../opts/SkPx_sse.h"
+ #elif defined(SK_ARM_HAS_NEON)
+ #include "../opts/SkPx_neon.h"
+ #else
+ #include "../opts/SkPx_none.h"
+ #endif
+#endif
+
+#endif//SkPx_DEFINED
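
A quick scalar check of the approxMulDiv255 note above, i.e. that (px*a+px)/256
stands in for (px*a+127)/255 and is never off by more than 1. This is an
illustrative standalone program, not part of the commit:

#include <cstdio>

int main() {
    int worst = 0;
    for (int px = 0; px < 256; px++)
    for (int a  = 0; a  < 256; a++) {
        int exact  = (px*a + 127) / 255,   // the rounding we want
            approx = (px*a + px ) / 256,   // the rounding SkPx uses
            diff   = exact > approx ? exact - approx : approx - exact;
        if (diff > worst) { worst = diff; }
    }
    printf("max |exact - approx| = %d\n", worst);   // prints 1, matching the comment above
    return 0;
}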
diff --git a/src/opts/SkBlitMask_opts.h b/src/opts/SkBlitMask_opts.h
index 2f4fe6ffb8..dd7bda3188 100644
--- a/src/opts/SkBlitMask_opts.h
+++ b/src/opts/SkBlitMask_opts.h
@@ -9,195 +9,62 @@
#define SkBlitMask_opts_DEFINED
#include "Sk4px.h"
+#include "SkPx.h"
namespace SK_OPTS_NS {
-#if defined(SK_ARM_HAS_NEON)
- // The Sk4px versions below will work fine with NEON, but we have had many indications
- // that it doesn't perform as well as this NEON-specific code. TODO(mtklein): why?
- #include "SkColor_opts_neon.h"
-
- template <bool isColor>
- static void D32_A8_Opaque_Color_neon(void* SK_RESTRICT dst, size_t dstRB,
- const void* SK_RESTRICT maskPtr, size_t maskRB,
- SkColor color, int width, int height) {
- SkPMColor pmc = SkPreMultiplyColor(color);
- SkPMColor* SK_RESTRICT device = (SkPMColor*)dst;
- const uint8_t* SK_RESTRICT mask = (const uint8_t*)maskPtr;
- uint8x8x4_t vpmc;
-
- maskRB -= width;
- dstRB -= (width << 2);
-
- if (width >= 8) {
- vpmc.val[NEON_A] = vdup_n_u8(SkGetPackedA32(pmc));
- vpmc.val[NEON_R] = vdup_n_u8(SkGetPackedR32(pmc));
- vpmc.val[NEON_G] = vdup_n_u8(SkGetPackedG32(pmc));
- vpmc.val[NEON_B] = vdup_n_u8(SkGetPackedB32(pmc));
+template <typename Fn>
+static void blit_mask_d32_a8(const Fn& fn, SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ int w, int h) {
+ while (h --> 0) {
+ int n = w, N = SkPx::N;
+ while (n >= N) {
+ fn(SkPx::LoadN(dst), SkPx::Alpha::LoadN(mask)).storeN(dst);
+ dst += N; mask += N; n -= N;
}
- do {
- int w = width;
- while (w >= 8) {
- uint8x8_t vmask = vld1_u8(mask);
- uint16x8_t vscale, vmask256 = SkAlpha255To256_neon8(vmask);
- if (isColor) {
- vscale = vsubw_u8(vdupq_n_u16(256),
- SkAlphaMul_neon8(vpmc.val[NEON_A], vmask256));
- } else {
- vscale = vsubw_u8(vdupq_n_u16(256), vmask);
- }
- uint8x8x4_t vdev = vld4_u8((uint8_t*)device);
-
- vdev.val[NEON_A] = SkAlphaMul_neon8(vpmc.val[NEON_A], vmask256)
- + SkAlphaMul_neon8(vdev.val[NEON_A], vscale);
- vdev.val[NEON_R] = SkAlphaMul_neon8(vpmc.val[NEON_R], vmask256)
- + SkAlphaMul_neon8(vdev.val[NEON_R], vscale);
- vdev.val[NEON_G] = SkAlphaMul_neon8(vpmc.val[NEON_G], vmask256)
- + SkAlphaMul_neon8(vdev.val[NEON_G], vscale);
- vdev.val[NEON_B] = SkAlphaMul_neon8(vpmc.val[NEON_B], vmask256)
- + SkAlphaMul_neon8(vdev.val[NEON_B], vscale);
-
- vst4_u8((uint8_t*)device, vdev);
-
- mask += 8;
- device += 8;
- w -= 8;
- }
-
- while (w--) {
- unsigned aa = *mask++;
- if (isColor) {
- *device = SkBlendARGB32(pmc, *device, aa);
- } else {
- *device = SkAlphaMulQ(pmc, SkAlpha255To256(aa))
- + SkAlphaMulQ(*device, SkAlpha255To256(255 - aa));
- }
- device += 1;
- };
-
- device = (uint32_t*)((char*)device + dstRB);
- mask += maskRB;
-
- } while (--height != 0);
- }
-
- static void blit_mask_d32_a8_general(SkPMColor* dst, size_t dstRB,
- const SkAlpha* mask, size_t maskRB,
- SkColor color, int w, int h) {
- D32_A8_Opaque_Color_neon<true>(dst, dstRB, mask, maskRB, color, w, h);
- }
-
- // As above, but made slightly simpler by requiring that color is opaque.
- static void blit_mask_d32_a8_opaque(SkPMColor* dst, size_t dstRB,
- const SkAlpha* mask, size_t maskRB,
- SkColor color, int w, int h) {
- D32_A8_Opaque_Color_neon<false>(dst, dstRB, mask, maskRB, color, w, h);
- }
-
- // Same as _opaque, but assumes color == SK_ColorBLACK, a very common and even simpler case.
- static void blit_mask_d32_a8_black(SkPMColor* dst, size_t dstRB,
- const SkAlpha* maskPtr, size_t maskRB,
- int width, int height) {
- SkPMColor* SK_RESTRICT device = (SkPMColor*)dst;
- const uint8_t* SK_RESTRICT mask = (const uint8_t*)maskPtr;
-
- maskRB -= width;
- dstRB -= (width << 2);
- do {
- int w = width;
- while (w >= 8) {
- uint8x8_t vmask = vld1_u8(mask);
- uint16x8_t vscale = vsubw_u8(vdupq_n_u16(256), vmask);
- uint8x8x4_t vdevice = vld4_u8((uint8_t*)device);
-
- vdevice = SkAlphaMulQ_neon8(vdevice, vscale);
- vdevice.val[NEON_A] += vmask;
-
- vst4_u8((uint8_t*)device, vdevice);
-
- mask += 8;
- device += 8;
- w -= 8;
- }
- while (w-- > 0) {
- unsigned aa = *mask++;
- *device = (aa << SK_A32_SHIFT)
- + SkAlphaMulQ(*device, SkAlpha255To256(255 - aa));
- device += 1;
- };
- device = (uint32_t*)((char*)device + dstRB);
- mask += maskRB;
- } while (--height != 0);
- }
-
-#else
- static void blit_mask_d32_a8_general(SkPMColor* dst, size_t dstRB,
- const SkAlpha* mask, size_t maskRB,
- SkColor color, int w, int h) {
- auto s = Sk4px::DupPMColor(SkPreMultiplyColor(color));
- auto fn = [&](const Sk4px& d, const Sk4px& aa) {
- // = (s + d(1-sa))aa + d(1-aa)
- // = s*aa + d(1-sa*aa)
- auto left = s.approxMulDiv255(aa),
- right = d.approxMulDiv255(left.alphas().inv());
- return left + right; // This does not overflow (exhaustively checked).
- };
- while (h --> 0) {
- Sk4px::MapDstAlpha(w, dst, mask, fn);
- dst += dstRB / sizeof(*dst);
- mask += maskRB / sizeof(*mask);
+ if (n > 0) {
+ fn(SkPx::Load(n, dst), SkPx::Alpha::Load(n, mask)).store(n, dst);
+ dst += n; mask += n;
}
+ dst += dstRB / sizeof(*dst) - w;
+ mask += maskRB / sizeof(*mask) - w;
}
+}
- // As above, but made slightly simpler by requiring that color is opaque.
- static void blit_mask_d32_a8_opaque(SkPMColor* dst, size_t dstRB,
- const SkAlpha* mask, size_t maskRB,
- SkColor color, int w, int h) {
- SkASSERT(SkColorGetA(color) == 0xFF);
- auto s = Sk4px::DupPMColor(SkPreMultiplyColor(color));
- auto fn = [&](const Sk4px& d, const Sk4px& aa) {
- // = (s + d(1-sa))aa + d(1-aa)
- // = s*aa + d(1-sa*aa)
- // ~~~>
- // = s*aa + d(1-aa)
- return s.approxMulDiv255(aa) + d.approxMulDiv255(aa.inv());
- };
- while (h --> 0) {
- Sk4px::MapDstAlpha(w, dst, mask, fn);
- dst += dstRB / sizeof(*dst);
- mask += maskRB / sizeof(*mask);
- }
- }
+static void blit_mask_d32_a8(SkPMColor* dst, size_t dstRB,
+ const SkAlpha* mask, size_t maskRB,
+ SkColor color, int w, int h) {
+ auto s = SkPx::Dup(SkPreMultiplyColor(color));
- // Same as _opaque, but assumes color == SK_ColorBLACK, a very common and even simpler case.
- static void blit_mask_d32_a8_black(SkPMColor* dst, size_t dstRB,
- const SkAlpha* mask, size_t maskRB,
- int w, int h) {
- auto fn = [](const Sk4px& d, const Sk4px& aa) {
+ if (color == SK_ColorBLACK) {
+ auto fn = [](const SkPx& d, const SkPx::Alpha& aa) {
// = (s + d(1-sa))aa + d(1-aa)
// = s*aa + d(1-sa*aa)
// ~~~>
// a = 1*aa + d(1-1*aa) = aa + d(1-aa)
// c = 0*aa + d(1-1*aa) = d(1-aa)
- return aa.zeroColors() + d.approxMulDiv255(aa.inv());
+ return d.approxMulDiv255(aa.inv()).addAlpha(aa);
};
- while (h --> 0) {
- Sk4px::MapDstAlpha(w, dst, mask, fn);
- dst += dstRB / sizeof(*dst);
- mask += maskRB / sizeof(*mask);
- }
- }
-#endif
-
-static void blit_mask_d32_a8(SkPMColor* dst, size_t dstRB,
- const SkAlpha* mask, size_t maskRB,
- SkColor color, int w, int h) {
- if (color == SK_ColorBLACK) {
- blit_mask_d32_a8_black(dst, dstRB, mask, maskRB, w, h);
+ blit_mask_d32_a8(fn, dst, dstRB, mask, maskRB, w, h);
} else if (SkColorGetA(color) == 0xFF) {
- blit_mask_d32_a8_opaque(dst, dstRB, mask, maskRB, color, w, h);
+ auto fn = [&](const SkPx& d, const SkPx::Alpha& aa) {
+ // = (s + d(1-sa))aa + d(1-aa)
+ // = s*aa + d(1-sa*aa)
+ // ~~~>
+ // = s*aa + d(1-aa)
+ return s.approxMulDiv255(aa) + d.approxMulDiv255(aa.inv());
+ };
+ blit_mask_d32_a8(fn, dst, dstRB, mask, maskRB, w, h);
} else {
- blit_mask_d32_a8_general(dst, dstRB, mask, maskRB, color, w, h);
+ auto fn = [&](const SkPx& d, const SkPx::Alpha& aa) {
+ // = (s + d(1-sa))aa + d(1-aa)
+ // = s*aa + d(1-sa*aa)
+ auto left = s.approxMulDiv255(aa),
+ right = d.approxMulDiv255(left.alpha().inv());
+ return left + right; // This does not overflow (exhaustively checked).
+ };
+ blit_mask_d32_a8(fn, dst, dstRB, mask, maskRB, w, h);
}
}
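
For reference, the algebra in the lambdas above, (s + d(1-sa))aa + d(1-aa) ==
s*aa + d(1 - sa*aa), written out as a scalar per-pixel function. This is an
illustrative sketch, not part of the commit; it uses exact (c*a+127)/255
rounding rather than approxMulDiv255, assumes premultiplied colors, and relies
on alpha being the top byte (SK_A32_SHIFT == 24), as SkPx.h asserts.
blend_general_ref is a hypothetical name:

#include <cstdint>

// General case: source-over of premultiplied s onto d with coverage aa.
static uint32_t blend_general_ref(uint32_t s, uint32_t d, uint8_t aa) {
    auto mul255 = [](uint32_t px, uint8_t a) {   // per-channel (c*a + 127) / 255
        uint32_t r = 0;
        for (int shift = 0; shift < 32; shift += 8) {
            uint32_t c = (px >> shift) & 0xFF;
            r |= ((c*a + 127) / 255) << shift;
        }
        return r;
    };
    uint32_t left  = mul255(s, aa);              // s*aa
    uint8_t  sa_aa = left >> 24;                 // alpha of s*aa, i.e. sa*aa
    uint32_t right = mul255(d, 255 - sa_aa);     // d*(1 - sa*aa)
    return left + right;                         // matches "left + right" above
}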
diff --git a/src/opts/SkPx_neon.h b/src/opts/SkPx_neon.h
new file mode 100644
index 0000000000..d026d4de8c
--- /dev/null
+++ b/src/opts/SkPx_neon.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPx_neon_DEFINED
+#define SkPx_neon_DEFINED
+
+// When we have NEON, we like to work 8 pixels at a time.
+// This lets us exploit vld4/vst4 and represent SkPx as planar uint8x8x4_t,
+// Wide as planar uint16x8x4_t, and Alpha as a single uint8x8_t plane.
+
+struct SkPx_neon {
+ static const int N = 8;
+
+ uint8x8x4_t fVec;
+ SkPx_neon(uint8x8x4_t vec) : fVec(vec) {}
+
+ static SkPx_neon Dup(uint32_t px) { return vld4_dup_u8((const uint8_t*)&px); }
+ static SkPx_neon LoadN(const uint32_t* px) { return vld4_u8((const uint8_t*)px); }
+ static SkPx_neon Load(int n, const uint32_t* px) {
+ SkASSERT(0 < n && n < 8);
+ uint8x8x4_t v = vld4_dup_u8((const uint8_t*)px); // n>=1, so start all lanes with pixel 0.
+ switch (n) {
+ case 7: v = vld4_lane_u8((const uint8_t*)(px+6), v, 6); // fall through
+ case 6: v = vld4_lane_u8((const uint8_t*)(px+5), v, 5); // fall through
+ case 5: v = vld4_lane_u8((const uint8_t*)(px+4), v, 4); // fall through
+ case 4: v = vld4_lane_u8((const uint8_t*)(px+3), v, 3); // fall through
+ case 3: v = vld4_lane_u8((const uint8_t*)(px+2), v, 2); // fall through
+ case 2: v = vld4_lane_u8((const uint8_t*)(px+1), v, 1);
+ }
+ return v;
+ }
+
+ void storeN(uint32_t* px) const { vst4_u8((uint8_t*)px, fVec); }
+ void store(int n, uint32_t* px) const {
+ SkASSERT(0 < n && n < 8);
+ switch (n) {
+ case 7: vst4_lane_u8((uint8_t*)(px+6), fVec, 6);
+ case 6: vst4_lane_u8((uint8_t*)(px+5), fVec, 5);
+ case 5: vst4_lane_u8((uint8_t*)(px+4), fVec, 4);
+ case 4: vst4_lane_u8((uint8_t*)(px+3), fVec, 3);
+ case 3: vst4_lane_u8((uint8_t*)(px+2), fVec, 2);
+ case 2: vst4_lane_u8((uint8_t*)(px+1), fVec, 1);
+ case 1: vst4_lane_u8((uint8_t*)(px+0), fVec, 0);
+ }
+ }
+
+ struct Alpha {
+ uint8x8_t fA;
+ Alpha(uint8x8_t a) : fA(a) {}
+
+ static Alpha Dup(uint8_t a) { return vdup_n_u8(a); }
+ static Alpha LoadN(const uint8_t* a) { return vld1_u8(a); }
+ static Alpha Load(int n, const uint8_t* a) {
+ SkASSERT(0 < n && n < 8);
+ uint8x8_t v = vld1_dup_u8(a); // n>=1, so start all lanes with alpha 0.
+ switch (n) {
+ case 7: v = vld1_lane_u8(a+6, v, 6); // fall through
+ case 6: v = vld1_lane_u8(a+5, v, 5); // fall through
+ case 5: v = vld1_lane_u8(a+4, v, 4); // fall through
+ case 4: v = vld1_lane_u8(a+3, v, 3); // fall through
+ case 3: v = vld1_lane_u8(a+2, v, 2); // fall through
+ case 2: v = vld1_lane_u8(a+1, v, 1);
+ }
+ return v;
+ }
+ Alpha inv() const { return vsub_u8(vdup_n_u8(255), fA); }
+ };
+
+ struct Wide {
+ uint16x8x4_t fVec;
+ Wide(uint16x8x4_t vec) : fVec(vec) {}
+
+ Wide operator+(const Wide& o) const {
+ return (uint16x8x4_t) {{
+ vaddq_u16(fVec.val[0], o.fVec.val[0]),
+ vaddq_u16(fVec.val[1], o.fVec.val[1]),
+ vaddq_u16(fVec.val[2], o.fVec.val[2]),
+ vaddq_u16(fVec.val[3], o.fVec.val[3]),
+ }};
+ }
+ Wide operator-(const Wide& o) const {
+ return (uint16x8x4_t) {{
+ vsubq_u16(fVec.val[0], o.fVec.val[0]),
+ vsubq_u16(fVec.val[1], o.fVec.val[1]),
+ vsubq_u16(fVec.val[2], o.fVec.val[2]),
+ vsubq_u16(fVec.val[3], o.fVec.val[3]),
+ }};
+ }
+ SK_ALWAYS_INLINE Wide operator<<(int bits) const {
+ return (uint16x8x4_t) {{
+ vshlq_n_u16(fVec.val[0], bits),
+ vshlq_n_u16(fVec.val[1], bits),
+ vshlq_n_u16(fVec.val[2], bits),
+ vshlq_n_u16(fVec.val[3], bits),
+ }};
+ }
+ SK_ALWAYS_INLINE Wide operator>>(int bits) const {
+ return (uint16x8x4_t) {{
+ vshrq_n_u16(fVec.val[0], bits),
+ vshrq_n_u16(fVec.val[1], bits),
+ vshrq_n_u16(fVec.val[2], bits),
+ vshrq_n_u16(fVec.val[3], bits),
+ }};
+ }
+
+ SkPx_neon addNarrowHi(const SkPx_neon& o) const {
+ return (uint8x8x4_t) {{
+ vshrn_n_u16(vaddw_u8(fVec.val[0], o.fVec.val[0]), 8),
+ vshrn_n_u16(vaddw_u8(fVec.val[1], o.fVec.val[1]), 8),
+ vshrn_n_u16(vaddw_u8(fVec.val[2], o.fVec.val[2]), 8),
+ vshrn_n_u16(vaddw_u8(fVec.val[3], o.fVec.val[3]), 8),
+ }};
+ }
+ };
+
+ Alpha alpha() const { return fVec.val[3]; }
+
+ Wide widenLo() const {
+ return (uint16x8x4_t) {{
+ vmovl_u8(fVec.val[0]),
+ vmovl_u8(fVec.val[1]),
+ vmovl_u8(fVec.val[2]),
+ vmovl_u8(fVec.val[3]),
+ }};
+ }
+ // TODO: these two can probably be done faster.
+ Wide widenHi() const { return this->widenLo() << 8; }
+ Wide widenLoHi() const { return this->widenLo() + this->widenHi(); }
+
+ SkPx_neon operator+(const SkPx_neon& o) const {
+ return (uint8x8x4_t) {{
+ vadd_u8(fVec.val[0], o.fVec.val[0]),
+ vadd_u8(fVec.val[1], o.fVec.val[1]),
+ vadd_u8(fVec.val[2], o.fVec.val[2]),
+ vadd_u8(fVec.val[3], o.fVec.val[3]),
+ }};
+ }
+ SkPx_neon operator-(const SkPx_neon& o) const {
+ return (uint8x8x4_t) {{
+ vsub_u8(fVec.val[0], o.fVec.val[0]),
+ vsub_u8(fVec.val[1], o.fVec.val[1]),
+ vsub_u8(fVec.val[2], o.fVec.val[2]),
+ vsub_u8(fVec.val[3], o.fVec.val[3]),
+ }};
+ }
+ SkPx_neon saturatedAdd(const SkPx_neon& o) const {
+ return (uint8x8x4_t) {{
+ vqadd_u8(fVec.val[0], o.fVec.val[0]),
+ vqadd_u8(fVec.val[1], o.fVec.val[1]),
+ vqadd_u8(fVec.val[2], o.fVec.val[2]),
+ vqadd_u8(fVec.val[3], o.fVec.val[3]),
+ }};
+ }
+
+ Wide operator*(const Alpha& a) const {
+ return (uint16x8x4_t) {{
+ vmull_u8(fVec.val[0], a.fA),
+ vmull_u8(fVec.val[1], a.fA),
+ vmull_u8(fVec.val[2], a.fA),
+ vmull_u8(fVec.val[3], a.fA),
+ }};
+ }
+ SkPx_neon approxMulDiv255(const Alpha& a) const {
+ return (*this * a).addNarrowHi(*this);
+ }
+
+ SkPx_neon addAlpha(const Alpha& a) const {
+ return (uint8x8x4_t) {{
+ fVec.val[0],
+ fVec.val[1],
+ fVec.val[2],
+ vadd_u8(fVec.val[3], a.fA),
+ }};
+ }
+};
+typedef SkPx_neon SkPx;
+
+#endif//SkPx_neon_DEFINED
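
The payoff of the planar uint8x8x4_t layout is that each channel of all 8
pixels sits in its own 8x8-bit plane, which is why alpha() and addAlpha() above
touch only a single plane. A hypothetical helper, illustrative only and not
part of the commit:

// Force all 8 pixels opaque by rewriting just the alpha plane.
static inline SkPx_neon force_opaque(const SkPx_neon& px) {
    uint8x8x4_t v = px.fVec;
    v.val[3] = vdup_n_u8(0xFF);   // alpha plane only; the r, g, b planes are untouched
    return v;
}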
diff --git a/src/opts/SkPx_none.h b/src/opts/SkPx_none.h
new file mode 100644
index 0000000000..a4758c1004
--- /dev/null
+++ b/src/opts/SkPx_none.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPx_none_DEFINED
+#define SkPx_none_DEFINED
+
+// Nothing fancy here. We're the backup _none case after all.
+// Our declared sweet spot is simply a single pixel at a time.
+
+struct SkPx_none {
+ static const int N = 1;
+ uint8_t f8[4];
+
+ SkPx_none(uint32_t px) { memcpy(f8, &px, 4); }
+ SkPx_none(uint8_t x, uint8_t y, uint8_t z, uint8_t a) {
+ f8[0] = x; f8[1] = y; f8[2] = z; f8[3] = a;
+ }
+
+ static SkPx_none Dup(uint32_t px) { return px; }
+ static SkPx_none LoadN(const uint32_t* px) { return *px; }
+ static SkPx_none Load(int n, const uint32_t* px) {
+ SkASSERT(false); // There are no 0<n<1.
+ return 0;
+ }
+
+ void storeN(uint32_t* px) const { memcpy(px, f8, 4); }
+ void store(int n, uint32_t* px) const {
+ SkASSERT(false); // There are no 0<n<1.
+ }
+
+ struct Alpha {
+ uint8_t fA;
+ Alpha(uint8_t a) : fA(a) {}
+
+ static Alpha Dup(uint8_t a) { return a; }
+ static Alpha LoadN(const uint8_t* a) { return *a; }
+ static Alpha Load(int n, const uint8_t* a) {
+ SkASSERT(false); // There are no 0<n<1.
+ return 0;
+ }
+ Alpha inv() const { return 255 - fA; }
+ };
+
+ struct Wide {
+ uint16_t f16[4];
+
+ Wide(uint16_t x, uint16_t y, uint16_t z, uint16_t a) {
+ f16[0] = x; f16[1] = y; f16[2] = z; f16[3] = a;
+ }
+
+ Wide operator+(const Wide& o) const {
+ return Wide(f16[0]+o.f16[0], f16[1]+o.f16[1], f16[2]+o.f16[2], f16[3]+o.f16[3]);
+ }
+ Wide operator-(const Wide& o) const {
+ return Wide(f16[0]-o.f16[0], f16[1]-o.f16[1], f16[2]-o.f16[2], f16[3]-o.f16[3]);
+ }
+ Wide operator<<(int bits) const {
+ return Wide(f16[0]<<bits, f16[1]<<bits, f16[2]<<bits, f16[3]<<bits);
+ }
+ Wide operator>>(int bits) const {
+ return Wide(f16[0]>>bits, f16[1]>>bits, f16[2]>>bits, f16[3]>>bits);
+ }
+
+ SkPx_none addNarrowHi(const SkPx_none& o) const {
+ Wide sum = (*this + o.widenLo()) >> 8;
+ return SkPx_none(sum.f16[0], sum.f16[1], sum.f16[2], sum.f16[3]);
+ }
+ };
+
+ Alpha alpha() const { return f8[3]; }
+
+ Wide widenLo() const { return Wide(f8[0], f8[1], f8[2], f8[3]); }
+ Wide widenHi() const { return this->widenLo() << 8; }
+ Wide widenLoHi() const { return this->widenLo() + this->widenHi(); }
+
+ SkPx_none operator+(const SkPx_none& o) const {
+ return SkPx_none(f8[0]+o.f8[0], f8[1]+o.f8[1], f8[2]+o.f8[2], f8[3]+o.f8[3]);
+ }
+ SkPx_none operator-(const SkPx_none& o) const {
+ return SkPx_none(f8[0]-o.f8[0], f8[1]-o.f8[1], f8[2]-o.f8[2], f8[3]-o.f8[3]);
+ }
+ SkPx_none saturatedAdd(const SkPx_none& o) const {
+ return SkPx_none(SkTMax(0, SkTMin(255, f8[0]+o.f8[0])),
+ SkTMax(0, SkTMin(255, f8[1]+o.f8[1])),
+ SkTMax(0, SkTMin(255, f8[2]+o.f8[2])),
+ SkTMax(0, SkTMin(255, f8[3]+o.f8[3])));
+ }
+
+ Wide operator*(const Alpha& a) const {
+ return Wide(f8[0]*a.fA, f8[1]*a.fA, f8[2]*a.fA, f8[3]*a.fA);
+ }
+ SkPx_none approxMulDiv255(const Alpha& a) const {
+ return (*this * a).addNarrowHi(*this);
+ }
+
+ SkPx_none addAlpha(const Alpha& a) const {
+ return SkPx_none(f8[0], f8[1], f8[2], f8[3]+a.fA);
+ }
+};
+typedef SkPx_none SkPx;
+
+#endif//SkPx_none_DEFINED
diff --git a/src/opts/SkPx_sse.h b/src/opts/SkPx_sse.h
new file mode 100644
index 0000000000..bc5ccd11ea
--- /dev/null
+++ b/src/opts/SkPx_sse.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkPx_sse_DEFINED
+#define SkPx_sse_DEFINED
+
+// SkPx_sse's sweet spot is to work with 4 pixels at a time,
+// stored interlaced, just as they sit in memory: rgba rgba rgba rgba.
+
+// SkPx_sse's best way to work with alphas is similar,
+// replicating the 4 alphas 4 times each across the pixel: aaaa aaaa aaaa aaaa.
+
+// When working with fewer than 4 pixels, we load the pixels in the low lanes,
+// usually filling the top lanes with zeros (but who cares, might be junk).
+
+struct SkPx_sse {
+ static const int N = 4;
+
+ __m128i fVec;
+ SkPx_sse(__m128i vec) : fVec(vec) {}
+
+ static SkPx_sse Dup(uint32_t px) { return _mm_set1_epi32(px); }
+ static SkPx_sse LoadN(const uint32_t* px) { return _mm_loadu_si128((const __m128i*)px); }
+ static SkPx_sse Load(int n, const uint32_t* px) {
+ SkASSERT(n > 0 && n < 4);
+ switch (n) {
+ case 1: return _mm_cvtsi32_si128(px[0]);
+ case 2: return _mm_loadl_epi64((const __m128i*)px);
+ case 3: return _mm_or_si128(_mm_loadl_epi64((const __m128i*)px),
+ _mm_slli_si128(_mm_cvtsi32_si128(px[2]), 8));
+ }
+ return _mm_setzero_si128(); // Not actually reachable.
+ }
+
+ void storeN(uint32_t* px) const { _mm_storeu_si128((__m128i*)px, fVec); }
+ void store(int n, uint32_t* px) const {
+ SkASSERT(n > 0 && n < 4);
+ __m128i v = fVec;
+ if (n & 1) {
+ *px++ = _mm_cvtsi128_si32(v);
+ v = _mm_srli_si128(v, 4);
+ }
+ if (n & 2) {
+ _mm_storel_epi64((__m128i*)px, v);
+ }
+ }
+
+ struct Alpha {
+ __m128i fVec;
+ Alpha(__m128i vec) : fVec(vec) {}
+
+ static Alpha Dup(uint8_t a) { return _mm_set1_epi8(a); }
+ static Alpha LoadN(const uint8_t* a) {
+ __m128i as = _mm_cvtsi32_si128(*(const uint32_t*)a); // ____ ____ ____ 3210
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ return _mm_shuffle_epi8(as, _mm_set_epi8(3,3,3,3, 2,2,2,2, 1,1,1,1, 0,0,0,0));
+ #else
+ as = _mm_unpacklo_epi8 (as, _mm_setzero_si128()); // ____ ____ _3_2 _1_0
+ as = _mm_unpacklo_epi16(as, _mm_setzero_si128()); // ___3 ___2 ___1 ___0
+ as = _mm_or_si128(as, _mm_slli_si128(as, 1)); // __33 __22 __11 __00
+ return _mm_or_si128(as, _mm_slli_si128(as, 2)); // 3333 2222 1111 0000
+ #endif
+ }
+ static Alpha Load(int n, const uint8_t* a) {
+ SkASSERT(n > 0 && n < 4);
+ uint8_t a4[] = { 0,0,0,0 };
+ switch (n) {
+ case 3: a4[2] = a[2]; // fall through
+ case 2: a4[1] = a[1]; // fall through
+ case 1: a4[0] = a[0];
+ }
+ return LoadN(a4);
+ }
+
+ Alpha inv() const { return _mm_sub_epi8(_mm_set1_epi8(~0), fVec); }
+ };
+
+ struct Wide {
+ __m128i fLo, fHi;
+ Wide(__m128i lo, __m128i hi) : fLo(lo), fHi(hi) {}
+
+ Wide operator+(const Wide& o) const {
+ return Wide(_mm_add_epi16(fLo, o.fLo), _mm_add_epi16(fHi, o.fHi));
+ }
+ Wide operator-(const Wide& o) const {
+ return Wide(_mm_sub_epi16(fLo, o.fLo), _mm_sub_epi16(fHi, o.fHi));
+ }
+ Wide operator<<(int bits) const {
+ return Wide(_mm_slli_epi16(fLo, bits), _mm_slli_epi16(fHi, bits));
+ }
+ Wide operator>>(int bits) const {
+ return Wide(_mm_srli_epi16(fLo, bits), _mm_srli_epi16(fHi, bits));
+ }
+
+ SkPx_sse addNarrowHi(const SkPx_sse& o) const {
+ Wide sum = (*this + o.widenLo()) >> 8;
+ return _mm_packus_epi16(sum.fLo, sum.fHi);
+ }
+ };
+
+ Alpha alpha() const {
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ return _mm_shuffle_epi8(fVec, _mm_set_epi8(15,15,15,15, 11,11,11,11, 7,7,7,7, 3,3,3,3));
+ #else
+ __m128i as = _mm_srli_epi32(fVec, 24); // ___3 ___2 ___1 ___0
+ as = _mm_or_si128(as, _mm_slli_si128(as, 1)); // __33 __22 __11 __00
+ return _mm_or_si128(as, _mm_slli_si128(as, 2)); // 3333 2222 1111 0000
+ #endif
+ }
+
+ Wide widenLo() const {
+ return Wide(_mm_unpacklo_epi8(fVec, _mm_setzero_si128()),
+ _mm_unpackhi_epi8(fVec, _mm_setzero_si128()));
+ }
+ Wide widenHi() const {
+ return Wide(_mm_unpacklo_epi8(_mm_setzero_si128(), fVec),
+ _mm_unpackhi_epi8(_mm_setzero_si128(), fVec));
+ }
+ Wide widenLoHi() const {
+ return Wide(_mm_unpacklo_epi8(fVec, fVec),
+ _mm_unpackhi_epi8(fVec, fVec));
+ }
+
+ SkPx_sse operator+(const SkPx_sse& o) const { return _mm_add_epi8(fVec, o.fVec); }
+ SkPx_sse operator-(const SkPx_sse& o) const { return _mm_sub_epi8(fVec, o.fVec); }
+ SkPx_sse saturatedAdd(const SkPx_sse& o) const { return _mm_adds_epi8(fVec, o.fVec); }
+
+ Wide operator*(const Alpha& a) const {
+ __m128i pLo = _mm_unpacklo_epi8( fVec, _mm_setzero_si128()),
+ aLo = _mm_unpacklo_epi8(a.fVec, _mm_setzero_si128()),
+ pHi = _mm_unpackhi_epi8( fVec, _mm_setzero_si128()),
+ aHi = _mm_unpackhi_epi8(a.fVec, _mm_setzero_si128());
+ return Wide(_mm_mullo_epi16(pLo, aLo), _mm_mullo_epi16(pHi, aHi));
+ }
+ SkPx_sse approxMulDiv255(const Alpha& a) const {
+ return (*this * a).addNarrowHi(*this);
+ }
+
+ SkPx_sse addAlpha(const Alpha& a) const {
+ return _mm_add_epi8(fVec, _mm_and_si128(a.fVec, _mm_set1_epi32(0xFF000000)));
+ }
+};
+
+typedef SkPx_sse SkPx;
+
+#endif//SkPx_sse_DEFINED
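
To make the ragged-edge handling concrete, an illustrative snippet (not part of
the commit) that round-trips 3 of 4 pixels through this backend's partial
Load/store on an SSE2+ build, where including "SkPx.h" selects SkPx_sse as
SkPx; demo_ragged_edge is a hypothetical name:

#include <cstdio>
#include <cstdint>

static void demo_ragged_edge() {
    uint32_t src[3] = { 0x11111111, 0x22222222, 0x33333333 };
    uint32_t dst[4] = { 0, 0, 0, 0xdeadbeef };   // dst[3] should survive untouched
    SkPx::Load(3, src).store(3, dst);            // only pixels 0..2 are read and written
    printf("%08x %08x %08x %08x\n", dst[0], dst[1], dst[2], dst[3]);
    // expected: 11111111 22222222 33333333 deadbeef
}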