author    mtklein <mtklein@chromium.org>  2016-02-09 10:35:27 -0800
committer Commit bot <commit-bot@chromium.org>  2016-02-09 10:35:28 -0800
commit    e4c0beed744d09dae4757c1893d8caa64ee09cd2 (patch)
tree      0e35dcab1c2ab7a1b75609c6dd1dd11231a572eb
parent    f1d415188ffb4c34e2886c2cfceb363a148333f1 (diff)
sknx refactoring
- trim unused specializations (Sk4i, Sk2d) and APIs (SkNx_dup)
- expand APIs a little:
    * v[0] == v.kth<0>()
    * SkNx_shuffle can now convert to different-sized vectors, e.g. Sk2f <-> Sk4f
- remove the anonymous namespace

I believe it's safe to remove the anonymous namespace right now. We're worried about violating the One Definition Rule; the anonymous namespace protected us from that. In Release builds, this is mostly moot, as everything tends to inline completely. In Debug builds, violating the ODR is at worst an inconvenience: time spent trying to figure out why the bot is broken.

Now that we're building with SSE2/NEON everywhere, very few bots have even a chance of getting confused by two definitions of the same type or function. Where we do compile variants depending on, e.g., SSSE3, we do so in static inline functions, which are not subject to the ODR.

I plan to follow up with a tedious .kth<...>() -> [...] auto-replace.

BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1683543002
CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot

Review URL: https://codereview.chromium.org/1683543002
 src/core/SkColorMatrixFilterRowMajor255.cpp |   8
 src/core/SkNx.h                             | 219
 src/opts/SkNx_neon.h                        |  75
 src/opts/SkNx_sse.h                         | 151
 src/opts/SkXfermode_opts.h                  |   2
 tests/SkNxTest.cpp                          |  36
 6 files changed, 159 insertions(+), 332 deletions(-)
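In practice, the reshaped API reads like this. A minimal usage sketch, not code from the patch itself; it assumes only the Sk4f/Sk2f typedefs, SkNx_shuffle, and SkASSERT from the headers below:

    #include "SkNx.h"

    static void sknx_api_sketch() {
        Sk4f v(1, 2, 3, 4);

        // operator[] is the new primary accessor; kth<k>() now forwards to it.
        float a = v[0];
        SkASSERT(a == v.kth<0>());

        // SkNx_shuffle's index count picks the output width, so one call
        // can narrow, widen, and reorder.
        Sk2f lo    = SkNx_shuffle<0,1>(v);       // Sk4f -> Sk2f: (1,2)
        Sk4f splat = SkNx_shuffle<0,0,0,0>(lo);  // Sk2f -> Sk4f: (1,1,1,1)
        SkASSERT(splat[3] == 1);
    }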
diff --git a/src/core/SkColorMatrixFilterRowMajor255.cpp b/src/core/SkColorMatrixFilterRowMajor255.cpp
index b9508ff2c7..33bf042233 100644
--- a/src/core/SkColorMatrixFilterRowMajor255.cpp
+++ b/src/core/SkColorMatrixFilterRowMajor255.cpp
@@ -108,10 +108,10 @@ void filter_span(const float array[], const T src[], int count, T dst[]) {
srcf = unpremul(srcf);
}
- Sk4f r4 = SkNx_dup<SK_R32_SHIFT/8>(srcf);
- Sk4f g4 = SkNx_dup<SK_G32_SHIFT/8>(srcf);
- Sk4f b4 = SkNx_dup<SK_B32_SHIFT/8>(srcf);
- Sk4f a4 = SkNx_dup<SK_A32_SHIFT/8>(srcf);
+ Sk4f r4 = srcf.kth<SK_R32_SHIFT/8>();
+ Sk4f g4 = srcf.kth<SK_G32_SHIFT/8>();
+ Sk4f b4 = srcf.kth<SK_B32_SHIFT/8>();
+ Sk4f a4 = srcf.kth<SK_A32_SHIFT/8>();
// apply matrix
Sk4f dst4 = c0 * r4 + c1 * g4 + c2 * b4 + c3 * a4 + c4;
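The rewrite above works because kth<k>() returns a scalar and Sk4f's single-value constructor broadcasts it to every lane. A hedged sketch of the pattern; SK_R32_SHIFT is Skia's pixel-format configuration macro:

    #include "SkNx.h"   // Sk4f, SK_R32_SHIFT via SkTypes.h

    static inline Sk4f dup_red(const Sk4f& srcf) {
        // kth<k>() now returns a scalar float; returning it as Sk4f invokes
        // the implicit single-value constructor, which splats the value to
        // all four lanes: exactly what the removed SkNx_dup<k>() did.
        return srcf.kth<SK_R32_SHIFT/8>();
    }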
diff --git a/src/core/SkNx.h b/src/core/SkNx.h
index 7ae5d82976..69295d4fc3 100644
--- a/src/core/SkNx.h
+++ b/src/core/SkNx.h
@@ -8,20 +8,11 @@
#ifndef SkNx_DEFINED
#define SkNx_DEFINED
-
//#define SKNX_NO_SIMD
#include "SkScalar.h"
#include "SkTypes.h"
#include <math.h>
-#define REQUIRE(x) static_assert(x, #x)
-
-// This file may be included multiple times by .cpp files with different flags, leading
-// to different definitions. Usually that doesn't matter because it's all inlined, but
-// in Debug modes the compilers may not inline everything. So wrap everything in an
-// anonymous namespace to give each includer their own silo of this code (or the linker
-// will probably pick one randomly for us, which is rarely correct).
-namespace {
// The default implementations just fall back on a pair of size N/2.
// These support the union of operations we might do to ints and floats, but
@@ -30,68 +21,61 @@ template <int N, typename T>
class SkNx {
public:
SkNx() {}
- SkNx(const SkNx<N/2, T>& lo, const SkNx<N/2, T>& hi) : fLo(lo), fHi(hi) {}
SkNx(T val) : fLo(val), fHi(val) {}
+
+ typedef SkNx<N/2, T> Half;
+ SkNx(const Half& lo, const Half& hi) : fLo(lo), fHi(hi) {}
+
+ SkNx(T a, T b) : fLo(a), fHi(b) {}
+ SkNx(T a, T b, T c, T d) : fLo(a,b), fHi(c,d) {}
+ SkNx(T a, T b, T c, T d, T e, T f, T g, T h) : fLo(a,b,c,d), fHi(e,f,g,h) {}
+ SkNx(T a, T b, T c, T d, T e, T f, T g, T h,
+ T i, T j, T k, T l, T m, T n, T o, T p) : fLo(a,b,c,d, e,f,g,h), fHi(i,j,k,l, m,n,o,p) {}
+
static SkNx Load(const void* ptr) {
auto vals = (const T*)ptr;
- return SkNx(SkNx<N/2,T>::Load(vals), SkNx<N/2,T>::Load(vals+N/2));
+ return SkNx(Half::Load(vals), Half::Load(vals+N/2));
}
- SkNx(T a, T b) : fLo(a), fHi(b) { REQUIRE(N==2); }
- SkNx(T a, T b, T c, T d) : fLo(a,b), fHi(c,d) { REQUIRE(N==4); }
- SkNx(T a, T b, T c, T d, T e, T f, T g, T h) : fLo(a,b,c,d), fHi(e,f,g,h) { REQUIRE(N==8); }
- SkNx(T a, T b, T c, T d, T e, T f, T g, T h,
- T i, T j, T k, T l, T m, T n, T o, T p)
- : fLo(a,b,c,d, e,f,g,h), fHi(i,j,k,l, m,n,o,p) { REQUIRE(N==16); }
-
void store(void* ptr) const {
auto vals = (T*)ptr;
fLo.store(vals);
fHi.store(vals+N/2);
}
- SkNx saturatedAdd(const SkNx& o) const {
- return SkNx(fLo.saturatedAdd(o.fLo), fHi.saturatedAdd(o.fHi));
- }
+#define OP(op) SkNx operator op(const SkNx& o) const { return {fLo op o.fLo, fHi op o.fHi}; }
+ OP(+) OP(-) OP(*) OP(/)
+ OP(&) OP(|) OP(^)
+ OP(==) OP(!=) OP(<) OP(>) OP(<=) OP(>=)
+#undef OP
- SkNx operator + (const SkNx& o) const { return SkNx(fLo + o.fLo, fHi + o.fHi); }
- SkNx operator - (const SkNx& o) const { return SkNx(fLo - o.fLo, fHi - o.fHi); }
- SkNx operator * (const SkNx& o) const { return SkNx(fLo * o.fLo, fHi * o.fHi); }
- SkNx operator / (const SkNx& o) const { return SkNx(fLo / o.fLo, fHi / o.fHi); }
+#define OP(op) SkNx op() const { return {fLo.op(), fHi.op()}; }
+ OP(abs)
+ OP(sqrt) OP(rsqrt0) OP(rsqrt1) OP(rsqrt2)
+ OP(invert) OP(approxInvert)
+#undef OP
SkNx operator << (int bits) const { return SkNx(fLo << bits, fHi << bits); }
SkNx operator >> (int bits) const { return SkNx(fLo >> bits, fHi >> bits); }
- SkNx operator == (const SkNx& o) const { return SkNx(fLo == o.fLo, fHi == o.fHi); }
- SkNx operator != (const SkNx& o) const { return SkNx(fLo != o.fLo, fHi != o.fHi); }
- SkNx operator < (const SkNx& o) const { return SkNx(fLo < o.fLo, fHi < o.fHi); }
- SkNx operator > (const SkNx& o) const { return SkNx(fLo > o.fLo, fHi > o.fHi); }
- SkNx operator <= (const SkNx& o) const { return SkNx(fLo <= o.fLo, fHi <= o.fHi); }
- SkNx operator >= (const SkNx& o) const { return SkNx(fLo >= o.fLo, fHi >= o.fHi); }
+ SkNx saturatedAdd(const SkNx& o) const {
+ return {fLo.saturatedAdd(o.fLo), fHi.saturatedAdd(o.fHi)};
+ }
static SkNx Min(const SkNx& a, const SkNx& b) {
- return SkNx(SkNx<N/2, T>::Min(a.fLo, b.fLo), SkNx<N/2, T>::Min(a.fHi, b.fHi));
+ return {Half::Min(a.fLo, b.fLo), Half::Min(a.fHi, b.fHi)};
}
static SkNx Max(const SkNx& a, const SkNx& b) {
- return SkNx(SkNx<N/2, T>::Max(a.fLo, b.fLo), SkNx<N/2, T>::Max(a.fHi, b.fHi));
+ return {Half::Max(a.fLo, b.fLo), Half::Max(a.fHi, b.fHi)};
}
- SkNx abs() const { return SkNx(fLo.abs(), fHi.abs()); }
-
- SkNx sqrt() const { return SkNx(fLo.sqrt(), fHi.sqrt()); }
- // Generally, increasing precision, increasing cost.
- SkNx rsqrt0() const { return SkNx(fLo.rsqrt0(), fHi.rsqrt0()); }
- SkNx rsqrt1() const { return SkNx(fLo.rsqrt1(), fHi.rsqrt1()); }
- SkNx rsqrt2() const { return SkNx(fLo.rsqrt2(), fHi.rsqrt2()); }
-
- SkNx invert() const { return SkNx(fLo. invert(), fHi. invert()); }
- SkNx approxInvert() const { return SkNx(fLo.approxInvert(), fHi.approxInvert()); }
-
- template <int k> T kth() const {
+ T operator[](int k) const {
SkASSERT(0 <= k && k < N);
- return k < N/2 ? fLo.template kth<k>() : fHi.template kth<k-N/2>();
+ return k < N/2 ? fLo[k] : fHi[k-N/2];
}
+ template <int k> T kth() const { return (*this)[k]; }
+
bool allTrue() const { return fLo.allTrue() && fHi.allTrue(); }
bool anyTrue() const { return fLo.anyTrue() || fHi.anyTrue(); }
SkNx thenElse(const SkNx& t, const SkNx& e) const {
@@ -99,17 +83,18 @@ public:
}
protected:
- REQUIRE(0 == (N & (N-1)));
+ static_assert(0 == (N & (N-1)), "N must be a power of 2.");
- SkNx<N/2, T> fLo, fHi;
+ Half fLo, fHi;
};
// Bottom out the default implementations with scalars when nothing's been specialized.
template <typename T>
-class SkNx<1,T> {
+class SkNx<1, T> {
public:
SkNx() {}
SkNx(T val) : fVal(val) {}
+
static SkNx Load(const void* ptr) {
auto vals = (const T*)ptr;
return SkNx(vals[0]);
@@ -120,45 +105,41 @@ public:
vals[0] = fVal;
}
+#define OP(op) SkNx operator op(const SkNx& o) const { return fVal op o.fVal; }
+ OP(+) OP(-) OP(*) OP(/)
+ OP(&) OP(|) OP(^)
+ OP(==) OP(!=) OP(<) OP(>) OP(<=) OP(>=)
+#undef OP
+
+ SkNx operator << (int bits) const { return fVal << bits; }
+ SkNx operator >> (int bits) const { return fVal >> bits; }
+
SkNx saturatedAdd(const SkNx& o) const {
- SkASSERT((T)(~0) > 0); // TODO: support signed T
+ SkASSERT((T)(~0) > 0); // TODO: support signed T?
T sum = fVal + o.fVal;
- return SkNx(sum < fVal ? (T)(~0) : sum);
+ return sum < fVal ? (T)(~0) : sum;
}
- SkNx operator + (const SkNx& o) const { return SkNx(fVal + o.fVal); }
- SkNx operator - (const SkNx& o) const { return SkNx(fVal - o.fVal); }
- SkNx operator * (const SkNx& o) const { return SkNx(fVal * o.fVal); }
- SkNx operator / (const SkNx& o) const { return SkNx(fVal / o.fVal); }
-
- SkNx operator << (int bits) const { return SkNx(fVal << bits); }
- SkNx operator >> (int bits) const { return SkNx(fVal >> bits); }
-
- SkNx operator == (const SkNx& o) const { return SkNx(fVal == o.fVal); }
- SkNx operator != (const SkNx& o) const { return SkNx(fVal != o.fVal); }
- SkNx operator < (const SkNx& o) const { return SkNx(fVal < o.fVal); }
- SkNx operator > (const SkNx& o) const { return SkNx(fVal > o.fVal); }
- SkNx operator <= (const SkNx& o) const { return SkNx(fVal <= o.fVal); }
- SkNx operator >= (const SkNx& o) const { return SkNx(fVal >= o.fVal); }
-
- static SkNx Min(const SkNx& a, const SkNx& b) { return SkNx(SkTMin(a.fVal, b.fVal)); }
- static SkNx Max(const SkNx& a, const SkNx& b) { return SkNx(SkTMax(a.fVal, b.fVal)); }
+ static SkNx Min(const SkNx& a, const SkNx& b) { return SkTMin(a.fVal, b.fVal); }
+ static SkNx Max(const SkNx& a, const SkNx& b) { return SkTMax(a.fVal, b.fVal); }
SkNx abs() const { return SkTAbs(fVal); }
- SkNx sqrt () const { return SkNx(Sqrt(fVal)); }
- SkNx rsqrt0() const { return this->sqrt().invert(); }
+ SkNx sqrt () const { return Sqrt(fVal); }
+ SkNx rsqrt0() const { return this->sqrt().invert(); }
SkNx rsqrt1() const { return this->rsqrt0(); }
SkNx rsqrt2() const { return this->rsqrt1(); }
- SkNx invert() const { return SkNx(1) / SkNx(fVal); }
- SkNx approxInvert() const { return this->invert(); }
+ SkNx invert() const { return 1 / fVal; }
+ SkNx approxInvert() const { return this->invert(); }
- template <int k> T kth() const {
+ T operator[](int k) const {
SkASSERT(0 == k);
return fVal;
}
+ template <int k> T kth() const { return (*this)[k]; }
+
bool allTrue() const { return fVal != 0; }
bool anyTrue() const { return fVal != 0; }
SkNx thenElse(const SkNx& t, const SkNx& e) const { return fVal != 0 ? t : e; }
@@ -170,72 +151,51 @@ protected:
T fVal;
};
-// This default implementation can be specialized by ../opts/SkNx_foo.h
-// if there's a better platform-specific shuffle strategy.
-template <typename Nx, int... Ix>
-inline Nx SkNx_shuffle_impl(const Nx& src) { return Nx( src.template kth<Ix>()... ); }
-
-// This generic shuffle can be called with 1 or N indices:
+// This generic shuffle can be called to create any valid SkNx<N,T>.
// Sk4f f(a,b,c,d);
-// SkNx_shuffle<3>(f); // ~~~> Sk4f(d,d,d,d)
-// SkNx_shuffle<2,1,0,3>(f); // ~~~> Sk4f(c,b,a,d)
-template <int... Ix, typename Nx>
-inline Nx SkNx_shuffle(const Nx& src) { return SkNx_shuffle_impl<Nx, Ix...>(src); }
-
-// A reminder alias that shuffles can be used to duplicate a single index across a vector.
-template <int Ix, typename Nx>
-inline Nx SkNx_dup(const Nx& src) { return SkNx_shuffle<Ix>(src); }
-
-// This is a poor-man's std::make_index_sequence from C++14.
-// I'd implement it fully, but it hurts my head.
-template <int...> struct SkIntSequence {};
-template <int N> struct MakeSkIntSequence;
-template <> struct MakeSkIntSequence< 1> : SkIntSequence<0 >{};
-template <> struct MakeSkIntSequence< 2> : SkIntSequence<0,1 >{};
-template <> struct MakeSkIntSequence< 4> : SkIntSequence<0,1,2,3 >{};
-template <> struct MakeSkIntSequence< 8> : SkIntSequence<0,1,2,3,4,5,6,7 >{};
-template <> struct MakeSkIntSequence<16> : SkIntSequence<0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15>{};
-
-// This is the default/fallback implementation for SkNx_cast. Best to specialize SkNx_cast!
-template <typename D, typename S, int N, int... Ix>
-SkNx<N,D> SkNx_cast_fallback(const SkNx<N,S>& src, SkIntSequence<Ix...>) {
- return SkNx<N,D>( (D)src.template kth<Ix>()... );
-}
+// Sk2f t = SkNx_shuffle<2,1>(f); // ~~~> Sk2f(c,b)
+// f = SkNx_shuffle<0,1,1,0>(t); // ~~~> Sk4f(c,b,b,c)
+template <int... Ix, int N, typename T>
+static inline SkNx<sizeof...(Ix), T> SkNx_shuffle(const SkNx<N,T>& src) { return { src[Ix]... }; }
// This is a generic cast between two SkNx with the same number of elements N. E.g.
-// Sk4b bs = ...; // Load 4 bytes.
-// Sk4f fs = SkNx_cast<float>(bs); // Cast each byte to a float.
-// Sk4i is = SkNx_cast<int>(fs); // Cast each float to int.
-// This can be specialized in ../opts/SkNx_foo.h if there's a better platform-specific cast.
-template <typename D, typename S, int N>
-SkNx<N,D> SkNx_cast(const SkNx<N,S>& src) {
- return SkNx_cast_fallback<D,S,N>(src, MakeSkIntSequence<N>());
+// Sk4b bs = ...; // Load 4 bytes.
+// Sk4f fs = SkNx_cast<float>(bs); // Cast each byte to a float.
+// Sk4h hs = SkNx_cast<uint16_t>(fs); // Cast each float to uint16_t.
+template <typename D, typename S>
+static inline SkNx<2,D> SkNx_cast(const SkNx<2,S>& src) {
+ return { (D)src[0], (D)src[1] };
}
-} // namespace
+template <typename D, typename S>
+static inline SkNx<4,D> SkNx_cast(const SkNx<4,S>& src) {
+ return { (D)src[0], (D)src[1], (D)src[2], (D)src[3] };
+}
-typedef SkNx<2, float> Sk2f;
-typedef SkNx<4, float> Sk4f;
-typedef SkNx<8, float> Sk8f;
+template <typename D, typename S>
+static inline SkNx<8,D> SkNx_cast(const SkNx<8,S>& src) {
+ return { (D)src[0], (D)src[1], (D)src[2], (D)src[3],
+ (D)src[4], (D)src[5], (D)src[6], (D)src[7] };
+}
-typedef SkNx<2, double> Sk2d;
-typedef SkNx<4, double> Sk4d;
-typedef SkNx<8, double> Sk8d;
+template <typename D, typename S>
+static inline SkNx<16,D> SkNx_cast(const SkNx<16,S>& src) {
+ return { (D)src[ 0], (D)src[ 1], (D)src[ 2], (D)src[ 3],
+ (D)src[ 4], (D)src[ 5], (D)src[ 6], (D)src[ 7],
+ (D)src[ 8], (D)src[ 9], (D)src[10], (D)src[11],
+ (D)src[12], (D)src[13], (D)src[14], (D)src[15] };
+}
-typedef SkNx<2, SkScalar> Sk2s;
-typedef SkNx<4, SkScalar> Sk4s;
-typedef SkNx<8, SkScalar> Sk8s;
+typedef SkNx<2, float> Sk2f;
+typedef SkNx<4, float> Sk4f;
+typedef SkNx<2, SkScalar> Sk2s;
+typedef SkNx<4, SkScalar> Sk4s;
-typedef SkNx< 4, uint16_t> Sk4h;
-typedef SkNx< 8, uint16_t> Sk8h;
+typedef SkNx<4, uint8_t> Sk4b;
+typedef SkNx<16, uint8_t> Sk16b;
+typedef SkNx<4, uint16_t> Sk4h;
typedef SkNx<16, uint16_t> Sk16h;
-typedef SkNx< 4, uint8_t> Sk4b;
-typedef SkNx< 8, uint8_t> Sk8b;
-typedef SkNx<16, uint8_t> Sk16b;
-
-typedef SkNx<4, int> Sk4i;
-
// Include platform specific specializations if available.
#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
#include "../opts/SkNx_sse.h"
@@ -251,7 +211,4 @@ typedef SkNx<4, int> Sk4i;
}
#endif
-#undef REQUIRE
-
-
#endif//SkNx_DEFINED
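After this patch, the core of SkNx.h is still the recursive pair-of-halves layout. A stripped-down, self-contained sketch of the scheme (simplified names, not Skia code):

    #include <cassert>

    // An N-wide vector is a pair of N/2-wide halves, bottoming out at a
    // scalar specialization for N == 1. Platform headers then replace
    // whole specializations (e.g. SkNx<4,float>) with intrinsics.
    template <int N, typename T>
    struct Vec {
        static_assert((N & (N-1)) == 0, "N must be a power of 2");
        typedef Vec<N/2, T> Half;
        Half lo, hi;

        Vec() {}
        Vec(T v) : lo(v), hi(v) {}
        Vec(const Half& l, const Half& h) : lo(l), hi(h) {}

        Vec operator+(const Vec& o) const { return {lo + o.lo, hi + o.hi}; }
        T operator[](int k) const { return k < N/2 ? lo[k] : hi[k - N/2]; }
    };

    template <typename T>
    struct Vec<1, T> {
        T val;
        Vec() {}
        Vec(T v) : val(v) {}
        Vec operator+(const Vec& o) const { return val + o.val; }
        T operator[](int k) const { assert(k == 0); return val; }
    };

A Vec<8,float> addition decomposes into two Vec<4,float> additions and so on down to scalars, which the optimizer flattens completely in Release builds.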
diff --git a/src/opts/SkNx_neon.h b/src/opts/SkNx_neon.h
index a4b7cd1a73..2cb8eb348d 100644
--- a/src/opts/SkNx_neon.h
+++ b/src/opts/SkNx_neon.h
@@ -10,8 +10,6 @@
#define SKNX_IS_FAST
-namespace { // See SkNx.h
-
// Well, this is absurd. The shifts require compile-time constant arguments.
#define SHIFT8(op, v, bits) switch(bits) { \
@@ -98,10 +96,12 @@ public:
#endif
}
- template <int k> float kth() const {
+ float operator[](int k) const {
SkASSERT(0 <= k && k < 2);
- return vget_lane_f32(fVec, k&1);
+ union { float32x2_t v; float fs[2]; } pun = {fVec};
+ return pun.fs[k&1];
}
+ template <int k> float kth() const { return (*this)[k]; }
bool allTrue() const {
auto v = vreinterpret_u32_f32(fVec);
@@ -116,33 +116,6 @@ public:
};
template <>
-class SkNx<4, int> {
-public:
- SkNx(const int32x4_t& vec) : fVec(vec) {}
-
- SkNx() {}
- SkNx(int val) : fVec(vdupq_n_s32(val)) {}
- static SkNx Load(const void* ptr) { return vld1q_s32((const int*)ptr); }
- SkNx(int a, int b, int c, int d) { fVec = (int32x4_t) { a, b, c, d }; }
-
- void store(void* ptr) const { vst1q_s32((int*)ptr, fVec); }
-
- SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); }
- SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); }
-
- SkNx operator << (int bits) const { SHIFT32(vshlq_n_s32, fVec, bits); }
- SkNx operator >> (int bits) const { SHIFT32(vshrq_n_s32, fVec, bits); }
-
- template <int k> int kth() const {
- SkASSERT(0 <= k && k < 4);
- return vgetq_lane_s32(fVec, k&3);
- }
-
- int32x4_t fVec;
-};
-
-template <>
class SkNx<4, float> {
public:
SkNx(float32x4_t vec) : fVec(vec) {}
@@ -207,10 +180,12 @@ public:
#endif
}
- template <int k> float kth() const {
+ float operator[](int k) const {
SkASSERT(0 <= k && k < 4);
- return vgetq_lane_f32(fVec, k&3);
+ union { float32x4_t v; float fs[4]; } pun = {fVec};
+ return pun.fs[k&3];
}
+ template <int k> float kth() const { return (*this)[k]; }
bool allTrue() const {
auto v = vreinterpretq_u32_f32(fVec);
@@ -257,10 +232,12 @@ public:
static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); }
- template <int k> uint16_t kth() const {
+ uint16_t operator[](int k) const {
SkASSERT(0 <= k && k < 4);
- return vget_lane_u16(fVec, k&3);
+ union { uint16x4_t v; uint16_t us[4]; } pun = {fVec};
+ return pun.us[k&3];
}
+ template <int k> uint16_t kth() const { return (*this)[k]; }
SkNx thenElse(const SkNx& t, const SkNx& e) const {
return vbsl_u16(fVec, t.fVec, e.fVec);
@@ -294,10 +271,12 @@ public:
static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); }
- template <int k> uint16_t kth() const {
+ uint16_t operator[](int k) const {
SkASSERT(0 <= k && k < 8);
- return vgetq_lane_u16(fVec, k&7);
+ union { uint16x8_t v; uint16_t us[8]; } pun = {fVec};
+ return pun.us[k&7];
}
+ template <int k> uint16_t kth() const { return (*this)[k]; }
SkNx thenElse(const SkNx& t, const SkNx& e) const {
return vbslq_u16(fVec, t.fVec, e.fVec);
@@ -350,10 +329,12 @@ public:
static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); }
SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); }
- template <int k> uint8_t kth() const {
- SkASSERT(0 <= k && k < 15);
- return vgetq_lane_u8(fVec, k&16);
+ uint8_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 16);
+ union { uint8x16_t v; uint8_t us[16]; } pun = {fVec};
+ return pun.us[k&15];
}
+ template <int k> uint8_t kth() const { return (*this)[k]; }
SkNx thenElse(const SkNx& t, const SkNx& e) const {
return vbslq_u8(fVec, t.fVec, e.fVec);
@@ -366,17 +347,13 @@ public:
#undef SHIFT16
#undef SHIFT8
-template<> inline Sk4i SkNx_cast<int, float, 4>(const Sk4f& src) {
- return vcvtq_s32_f32(src.fVec);
-}
-
-template<> inline Sk4b SkNx_cast<uint8_t, float, 4>(const Sk4f& src) {
+template<> inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
uint32x4_t _32 = vcvtq_u32_f32(src.fVec);
uint16x4_t _16 = vqmovn_u32(_32);
return vqmovn_u16(vcombine_u16(_16, _16));
}
-template<> inline Sk4f SkNx_cast<float, uint8_t, 4>(const Sk4b& src) {
+template<> inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
uint16x8_t _16 = vmovl_u8 (src.fVec) ;
uint32x4_t _32 = vmovl_u16(vget_low_u16(_16));
return vcvtq_f32_u32(_32);
@@ -390,14 +367,12 @@ static inline void Sk4f_ToBytes(uint8_t bytes[16],
(uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0]);
}
-template<> inline Sk4h SkNx_cast<uint16_t, uint8_t, 4>(const Sk4b& src) {
+template<> inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
return vget_low_u16(vmovl_u8(src.fVec));
}
-template<> inline Sk4b SkNx_cast<uint8_t, uint16_t, 4>(const Sk4h& src) {
+template<> inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
return vmovn_u16(vcombine_u16(src.fVec, src.fVec));
}
-} // namespace
-
#endif//SkNx_neon_DEFINED
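The NEON kth() rewrites above all share one trick: vget_lane/vgetq_lane intrinsics require a compile-time constant lane index, so a runtime operator[](int) cannot call them directly. A minimal standalone sketch of the union-based read used instead:

    #include <arm_neon.h>

    static inline float lane(float32x4_t vec, int k) {
        // Spill the register through a union and index the array member;
        // k&3 keeps the (already SkASSERTed) index in range.
        union { float32x4_t v; float fs[4]; } pun = {vec};
        return pun.fs[k & 3];
    }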
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
index 10db1c438f..69d28976db 100644
--- a/src/opts/SkNx_sse.h
+++ b/src/opts/SkNx_sse.h
@@ -9,12 +9,10 @@
#define SkNx_sse_DEFINED
// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
+// If you do, make sure this is in a static inline function... anywhere else risks violating ODR.
#define SKNX_IS_FAST
-namespace { // See SkNx.h
-
-
template <>
class SkNx<2, float> {
public:
@@ -44,7 +42,7 @@ public:
static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }
- SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
+ SkNx sqrt () const { return _mm_sqrt_ps (fVec); }
SkNx rsqrt0() const { return _mm_rsqrt_ps(fVec); }
SkNx rsqrt1() const { return this->rsqrt0(); }
SkNx rsqrt2() const { return this->rsqrt1(); }
@@ -52,11 +50,12 @@ public:
SkNx invert() const { return SkNx(1) / *this; }
SkNx approxInvert() const { return _mm_rcp_ps(fVec); }
- template <int k> float kth() const {
+ float operator[](int k) const {
SkASSERT(0 <= k && k < 2);
union { __m128 v; float fs[4]; } pun = {fVec};
return pun.fs[k&1];
}
+ template <int k> float kth() const { return (*this)[k]; }
bool allTrue() const { return 0xff == (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
@@ -65,90 +64,6 @@ public:
};
template <>
-class SkNx<2, double> {
-public:
- SkNx(const __m128d& vec) : fVec(vec) {}
-
- SkNx() {}
- SkNx(double val) : fVec(_mm_set1_pd(val)) {}
- static SkNx Load(const void* ptr) { return _mm_loadu_pd((const double*)ptr); }
- SkNx(double a, double b) : fVec(_mm_setr_pd(a,b)) {}
-
- void store(void* ptr) const { _mm_storeu_pd((double*)ptr, fVec); }
-
- SkNx operator + (const SkNx& o) const { return _mm_add_pd(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return _mm_sub_pd(fVec, o.fVec); }
- SkNx operator * (const SkNx& o) const { return _mm_mul_pd(fVec, o.fVec); }
- SkNx operator / (const SkNx& o) const { return _mm_div_pd(fVec, o.fVec); }
-
- SkNx operator == (const SkNx& o) const { return _mm_cmpeq_pd (fVec, o.fVec); }
- SkNx operator != (const SkNx& o) const { return _mm_cmpneq_pd(fVec, o.fVec); }
- SkNx operator < (const SkNx& o) const { return _mm_cmplt_pd (fVec, o.fVec); }
- SkNx operator > (const SkNx& o) const { return _mm_cmpgt_pd (fVec, o.fVec); }
- SkNx operator <= (const SkNx& o) const { return _mm_cmple_pd (fVec, o.fVec); }
- SkNx operator >= (const SkNx& o) const { return _mm_cmpge_pd (fVec, o.fVec); }
-
- static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_pd(l.fVec, r.fVec); }
- static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_pd(l.fVec, r.fVec); }
-
- SkNx sqrt() const { return _mm_sqrt_pd(fVec); }
-
- template <int k> double kth() const {
- SkASSERT(0 <= k && k < 2);
- union { __m128d v; double fs[2]; } pun = {fVec};
- return pun.fs[k&1];
- }
-
- bool allTrue() const { return 0x3 == _mm_movemask_pd(fVec); }
- bool anyTrue() const { return 0x0 != _mm_movemask_pd(fVec); }
-
- SkNx thenElse(const SkNx& t, const SkNx& e) const {
- return _mm_or_pd(_mm_and_pd (fVec, t.fVec),
- _mm_andnot_pd(fVec, e.fVec));
- }
-
- __m128d fVec;
-};
-
-template <>
-class SkNx<4, int> {
-public:
- SkNx(const __m128i& vec) : fVec(vec) {}
-
- SkNx() {}
- SkNx(int val) : fVec(_mm_set1_epi32(val)) {}
- static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
- SkNx(int a, int b, int c, int d) : fVec(_mm_setr_epi32(a,b,c,d)) {}
-
- void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
-
- SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
- SkNx operator * (const SkNx& o) const {
- __m128i mul20 = _mm_mul_epu32(fVec, o.fVec),
- mul31 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
- return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)),
- _mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0)));
- }
-
- SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
- SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }
-
- template <int k> int kth() const {
- SkASSERT(0 <= k && k < 4);
- switch (k) {
- case 0: return _mm_cvtsi128_si32(fVec);
- case 1: return _mm_cvtsi128_si32(_mm_srli_si128(fVec, 4));
- case 2: return _mm_cvtsi128_si32(_mm_srli_si128(fVec, 8));
- case 3: return _mm_cvtsi128_si32(_mm_srli_si128(fVec, 12));
- default: SkASSERT(false); return 0;
- }
- }
-
- __m128i fVec;
-};
-
-template <>
class SkNx<4, float> {
public:
SkNx(const __m128& vec) : fVec(vec) {}
@@ -178,7 +93,7 @@ public:
SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
- SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
+ SkNx sqrt () const { return _mm_sqrt_ps (fVec); }
SkNx rsqrt0() const { return _mm_rsqrt_ps(fVec); }
SkNx rsqrt1() const { return this->rsqrt0(); }
SkNx rsqrt2() const { return this->rsqrt1(); }
@@ -186,11 +101,12 @@ public:
SkNx invert() const { return SkNx(1) / *this; }
SkNx approxInvert() const { return _mm_rcp_ps(fVec); }
- template <int k> float kth() const {
+ float operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { __m128 v; float fs[4]; } pun = {fVec};
return pun.fs[k&3];
}
+ template <int k> float kth() const { return (*this)[k]; }
bool allTrue() const { return 0xffff == _mm_movemask_epi8(_mm_castps_si128(fVec)); }
bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(_mm_castps_si128(fVec)); }
@@ -222,10 +138,12 @@ public:
SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }
- template <int k> uint16_t kth() const {
+ uint16_t operator[](int k) const {
SkASSERT(0 <= k && k < 4);
- return _mm_extract_epi16(fVec, k);
+ union { __m128i v; uint16_t us[8]; } pun = {fVec};
+ return pun.us[k&3];
}
+ template <int k> uint16_t kth() const { return (*this)[k]; }
__m128i fVec;
};
@@ -264,10 +182,12 @@ public:
_mm_andnot_si128(fVec, e.fVec));
}
- template <int k> uint16_t kth() const {
+ uint16_t operator[](int k) const {
SkASSERT(0 <= k && k < 8);
- return _mm_extract_epi16(fVec, k);
+ union { __m128i v; uint16_t us[8]; } pun = {fVec};
+ return pun.us[k&7];
}
+ template <int k> uint16_t kth() const { return (*this)[k]; }
__m128i fVec;
};
@@ -287,20 +207,6 @@ public:
};
template <>
-class SkNx<8, uint8_t> {
-public:
- SkNx(const __m128i& vec) : fVec(vec) {}
-
- SkNx() {}
- static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
- void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }
-
- // TODO as needed
-
- __m128i fVec;
-};
-
-template <>
class SkNx<16, uint8_t> {
public:
SkNx(const __m128i& vec) : fVec(vec) {}
@@ -328,12 +234,12 @@ public:
return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
}
- template <int k> uint8_t kth() const {
+ uint8_t operator[](int k) const {
SkASSERT(0 <= k && k < 16);
- // SSE4.1 would just `return _mm_extract_epi8(fVec, k)`. We have to read 16-bits instead.
- int pair = _mm_extract_epi16(fVec, k/2);
- return k % 2 == 0 ? pair : (pair >> 8);
+ union { __m128i v; uint8_t us[16]; } pun = {fVec};
+ return pun.us[k&15];
}
+ template <int k> uint8_t kth() const { return (*this)[k]; }
SkNx thenElse(const SkNx& t, const SkNx& e) const {
return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
@@ -344,11 +250,7 @@ public:
};
-template<> inline Sk4i SkNx_cast<int, float, 4>(const Sk4f& src) {
- return _mm_cvttps_epi32(src.fVec);
-}
-
-template<> inline Sk4h SkNx_cast<uint16_t, float, 4>(const Sk4f& src) {
+template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
auto _32 = _mm_cvttps_epi32(src.fVec);
// Ideally we'd use _mm_packus_epi32 here. But that's SSE4.1+.
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
@@ -362,7 +264,7 @@ template<> inline Sk4h SkNx_cast<uint16_t, float, 4>(const Sk4f& src) {
#endif
}
-template<> inline Sk4b SkNx_cast<uint8_t, float, 4>(const Sk4f& src) {
+template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
auto _32 = _mm_cvttps_epi32(src.fVec);
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
const int _ = ~0;
@@ -373,7 +275,7 @@ template<> inline Sk4b SkNx_cast<uint8_t, float, 4>(const Sk4f& src) {
#endif
}
-template<> inline Sk4f SkNx_cast<float, uint8_t, 4>(const Sk4b& src) {
+template<> /*static*/ inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
const int _ = ~0;
auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
@@ -384,7 +286,7 @@ template<> inline Sk4f SkNx_cast<float, uint8_t, 4>(const Sk4b& src) {
return _mm_cvtepi32_ps(_32);
}
-template<> inline Sk4f SkNx_cast<float, uint16_t, 4>(const Sk4h& src) {
+template<> /*static*/ inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
return _mm_cvtepi32_ps(_32);
}
@@ -398,15 +300,12 @@ static inline void Sk4f_ToBytes(uint8_t bytes[16],
_mm_cvttps_epi32(d.fVec))));
}
-template<> inline Sk4h SkNx_cast<uint16_t, uint8_t, 4>(const Sk4b& src) {
+template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
}
-template<> inline Sk4b SkNx_cast<uint8_t, uint16_t, 4>(const Sk4h& src) {
+template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
return _mm_packus_epi16(src.fVec, src.fVec);
}
-
-} // namespace
-
#endif//SkNx_sse_DEFINED
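The new comment at the top of this header is the crux of the namespace removal: internal-linkage functions may legally differ between translation units, so they can branch on the compile-time SSE level without creating two conflicting definitions of one external symbol. A sketch of the pattern, adapted from the Sk4h cast above and assuming Skia's SK_CPU_SSE_LEVEL macros:

    #include <emmintrin.h>
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
        #include <tmmintrin.h>
    #endif

    // static inline gives this internal linkage: each .cpp compiled with a
    // different SK_CPU_SSE_LEVEL gets its own private copy, so no single
    // external symbol ends up with mismatched definitions.
    static inline __m128i narrow_32_to_16(__m128i _32) {
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
        // SSSE3: shuffle the low 2 bytes of each 32-bit lane into place.
        const int _ = ~0;
        return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_));
    #else
        // SSE2 has only a signed saturating pack, so bias into signed
        // range, pack, then bias back.
        _32 = _mm_sub_epi32(_32, _mm_set1_epi32(0x8000));
        return _mm_add_epi16(_mm_packs_epi32(_32, _32), _mm_set1_epi16((short)0x8000));
    #endif
    }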
diff --git a/src/opts/SkXfermode_opts.h b/src/opts/SkXfermode_opts.h
index 31817f5f61..b049c6315c 100644
--- a/src/opts/SkXfermode_opts.h
+++ b/src/opts/SkXfermode_opts.h
@@ -122,7 +122,7 @@ static inline Sk4f a_rgb(const Sk4f& a, const Sk4f& rgb) {
return a * Sk4f(0,0,0,1) + rgb * Sk4f(1,1,1,0);
}
static inline Sk4f alphas(const Sk4f& f) {
- return SkNx_dup<SK_A32_SHIFT/8>(f);
+ return f.kth<SK_A32_SHIFT/8>();
}
XFERMODE(ColorDodge) {
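The fallback SkNx_cast in SkNx.h was likewise reworked from an index-sequence template into plain per-width overloads, so callers spell only the destination element type and the width is deduced. A usage sketch mirroring the tests below:

    static void cast_sketch() {
        Sk4h h4(15, 17, 257, 65535);
        Sk4f f4 = SkNx_cast<float>(h4);      // uint16_t -> float; width deduced
        Sk4h rt = SkNx_cast<uint16_t>(f4);   // and back again
        SkASSERT(rt.kth<3>() == 65535);
    }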
diff --git a/tests/SkNxTest.cpp b/tests/SkNxTest.cpp
index 49e920f4b3..13e0a9f61f 100644
--- a/tests/SkNxTest.cpp
+++ b/tests/SkNxTest.cpp
@@ -146,12 +146,12 @@ DEF_TEST(SkNi_min_lt, r) {
for (int i = 0; i < (1<<16); i++) {
uint16_t a = rand.nextU() >> 16,
b = rand.nextU() >> 16;
- REPORTER_ASSERT(r, Sk8h::Min(Sk8h(a), Sk8h(b)).kth<0>() == SkTMin(a, b));
+ REPORTER_ASSERT(r, Sk16h::Min(Sk16h(a), Sk16h(b)).kth<0>() == SkTMin(a, b));
}
#else
for (int a = 0; a < (1<<16); a++) {
for (int b = 0; b < (1<<16); b++) {
- REPORTER_ASSERT(r, Sk8h::Min(Sk8h(a), Sk8h(b)).kth<0>() == SkTMin(a, b));
+ REPORTER_ASSERT(r, Sk16h::Min(Sk16h(a), Sk16h(b)).kth<0>() == SkTMin(a, b));
}}
#endif
}
@@ -207,16 +207,6 @@ DEF_TEST(Sk4px_widening, r) {
REPORTER_ASSERT(r, 0 == memcmp(&wideLoHi, &wideLoHiAlt, sizeof(wideLoHi)));
}
-DEF_TEST(SkNx_cast, r) {
- Sk4f fs(-1.7f, -1.4f, 0.5f, 1.9f);
- Sk4i is = SkNx_cast<int>(fs);
-
- REPORTER_ASSERT(r, is.kth<0>() == -1);
- REPORTER_ASSERT(r, is.kth<1>() == -1);
- REPORTER_ASSERT(r, is.kth<2>() == 0);
- REPORTER_ASSERT(r, is.kth<3>() == 1);
-}
-
DEF_TEST(SkNx_abs, r) {
auto fs = Sk4f(0.0f, -0.0f, 2.0f, -4.0f).abs();
REPORTER_ASSERT(r, fs.kth<0>() == 0.0f);
@@ -225,20 +215,27 @@ DEF_TEST(SkNx_abs, r) {
REPORTER_ASSERT(r, fs.kth<3>() == 4.0f);
}
-#include "SkRandom.h"
+DEF_TEST(SkNx_shuffle, r) {
+ Sk4f f4(0,10,20,30);
-static void dump(const Sk4f& f4, const Sk4h& h4) {
- SkDebugf("%g %g %g %g --> %d %d %d %d\n",
- f4.kth<0>(), f4.kth<1>(), f4.kth<2>(), f4.kth<3>(),
- h4.kth<0>(), h4.kth<1>(), h4.kth<2>(), h4.kth<3>());
+ Sk2f f2 = SkNx_shuffle<2,1>(f4);
+ REPORTER_ASSERT(r, f2[0] == 20);
+ REPORTER_ASSERT(r, f2[1] == 10);
+
+ f4 = SkNx_shuffle<0,1,1,0>(f2);
+ REPORTER_ASSERT(r, f4[0] == 20);
+ REPORTER_ASSERT(r, f4[1] == 10);
+ REPORTER_ASSERT(r, f4[2] == 10);
+ REPORTER_ASSERT(r, f4[3] == 20);
}
+#include "SkRandom.h"
+
DEF_TEST(SkNx_u16_float, r) {
{
// u16 --> float
auto h4 = Sk4h(15, 17, 257, 65535);
auto f4 = SkNx_cast<float>(h4);
- dump(f4, h4);
REPORTER_ASSERT(r, f4.kth<0>() == 15.0f);
REPORTER_ASSERT(r, f4.kth<1>() == 17.0f);
REPORTER_ASSERT(r, f4.kth<2>() == 257.0f);
@@ -248,7 +245,6 @@ DEF_TEST(SkNx_u16_float, r) {
// float -> u16
auto f4 = Sk4f(15, 17, 257, 65535);
auto h4 = SkNx_cast<uint16_t>(f4);
- dump(f4, h4);
REPORTER_ASSERT(r, h4.kth<0>() == 15);
REPORTER_ASSERT(r, h4.kth<1>() == 17);
REPORTER_ASSERT(r, h4.kth<2>() == 257);
@@ -258,7 +254,7 @@ DEF_TEST(SkNx_u16_float, r) {
// starting with any u16 value, we should be able to have a perfect round-trip in/out of floats
//
SkRandom rand;
- for (int i = 0; i < 0; ++i) {
+ for (int i = 0; i < 10000; ++i) {
const uint16_t s16[4] {
(uint16_t)rand.nextU16(), (uint16_t)rand.nextU16(),
(uint16_t)rand.nextU16(), (uint16_t)rand.nextU16(),