author     mtklein <mtklein@chromium.org>          2015-03-30 10:50:27 -0700
committer  Commit bot <commit-bot@chromium.org>    2015-03-30 10:50:27 -0700
commit     c9adb05b64fa0bfadf9d1a782afcda470da68c9e (patch)
tree       6413cc149b70ae36181e9f0789246b9db24447f0 /src
parent     23ac62c83a49d675a38f1c20462b5537f3c8af01 (diff)
Refactor Sk2x<T> + Sk4x<T> into SkNf<N,T> and SkNi<N,T>
The primary feature this delivers is SkNf and SkNd for arbitrary power-of-two N. Non-specialized types or types larger than 128 bits should now Just Work (and we can drop in a specialization to make them faster).

Sk4s is now just a typedef for SkNf<4, SkScalar>; Sk4d is SkNf<4, double>, Sk2f is SkNf<2, float>, etc.

This also makes implementing new specializations easier and more encapsulated. We're now using template specialization, which means the specialized versions don't have to leak out so much from SkNx_sse.h and SkNx_neon.h.

This design leaves us room to grow up, e.g. to SkNf<8, SkScalar> == Sk8s, and to grow down too, to things like SkNi<8, uint16_t> == Sk8h.

To simplify things, I've stripped away most APIs (swizzles, casts, reinterpret_casts) that no one's using yet. I will happily add them back if they seem useful.

You shouldn't feel bad about using any of the typedefs Sk4s, Sk4f, Sk4d, Sk2s, Sk2f, Sk2d, Sk4i, etc. Here's how you should feel:
  - Sk4f, Sk4s, Sk2d: feel awesome
  - Sk2f, Sk2s, Sk4d: feel pretty good

No public API changes.

TBR=reed@google.com
BUG=skia:3592

Review URL: https://codereview.chromium.org/1048593002
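For readers skimming this CL, a minimal usage sketch of the new typedefs follows; it mirrors the Sk4s load/add/store pattern that SkMatrix::Trans_pts adopts in the hunks below. The helper name translate_two_points is illustrative only, not part of the CL:

    #include "SkNx.h"     // after this CL, Sk4s is a typedef for SkNf<4, SkScalar>
    #include "SkPoint.h"

    // Illustrative helper: translate two packed SkPoints (x0,y0,x1,y1) at once.
    static void translate_two_points(SkPoint pts[2], SkScalar tx, SkScalar ty) {
        Sk4s trans4(tx, ty, tx, ty);                            // broadcast (tx, ty) twice
        (Sk4s::Load(&pts[0].fX) + trans4).store(&pts[0].fX);    // unaligned load, add, store
    }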
Diffstat (limited to 'src')
-rw-r--r--  src/core/Sk2x.h                      |  96
-rw-r--r--  src/core/Sk4x.h                      | 123
-rw-r--r--  src/core/SkGeometry.cpp              |   6
-rw-r--r--  src/core/SkMatrix.cpp                |  34
-rw-r--r--  src/core/SkNx.h                      | 174
-rw-r--r--  src/core/SkPMFloat.h                 |  68
-rw-r--r--  src/core/SkRect.cpp                  |  21
-rw-r--r--  src/effects/SkColorMatrixFilter.cpp  |  30
-rw-r--r--  src/opts/Sk2x_neon.h                 | 158
-rw-r--r--  src/opts/Sk2x_none.h                 |  72
-rw-r--r--  src/opts/Sk2x_sse.h                  |  82
-rw-r--r--  src/opts/Sk4x_neon.h                 | 166
-rw-r--r--  src/opts/Sk4x_none.h                 | 120
-rw-r--r--  src/opts/Sk4x_sse.h                  | 177
-rw-r--r--  src/opts/SkNx_neon.h                 | 257
-rw-r--r--  src/opts/SkNx_sse.h                  | 190
-rw-r--r--  src/opts/SkPMFloat_SSE2.h            |  17
-rw-r--r--  src/opts/SkPMFloat_SSSE3.h           |  19
-rw-r--r--  src/opts/SkPMFloat_neon.h            |  11
-rw-r--r--  src/opts/SkPMFloat_none.h            |   5
20 files changed, 709 insertions(+), 1117 deletions(-)
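The core of the new design lives in src/core/SkNx.h below: an unspecialized SkNf<N,T> is just a pair of SkNf<N/2,T> halves, bottoming out in a scalar SkNf<1,T>, so platform headers only have to specialize the widths they care about (SkNx_neon.h below, for example, specializes the 2- and 4-wide cases). The fragment below is a distilled illustration of that recursion under simplified names, not the actual header:

    // Illustration only: the recursive-halving fallback used by SkNf<N,T>.
    template <int N, typename T>
    struct Nf {
        Nf<N/2, T> lo, hi;                                   // each half is itself an Nf
        Nf operator+(const Nf& o) const { return {lo + o.lo, hi + o.hi}; }
    };

    template <typename T>
    struct Nf<1, T> {                                        // scalar base case ends the recursion
        T val;
        Nf operator+(const Nf& o) const { return {val + o.val}; }
    };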
diff --git a/src/core/Sk2x.h b/src/core/Sk2x.h
deleted file mode 100644
index f49442563a..0000000000
--- a/src/core/Sk2x.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef Sk2x_DEFINED
-#define Sk2x_DEFINED
-
-#include "SkTypes.h"
-#include "SkNx.h"
-
-#define SK2X_PREAMBLE 1
- #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2 && !defined(SKNX_NO_SIMD)
- #include "../opts/Sk2x_sse.h"
- #elif defined(SK_ARM_HAS_NEON) && !defined(SKNX_NO_SIMD)
- #include "../opts/Sk2x_neon.h"
- #else
- #include "../opts/Sk2x_none.h"
- #endif
-#undef SK2X_PREAMBLE
-
-template <typename T> class Sk2x;
-typedef Sk2x<float> Sk2f;
-typedef Sk2x<double> Sk2d;
-
-#if SK_SCALAR_IS_FLOAT
- typedef Sk2f Sk2s;
-#elif SK_SCALAR_IS_DOUBLE
- typedef Sk2d Sk2s;
-#endif
-
-// This API is meant to be manageably small, not comprehensive.
-// Please talk to mtklein if you find yourself wanting more.
-template <typename T> class Sk2x {
-public:
- Sk2x(); // Uninitialized; use Sk2x(0) for zero.
- explicit Sk2x(T); // Same as Sk2x(T,T);
- Sk2x(T, T);
-
- Sk2x(const Sk2x& o) { *this = o; }
- Sk2x& operator=(const Sk2x&);
-
- // These assume no particular alignment.
- static Sk2x Load(const T[2]);
- void store(T[2]) const;
-
- Sk2x add(const Sk2x&) const;
- Sk2x subtract(const Sk2x&) const;
- Sk2x multiply(const Sk2x&) const;
- Sk2x divide(const Sk2x&) const;
-
- Sk2x operator +(const Sk2x& o) const { return this->add(o); }
- Sk2x operator -(const Sk2x& o) const { return this->subtract(o); }
- Sk2x operator *(const Sk2x& o) const { return this->multiply(o); }
- Sk2x operator /(const Sk2x& o) const { return this->divide(o); }
-
- Sk2x& operator +=(const Sk2x& o) { return (*this = *this + o); }
- Sk2x& operator -=(const Sk2x& o) { return (*this = *this - o); }
- Sk2x& operator *=(const Sk2x& o) { return (*this = *this * o); }
- Sk2x& operator /=(const Sk2x& o) { return (*this = *this / o); }
-
- Sk2x negate() const { return Sk2x((T)0) - *this; }
- Sk2x operator -() const { return this->negate(); }
-
- Sk2x rsqrt() const; // Approximate 1/this->sqrt().
- Sk2x sqrt() const; // this->multiply(this->rsqrt()) may be faster, but less precise.
-
- Sk2x invert() const; // 1/this.
- Sk2x approxInvert() const; // Approximate 1/this, usually faster but less precise.
-
- static Sk2x Min(const Sk2x&, const Sk2x&);
- static Sk2x Max(const Sk2x&, const Sk2x&);
-
-private:
-#define SK2X_PRIVATE 1
- #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2 && !defined(SKNX_NO_SIMD)
- #include "../opts/Sk2x_sse.h"
- #elif defined(SK_ARM_HAS_NEON) && !defined(SKNX_NO_SIMD)
- #include "../opts/Sk2x_neon.h"
- #else
- #include "../opts/Sk2x_none.h"
- #endif
-#undef SK2X_PRIVATE
-};
-
-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2 && !defined(SKNX_NO_SIMD)
- #include "../opts/Sk2x_sse.h"
-#elif defined(SK_ARM_HAS_NEON) && !defined(SKNX_NO_SIMD)
- #include "../opts/Sk2x_neon.h"
-#else
- #include "../opts/Sk2x_none.h"
-#endif
-
-#endif//Sk2x_DEFINED
diff --git a/src/core/Sk4x.h b/src/core/Sk4x.h
deleted file mode 100644
index c72583abcd..0000000000
--- a/src/core/Sk4x.h
+++ /dev/null
@@ -1,123 +0,0 @@
-#ifndef Sk4x_DEFINED
-#define Sk4x_DEFINED
-
-#include "SkTypes.h"
-#include "SkNx.h"
-
-#define SK4X_PREAMBLE 1
- #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2 && !defined(SKNX_NO_SIMD)
- #include "../opts/Sk4x_sse.h"
- #elif defined(SK_ARM_HAS_NEON) && !defined(SKNX_NO_SIMD)
- #include "../opts/Sk4x_neon.h"
- #else
- #include "../opts/Sk4x_none.h"
- #endif
-#undef SK4X_PREAMBLE
-
-template <typename T> class Sk4x;
-typedef Sk4x<float> Sk4f;
-typedef Sk4x<int32_t> Sk4i;
-
-// Some Sk4x methods are implemented only for Sk4f or Sk4i.
-// They might be unavailable, really slow, or just a bad idea.
-// Talk to mtklein if you find yourself unable to link and
-// really need one of those methods.
-
-template <typename T> class Sk4x {
-public:
- Sk4x(); // Uninitialized; use Sk4x(0) for zero.
- explicit Sk4x(T); // Same as Sk4x(T,T,T,T);
- Sk4x(T, T, T, T);
-
- Sk4x(const Sk4x&);
- Sk4x& operator=(const Sk4x&);
-
- static Sk4x Load (const T[4]);
- static Sk4x LoadAligned(const T[4]);
-
- void store (T[4]) const;
- void storeAligned(T[4]) const;
-
- template <typename Dst> Dst reinterpret() const;
- template <typename Dst> Dst cast() const;
-
- bool allTrue() const;
- bool anyTrue() const;
-
- Sk4x bitNot() const;
- Sk4x bitAnd(const Sk4x&) const;
- Sk4x bitOr(const Sk4x&) const;
- // TODO: Sk4x bitAndNot(const Sk4x&) const; is efficient in SSE.
- Sk4x add(const Sk4x&) const;
- Sk4x subtract(const Sk4x&) const;
- Sk4x multiply(const Sk4x&) const;
- Sk4x divide(const Sk4x&) const;
-
- // TODO: why doesn't MSVC like operator~() ?
- //Sk4x operator ~() const { return this->bitNot(); }
- Sk4x operator &(const Sk4x& o) const { return this->bitAnd(o); }
- Sk4x operator |(const Sk4x& o) const { return this->bitOr (o); }
- Sk4x operator +(const Sk4x& o) const { return this->add(o); }
- Sk4x operator -(const Sk4x& o) const { return this->subtract(o); }
- Sk4x operator *(const Sk4x& o) const { return this->multiply(o); }
- Sk4x operator /(const Sk4x& o) const { return this->divide(o); }
-
- Sk4x& operator &=(const Sk4x& o) { return (*this = *this & o); }
- Sk4x& operator |=(const Sk4x& o) { return (*this = *this | o); }
- Sk4x& operator +=(const Sk4x& o) { return (*this = *this + o); }
- Sk4x& operator -=(const Sk4x& o) { return (*this = *this - o); }
- Sk4x& operator *=(const Sk4x& o) { return (*this = *this * o); }
- Sk4x& operator /=(const Sk4x& o) { return (*this = *this / o); }
-
- Sk4x negate() const { return Sk4x((T)0) - *this; }
- Sk4x operator -() const { return this->negate(); }
-
- Sk4x rsqrt() const; // Approximate reciprocal sqrt().
- Sk4x sqrt() const; // this->multiply(this->rsqrt()) may be faster, but less precise.
-
- Sk4i equal(const Sk4x&) const;
- Sk4i notEqual(const Sk4x&) const;
- Sk4i lessThan(const Sk4x&) const;
- Sk4i greaterThan(const Sk4x&) const;
- Sk4i lessThanEqual(const Sk4x&) const;
- Sk4i greaterThanEqual(const Sk4x&) const;
-
- Sk4i operator ==(const Sk4x& o) const { return this->equal(o); }
- Sk4i operator !=(const Sk4x& o) const { return this->notEqual(o); }
- Sk4i operator <(const Sk4x& o) const { return this->lessThan(o); }
- Sk4i operator >(const Sk4x& o) const { return this->greaterThan(o); }
- Sk4i operator <=(const Sk4x& o) const { return this->lessThanEqual(o); }
- Sk4i operator >=(const Sk4x& o) const { return this->greaterThanEqual(o); }
-
- static Sk4x Min(const Sk4x& a, const Sk4x& b);
- static Sk4x Max(const Sk4x& a, const Sk4x& b);
-
- // Swizzles, where this == abcd.
- Sk4x aacc() const;
- Sk4x bbdd() const;
- Sk4x badc() const;
-
-private:
- // It's handy to have Sk4f and Sk4i be mutual friends.
- template <typename S> friend class Sk4x;
-
-#define SK4X_PRIVATE 1
- #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2 && !defined(SKNX_NO_SIMD)
- #include "../opts/Sk4x_sse.h"
- #elif defined(SK_ARM_HAS_NEON) && !defined(SKNX_NO_SIMD)
- #include "../opts/Sk4x_neon.h"
- #else
- #include "../opts/Sk4x_none.h"
- #endif
-#undef SK4X_PRIVATE
-};
-
-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2 && !defined(SKNX_NO_SIMD)
- #include "../opts/Sk4x_sse.h"
-#elif defined(SK_ARM_HAS_NEON) && !defined(SKNX_NO_SIMD)
- #include "../opts/Sk4x_neon.h"
-#else
- #include "../opts/Sk4x_none.h"
-#endif
-
-#endif//Sk4x_DEFINED
diff --git a/src/core/SkGeometry.cpp b/src/core/SkGeometry.cpp
index c5abe2b9d1..50af22de37 100644
--- a/src/core/SkGeometry.cpp
+++ b/src/core/SkGeometry.cpp
@@ -7,7 +7,7 @@
#include "SkGeometry.h"
#include "SkMatrix.h"
-#include "Sk2x.h"
+#include "SkNx.h"
static Sk2s from_point(const SkPoint& point) {
return Sk2s::Load(&point.fX);
@@ -190,7 +190,7 @@ void SkChopQuadAt(const SkPoint src[3], SkPoint dst[5], SkScalar t) {
Sk2s p1 = from_point(src[1]);
Sk2s p2 = from_point(src[2]);
Sk2s tt(t);
-
+
Sk2s p01 = interp(p0, p1, tt);
Sk2s p12 = interp(p1, p2, tt);
@@ -442,7 +442,7 @@ void SkChopCubicAt(const SkPoint src[4], SkPoint dst[7], SkScalar t) {
Sk2s abc = interp(ab, bc, tt);
Sk2s bcd = interp(bc, cd, tt);
Sk2s abcd = interp(abc, bcd, tt);
-
+
dst[0] = src[0];
dst[1] = to_point(ab);
dst[2] = to_point(abc);
diff --git a/src/core/SkMatrix.cpp b/src/core/SkMatrix.cpp
index 0f56b724f5..0ac026bc9f 100644
--- a/src/core/SkMatrix.cpp
+++ b/src/core/SkMatrix.cpp
@@ -8,7 +8,7 @@
#include "SkMatrix.h"
#include "SkFloatBits.h"
#include "SkString.h"
-#include "Sk4x.h"
+#include "SkNx.h"
#include <stddef.h>
@@ -878,7 +878,7 @@ void SkMatrix::Identity_pts(const SkMatrix& m, SkPoint dst[], const SkPoint src[
void SkMatrix::Trans_pts(const SkMatrix& m, SkPoint dst[], const SkPoint src[], int count) {
SkASSERT(m.getType() <= kTranslate_Mask);
-
+
if (count > 0) {
SkScalar tx = m.getTranslateX();
SkScalar ty = m.getTranslateY();
@@ -888,17 +888,17 @@ void SkMatrix::Trans_pts(const SkMatrix& m, SkPoint dst[], const SkPoint src[],
src += 1;
dst += 1;
}
- Sk4f trans4(tx, ty, tx, ty);
+ Sk4s trans4(tx, ty, tx, ty);
count >>= 1;
if (count & 1) {
- (Sk4f::Load(&src->fX) + trans4).store(&dst->fX);
+ (Sk4s::Load(&src->fX) + trans4).store(&dst->fX);
src += 2;
dst += 2;
}
count >>= 1;
for (int i = 0; i < count; ++i) {
- (Sk4f::Load(&src[0].fX) + trans4).store(&dst[0].fX);
- (Sk4f::Load(&src[2].fX) + trans4).store(&dst[2].fX);
+ (Sk4s::Load(&src[0].fX) + trans4).store(&dst[0].fX);
+ (Sk4s::Load(&src[2].fX) + trans4).store(&dst[2].fX);
src += 4;
dst += 4;
}
@@ -907,7 +907,7 @@ void SkMatrix::Trans_pts(const SkMatrix& m, SkPoint dst[], const SkPoint src[],
void SkMatrix::Scale_pts(const SkMatrix& m, SkPoint dst[], const SkPoint src[], int count) {
SkASSERT(m.getType() <= (kScale_Mask | kTranslate_Mask));
-
+
if (count > 0) {
SkScalar tx = m.getTranslateX();
SkScalar ty = m.getTranslateY();
@@ -919,18 +919,18 @@ void SkMatrix::Scale_pts(const SkMatrix& m, SkPoint dst[], const SkPoint src[],
src += 1;
dst += 1;
}
- Sk4f trans4(tx, ty, tx, ty);
- Sk4f scale4(sx, sy, sx, sy);
+ Sk4s trans4(tx, ty, tx, ty);
+ Sk4s scale4(sx, sy, sx, sy);
count >>= 1;
if (count & 1) {
- (Sk4f::Load(&src->fX) * scale4 + trans4).store(&dst->fX);
+ (Sk4s::Load(&src->fX) * scale4 + trans4).store(&dst->fX);
src += 2;
dst += 2;
}
count >>= 1;
for (int i = 0; i < count; ++i) {
- (Sk4f::Load(&src[0].fX) * scale4 + trans4).store(&dst[0].fX);
- (Sk4f::Load(&src[2].fX) * scale4 + trans4).store(&dst[2].fX);
+ (Sk4s::Load(&src[0].fX) * scale4 + trans4).store(&dst[0].fX);
+ (Sk4s::Load(&src[2].fX) * scale4 + trans4).store(&dst[2].fX);
src += 4;
dst += 4;
}
@@ -1028,13 +1028,13 @@ void SkMatrix::Affine_vpts(const SkMatrix& m, SkPoint dst[], const SkPoint src[]
src += 1;
dst += 1;
}
- Sk4f trans4(tx, ty, tx, ty);
- Sk4f scale4(sx, sy, sx, sy);
- Sk4f skew4(kx, ky, kx, ky); // applied to swizzle of src4
+ Sk4s trans4(tx, ty, tx, ty);
+ Sk4s scale4(sx, sy, sx, sy);
+ Sk4s skew4(kx, ky, kx, ky); // applied to swizzle of src4
count >>= 1;
for (int i = 0; i < count; ++i) {
- Sk4f src4 = Sk4f::Load(&src->fX);
- Sk4f swz4(src[0].fY, src[0].fX, src[1].fY, src[1].fX); // need ABCD -> BADC
+ Sk4s src4 = Sk4s::Load(&src->fX);
+ Sk4s swz4(src[0].fY, src[0].fX, src[1].fY, src[1].fX); // need ABCD -> BADC
(src4 * scale4 + swz4 * skew4 + trans4).store(&dst->fX);
src += 2;
dst += 2;
diff --git a/src/core/SkNx.h b/src/core/SkNx.h
index 5926f36e85..e0a4aa56ec 100644
--- a/src/core/SkNx.h
+++ b/src/core/SkNx.h
@@ -8,6 +8,178 @@
#ifndef SkNx_DEFINED
#define SkNx_DEFINED
-#define SKNX_NO_SIMDx // Remove the x to disable SIMD in Sk2x and Sk4x.
+
+#define SKNX_NO_SIMDx // Remove the x to disable SIMD for all SkNx types.
+
+
+#include "SkScalar.h"
+#include "SkTypes.h"
+#include <math.h>
+#define REQUIRE(x) static_assert(x, #x)
+
+// The default implementations of SkNi<N,T> and SkNf<N,T> just fall back on a pair of size N/2.
+template <int N, typename T>
+class SkNi {
+public:
+ // For now SkNi is a _very_ minimal sketch just to support comparison operators on SkNf.
+ SkNi() {}
+ SkNi(const SkNi<N/2, T>& lo, const SkNi<N/2, T>& hi) : fLo(lo), fHi(hi) {}
+ bool allTrue() const { return fLo.allTrue() && fHi.allTrue(); }
+ bool anyTrue() const { return fLo.anyTrue() || fHi.anyTrue(); }
+
+private:
+ REQUIRE(0 == (N & (N-1)));
+ SkNi<N/2, T> fLo, fHi;
+};
+
+template <int N, typename T>
+class SkNf {
+ static SkNi<N,int32_t> ToNi(float);
+ static SkNi<N,int64_t> ToNi(double);
+ typedef decltype(ToNi(T())) Ni;
+public:
+ SkNf() {}
+ explicit SkNf(T val) : fLo(val), fHi(val) {}
+ static SkNf Load(const T vals[N]) {
+ return SkNf(SkNf<N/2,T>::Load(vals), SkNf<N/2,T>::Load(vals+N/2));
+ }
+
+ SkNf(T a, T b) : fLo(a), fHi(b) { REQUIRE(N==2); }
+ SkNf(T a, T b, T c, T d) : fLo(a,b), fHi(c,d) { REQUIRE(N==4); }
+ SkNf(T a, T b, T c, T d, T e, T f, T g, T h) : fLo(a,b,c,d), fHi(e,f,g,h) { REQUIRE(N==8); }
+
+ void store(T vals[N]) const {
+ fLo.store(vals);
+ fHi.store(vals+N/2);
+ }
+
+ SkNf operator + (const SkNf& o) const { return SkNf(fLo + o.fLo, fHi + o.fHi); }
+ SkNf operator - (const SkNf& o) const { return SkNf(fLo - o.fLo, fHi - o.fHi); }
+ SkNf operator * (const SkNf& o) const { return SkNf(fLo * o.fLo, fHi * o.fHi); }
+ SkNf operator / (const SkNf& o) const { return SkNf(fLo / o.fLo, fHi / o.fHi); }
+
+ Ni operator == (const SkNf& o) const { return Ni(fLo == o.fLo, fHi == o.fHi); }
+ Ni operator != (const SkNf& o) const { return Ni(fLo != o.fLo, fHi != o.fHi); }
+ Ni operator < (const SkNf& o) const { return Ni(fLo < o.fLo, fHi < o.fHi); }
+ Ni operator > (const SkNf& o) const { return Ni(fLo > o.fLo, fHi > o.fHi); }
+ Ni operator <= (const SkNf& o) const { return Ni(fLo <= o.fLo, fHi <= o.fHi); }
+ Ni operator >= (const SkNf& o) const { return Ni(fLo >= o.fLo, fHi >= o.fHi); }
+
+ static SkNf Min(const SkNf& l, const SkNf& r) {
+ return SkNf(SkNf<N/2,T>::Min(l.fLo, r.fLo), SkNf<N/2,T>::Min(l.fHi, r.fHi));
+ }
+ static SkNf Max(const SkNf& l, const SkNf& r) {
+ return SkNf(SkNf<N/2,T>::Max(l.fLo, r.fLo), SkNf<N/2,T>::Max(l.fHi, r.fHi));
+ }
+
+ SkNf sqrt() const { return SkNf(fLo. sqrt(), fHi. sqrt()); }
+ SkNf rsqrt() const { return SkNf(fLo.rsqrt(), fHi.rsqrt()); }
+
+ SkNf invert() const { return SkNf(fLo. invert(), fHi. invert()); }
+ SkNf approxInvert() const { return SkNf(fLo.approxInvert(), fHi.approxInvert()); }
+
+ T operator[] (int k) const {
+ SkASSERT(0 <= k && k < N);
+ return k < N/2 ? fLo[k] : fHi[k-N/2];
+ }
+
+private:
+ REQUIRE(0 == (N & (N-1)));
+ SkNf(const SkNf<N/2, T>& lo, const SkNf<N/2, T>& hi) : fLo(lo), fHi(hi) {}
+
+ SkNf<N/2, T> fLo, fHi;
+};
+
+
+// Bottom out the default implementation with scalars when nothing's been specialized.
+template <typename T>
+class SkNi<1,T> {
+public:
+ SkNi() {}
+ explicit SkNi(T val) : fVal(val) {}
+ bool allTrue() const { return (bool)fVal; }
+ bool anyTrue() const { return (bool)fVal; }
+
+private:
+ T fVal;
+};
+
+template <typename T>
+class SkNf<1,T> {
+ static SkNi<1,int32_t> ToNi(float);
+ static SkNi<1,int64_t> ToNi(double);
+ typedef decltype(ToNi(T())) Ni;
+public:
+ SkNf() {}
+ explicit SkNf(T val) : fVal(val) {}
+ static SkNf Load(const T vals[1]) { return SkNf(vals[0]); }
+
+ void store(T vals[1]) const { vals[0] = fVal; }
+
+ SkNf operator + (const SkNf& o) const { return SkNf(fVal + o.fVal); }
+ SkNf operator - (const SkNf& o) const { return SkNf(fVal - o.fVal); }
+ SkNf operator * (const SkNf& o) const { return SkNf(fVal * o.fVal); }
+ SkNf operator / (const SkNf& o) const { return SkNf(fVal / o.fVal); }
+
+ Ni operator == (const SkNf& o) const { return Ni(fVal == o.fVal); }
+ Ni operator != (const SkNf& o) const { return Ni(fVal != o.fVal); }
+ Ni operator < (const SkNf& o) const { return Ni(fVal < o.fVal); }
+ Ni operator > (const SkNf& o) const { return Ni(fVal > o.fVal); }
+ Ni operator <= (const SkNf& o) const { return Ni(fVal <= o.fVal); }
+ Ni operator >= (const SkNf& o) const { return Ni(fVal >= o.fVal); }
+
+ static SkNf Min(const SkNf& l, const SkNf& r) { return SkNf(SkTMin(l.fVal, r.fVal)); }
+ static SkNf Max(const SkNf& l, const SkNf& r) { return SkNf(SkTMax(l.fVal, r.fVal)); }
+
+ SkNf sqrt() const { return SkNf(Sqrt(fVal)); }
+ SkNf rsqrt() const { return SkNf((T)1 / Sqrt(fVal)); }
+
+ SkNf invert() const { return SkNf((T)1 / fVal); }
+ SkNf approxInvert() const { return this->invert(); }
+
+ T operator[] (int SkDEBUGCODE(k)) const {
+ SkASSERT(k == 0);
+ return fVal;
+ }
+
+private:
+ // We do double sqrts natively, or via floats for any other type.
+ template <typename U>
+ static U Sqrt(U val) { return (U) ::sqrtf((float)val); }
+ static double Sqrt(double val) { return ::sqrt ( val); }
+
+ T fVal;
+};
+
+
+// Generic syntax sugar that should work equally well for all SkNi and SkNf implementations.
+template <typename SkNx> SkNx operator - (const SkNx& l) { return SkNx((decltype(l[0]))0) - l; }
+
+template <typename SkNx> SkNx& operator += (SkNx& l, const SkNx& r) { return (l = l + r); }
+template <typename SkNx> SkNx& operator -= (SkNx& l, const SkNx& r) { return (l = l - r); }
+template <typename SkNx> SkNx& operator *= (SkNx& l, const SkNx& r) { return (l = l * r); }
+template <typename SkNx> SkNx& operator /= (SkNx& l, const SkNx& r) { return (l = l / r); }
+
+
+// Include platform specific specializations if available.
+#ifndef SKNX_NO_SIMD
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include "../opts/SkNx_sse.h"
+ #elif defined(SK_ARM_HAS_NEON)
+ #include "../opts/SkNx_neon.h"
+ #endif
+#endif
+
+#undef REQUIRE
+
+typedef SkNf<2, float> Sk2f;
+typedef SkNf<2, double> Sk2d;
+typedef SkNf<2, SkScalar> Sk2s;
+
+typedef SkNf<4, float> Sk4f;
+typedef SkNf<4, double> Sk4d;
+typedef SkNf<4, SkScalar> Sk4s;
+
+typedef SkNi<4, int32_t> Sk4i;
#endif//SkNx_DEFINED
diff --git a/src/core/SkPMFloat.h b/src/core/SkPMFloat.h
index 66262a8916..27f6f78f26 100644
--- a/src/core/SkPMFloat.h
+++ b/src/core/SkPMFloat.h
@@ -11,13 +11,7 @@
#include "SkTypes.h"
#include "SkColor.h"
#include "SkColorPriv.h"
-#include "Sk4x.h"
-
-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
- #include <immintrin.h>
-#elif defined(SK_ARM_HAS_NEON)
- #include <arm_neon.h>
-#endif
+#include "SkNx.h"
// A pre-multiplied color storing each component in the same order as SkPMColor,
// but as a float in the range [0, 255].
@@ -29,29 +23,25 @@ public:
// May be more efficient than one at a time. No special alignment assumed for SkPMColors.
static void From4PMColors(const SkPMColor[4], SkPMFloat*, SkPMFloat*, SkPMFloat*, SkPMFloat*);
- explicit SkPMFloat(SkPMColor);
- SkPMFloat(float a, float r, float g, float b) {
- // TODO: faster when specialized?
- fColor[SK_A32_SHIFT / 8] = a;
- fColor[SK_R32_SHIFT / 8] = r;
- fColor[SK_G32_SHIFT / 8] = g;
- fColor[SK_B32_SHIFT / 8] = b;
- }
-
// Uninitialized.
SkPMFloat() {}
+ explicit SkPMFloat(SkPMColor);
+ SkPMFloat(float a, float r, float g, float b)
+ #ifdef SK_PMCOLOR_IS_RGBA
+ : fColors(r,g,b,a) {}
+ #else
+ : fColors(b,g,r,a) {}
+ #endif
- SkPMFloat(const SkPMFloat& that) { *this = that; }
- SkPMFloat& operator=(const SkPMFloat& that);
- // Freely autoconvert between SkPMFloat and Sk4f. They're always byte-for-byte identical.
- /*implicit*/ SkPMFloat(const Sk4f& fs) { fs.storeAligned(fColor); }
- /*implicit*/ operator Sk4f() const { return Sk4f::LoadAligned(fColor); }
+ // Freely autoconvert between SkPMFloat and Sk4s.
+ /*implicit*/ SkPMFloat(const Sk4s& fs) { fColors = fs; }
+ /*implicit*/ operator Sk4s() const { return fColors; }
- float a() const { return fColor[SK_A32_SHIFT / 8]; }
- float r() const { return fColor[SK_R32_SHIFT / 8]; }
- float g() const { return fColor[SK_G32_SHIFT / 8]; }
- float b() const { return fColor[SK_B32_SHIFT / 8]; }
+ float a() const { return fColors[SK_A32_SHIFT / 8]; }
+ float r() const { return fColors[SK_R32_SHIFT / 8]; }
+ float g() const { return fColors[SK_G32_SHIFT / 8]; }
+ float b() const { return fColors[SK_B32_SHIFT / 8]; }
// get() and clamped() round component values to the nearest integer.
SkPMColor get() const; // May SkASSERT(this->isValid()). Some implementations may clamp.
@@ -75,24 +65,22 @@ public:
}
private:
- union {
- float fColor[4];
-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
- __m128 fColors;
-#elif defined(SK_ARM_HAS_NEON)
- float32x4_t fColors;
-#endif
- };
+ Sk4s fColors;
};
-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
- #include "../opts/SkPMFloat_SSSE3.h"
-#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
- #include "../opts/SkPMFloat_SSE2.h"
-#elif defined(SK_ARM_HAS_NEON)
- #include "../opts/SkPMFloat_neon.h"
-#else
+#ifdef SKNX_NO_SIMD
+ // Platform implementations of SkPMFloat assume Sk4s uses SSE or NEON. _none is generic.
#include "../opts/SkPMFloat_none.h"
+#else
+ #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
+ #include "../opts/SkPMFloat_SSSE3.h"
+ #elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ #include "../opts/SkPMFloat_SSE2.h"
+ #elif defined(SK_ARM_HAS_NEON)
+ #include "../opts/SkPMFloat_neon.h"
+ #else
+ #include "../opts/SkPMFloat_none.h"
+ #endif
#endif
#endif//SkPM_DEFINED
diff --git a/src/core/SkRect.cpp b/src/core/SkRect.cpp
index aa2103c638..14d862430a 100644
--- a/src/core/SkRect.cpp
+++ b/src/core/SkRect.cpp
@@ -45,11 +45,10 @@ void SkRect::toQuad(SkPoint quad[4]) const {
quad[3].set(fLeft, fBottom);
}
-//#include "Sk2x.h"
-#include "Sk4x.h"
+#include "SkNx.h"
-static inline bool is_finite(const Sk4f& value) {
- Sk4i finite = value * Sk4f(0) == Sk4f(0);
+static inline bool is_finite(const Sk4s& value) {
+ Sk4i finite = value * Sk4s(0) == Sk4s(0);
return finite.allTrue();
}
@@ -61,26 +60,26 @@ bool SkRect::setBoundsCheck(const SkPoint pts[], int count) {
if (count <= 0) {
sk_bzero(this, sizeof(SkRect));
} else {
- Sk4f min, max, accum;
+ Sk4s min, max, accum;
if (count & 1) {
- min = Sk4f(pts[0].fX, pts[0].fY, pts[0].fX, pts[0].fY);
+ min = Sk4s(pts[0].fX, pts[0].fY, pts[0].fX, pts[0].fY);
pts += 1;
count -= 1;
} else {
- min = Sk4f::Load(&pts[0].fX);
+ min = Sk4s::Load(&pts[0].fX);
pts += 2;
count -= 2;
}
accum = max = min;
- accum *= Sk4f(0);
+ accum *= Sk4s(0);
count >>= 1;
for (int i = 0; i < count; ++i) {
- Sk4f xy = Sk4f::Load(&pts->fX);
+ Sk4s xy = Sk4s::Load(&pts->fX);
accum *= xy;
- min = Sk4f::Min(min, xy);
- max = Sk4f::Max(max, xy);
+ min = Sk4s::Min(min, xy);
+ max = Sk4s::Max(max, xy);
pts += 2;
}
diff --git a/src/effects/SkColorMatrixFilter.cpp b/src/effects/SkColorMatrixFilter.cpp
index a406eda564..7c86e2e046 100644
--- a/src/effects/SkColorMatrixFilter.cpp
+++ b/src/effects/SkColorMatrixFilter.cpp
@@ -255,9 +255,9 @@ uint32_t SkColorMatrixFilter::getFlags() const {
*/
static const float gInv255 = 0.0039215683f; // (1.0f / 255) - ULP == SkBits2Float(0x3B808080)
-static Sk4f premul(const Sk4f& x) {
+static Sk4s premul(const Sk4s& x) {
float scale = SkPMFloat(x).a() * gInv255;
- Sk4f pm = x * Sk4f(scale, scale, scale, 1);
+ Sk4s pm = x * Sk4s(scale, scale, scale, 1);
#ifdef SK_DEBUG
SkPMFloat pmf(pm);
@@ -267,9 +267,9 @@ static Sk4f premul(const Sk4f& x) {
return pm;
}
-static Sk4f unpremul(const SkPMFloat& pm) {
+static Sk4s unpremul(const SkPMFloat& pm) {
float scale = 255 / pm.a(); // candidate for fast/approx invert?
- return Sk4f(pm) * Sk4f(scale, scale, scale, 1);
+ return Sk4s(pm) * Sk4s(scale, scale, scale, 1);
}
void SkColorMatrixFilter::filterSpan(const SkPMColor src[], int count, SkPMColor dst[]) const {
@@ -288,11 +288,11 @@ void SkColorMatrixFilter::filterSpan(const SkPMColor src[], int count, SkPMColor
#endif
if (use_floats) {
- const Sk4f c0 = Sk4f::Load(fTranspose + 0);
- const Sk4f c1 = Sk4f::Load(fTranspose + 4);
- const Sk4f c2 = Sk4f::Load(fTranspose + 8);
- const Sk4f c3 = Sk4f::Load(fTranspose + 12);
- const Sk4f c4 = Sk4f::Load(fTranspose + 16); // translates
+ const Sk4s c0 = Sk4s::Load(fTranspose + 0);
+ const Sk4s c1 = Sk4s::Load(fTranspose + 4);
+ const Sk4s c2 = Sk4s::Load(fTranspose + 8);
+ const Sk4s c3 = Sk4s::Load(fTranspose + 12);
+ const Sk4s c4 = Sk4s::Load(fTranspose + 16); // translates
SkPMColor matrix_translate_pmcolor = SkPMFloat(premul(c4)).clamped();
@@ -309,16 +309,16 @@ void SkColorMatrixFilter::filterSpan(const SkPMColor src[], int count, SkPMColor
srcf = unpremul(srcf);
}
- Sk4f r4 = Sk4f(srcf.r());
- Sk4f g4 = Sk4f(srcf.g());
- Sk4f b4 = Sk4f(srcf.b());
- Sk4f a4 = Sk4f(srcf.a());
+ Sk4s r4 = Sk4s(srcf.r());
+ Sk4s g4 = Sk4s(srcf.g());
+ Sk4s b4 = Sk4s(srcf.b());
+ Sk4s a4 = Sk4s(srcf.a());
// apply matrix
- Sk4f dst4 = c0 * r4 + c1 * g4 + c2 * b4 + c3 * a4 + c4;
+ Sk4s dst4 = c0 * r4 + c1 * g4 + c2 * b4 + c3 * a4 + c4;
// pin before re-premul (convention for color-matrix???)
- dst4 = Sk4f::Max(Sk4f(0), Sk4f::Min(Sk4f(255), dst4));
+ dst4 = Sk4s::Max(Sk4s(0), Sk4s::Min(Sk4s(255), dst4));
// re-premul and write
dst[i] = SkPMFloat(premul(dst4)).get();
diff --git a/src/opts/Sk2x_neon.h b/src/opts/Sk2x_neon.h
deleted file mode 100644
index 8e6e46164b..0000000000
--- a/src/opts/Sk2x_neon.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-// It is important _not_ to put header guards here.
-// This file will be intentionally included three times.
-
-#include "SkTypes.h" // Keep this before any #ifdef for skbug.com/3362
-
-#if defined(SK2X_PREAMBLE)
- #include <arm_neon.h>
- #include <math.h>
- template <typename T> struct SkScalarToSIMD;
- template <> struct SkScalarToSIMD< float> { typedef float32x2_t Type; };
- #if defined(SK_CPU_ARM64)
- template <> struct SkScalarToSIMD<double> { typedef float64x2_t Type; };
- #else
- template <> struct SkScalarToSIMD<double> { typedef double Type[2]; };
- #endif
-
-
-#elif defined(SK2X_PRIVATE)
- typename SkScalarToSIMD<T>::Type fVec;
- /*implicit*/ Sk2x(const typename SkScalarToSIMD<T>::Type vec) { fVec = vec; }
-
-#else
-
-#define M(...) template <> inline __VA_ARGS__ Sk2x<float>::
-
-M() Sk2x() {}
-M() Sk2x(float val) { fVec = vdup_n_f32(val); }
-M() Sk2x(float a, float b) { fVec = (float32x2_t) { a, b }; }
-M(Sk2f&) operator=(const Sk2f& o) { fVec = o.fVec; return *this; }
-
-M(Sk2f) Load(const float vals[2]) { return vld1_f32(vals); }
-M(void) store(float vals[2]) const { vst1_f32(vals, fVec); }
-
-M(Sk2f) approxInvert() const {
- float32x2_t est0 = vrecpe_f32(fVec),
- est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
- return est1;
-}
-
-M(Sk2f) invert() const {
- float32x2_t est1 = this->approxInvert().fVec,
- est2 = vmul_f32(vrecps_f32(est1, fVec), est1);
- return est2;
-}
-
-M(Sk2f) add(const Sk2f& o) const { return vadd_f32(fVec, o.fVec); }
-M(Sk2f) subtract(const Sk2f& o) const { return vsub_f32(fVec, o.fVec); }
-M(Sk2f) multiply(const Sk2f& o) const { return vmul_f32(fVec, o.fVec); }
-M(Sk2f) divide(const Sk2f& o) const {
-#if defined(SK_CPU_ARM64)
- return vdiv_f32(fVec, o.fVec);
-#else
- return vmul_f32(fVec, o.invert().fVec);
-#endif
-}
-
-M(Sk2f) Min(const Sk2f& a, const Sk2f& b) { return vmin_f32(a.fVec, b.fVec); }
-M(Sk2f) Max(const Sk2f& a, const Sk2f& b) { return vmax_f32(a.fVec, b.fVec); }
-
-M(Sk2f) rsqrt() const {
- float32x2_t est0 = vrsqrte_f32(fVec),
- est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
- return est1;
-}
-M(Sk2f) sqrt() const {
-#if defined(SK_CPU_ARM64)
- return vsqrt_f32(fVec);
-#else
- float32x2_t est1 = this->rsqrt().fVec,
- // An extra step of Newton's method to refine the estimate of 1/sqrt(this).
- est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
- return vmul_f32(fVec, est2);
-#endif
-}
-
-#undef M
-
-#define M(...) template <> inline __VA_ARGS__ Sk2x<double>::
-
-#if defined(SK_CPU_ARM64)
- M() Sk2x() {}
- M() Sk2x(double val) { fVec = vdupq_n_f64(val); }
- M() Sk2x(double a, double b) { fVec = (float64x2_t) { a, b }; }
- M(Sk2d&) operator=(const Sk2d& o) { fVec = o.fVec; return *this; }
-
- M(Sk2d) Load(const double vals[2]) { return vld1q_f64(vals); }
- M(void) store(double vals[2]) const { vst1q_f64(vals, fVec); }
-
- M(Sk2d) add(const Sk2d& o) const { return vaddq_f64(fVec, o.fVec); }
- M(Sk2d) subtract(const Sk2d& o) const { return vsubq_f64(fVec, o.fVec); }
- M(Sk2d) multiply(const Sk2d& o) const { return vmulq_f64(fVec, o.fVec); }
- M(Sk2d) divide(const Sk2d& o) const { return vdivq_f64(fVec, o.fVec); }
-
- M(Sk2d) Min(const Sk2d& a, const Sk2d& b) { return vminq_f64(a.fVec, b.fVec); }
- M(Sk2d) Max(const Sk2d& a, const Sk2d& b) { return vmaxq_f64(a.fVec, b.fVec); }
-
- M(Sk2d) rsqrt() const {
- float64x2_t est0 = vrsqrteq_f64(fVec),
- est1 = vmulq_f64(vrsqrtsq_f64(fVec, vmulq_f64(est0, est0)), est0);
- return est1;
- }
- M(Sk2d) sqrt() const { return vsqrtq_f64(fVec); }
-
- M(Sk2d) approxInvert() const {
- float64x2_t est0 = vrecpeq_f64(fVec),
- est1 = vmulq_f64(vrecpsq_f64(est0, fVec), est0);
- return est1;
- }
-
- M(Sk2d) invert() const {
- float64x2_t est1 = this->approxInvert().fVec,
- est2 = vmulq_f64(vrecpsq_f64(est1, fVec), est1),
- est3 = vmulq_f64(vrecpsq_f64(est2, fVec), est2);
- return est3;
- }
-
-#else // Scalar implementation for 32-bit chips, which don't have float64x2_t.
- M() Sk2x() {}
- M() Sk2x(double val) { fVec[0] = fVec[1] = val; }
- M() Sk2x(double a, double b) { fVec[0] = a; fVec[1] = b; }
- M(Sk2d&) operator=(const Sk2d& o) {
- fVec[0] = o.fVec[0];
- fVec[1] = o.fVec[1];
- return *this;
- }
-
- M(Sk2d) Load(const double vals[2]) { return Sk2d(vals[0], vals[1]); }
- M(void) store(double vals[2]) const { vals[0] = fVec[0]; vals[1] = fVec[1]; }
-
- M(Sk2d) add(const Sk2d& o) const { return Sk2d(fVec[0] + o.fVec[0], fVec[1] + o.fVec[1]); }
- M(Sk2d) subtract(const Sk2d& o) const { return Sk2d(fVec[0] - o.fVec[0], fVec[1] - o.fVec[1]); }
- M(Sk2d) multiply(const Sk2d& o) const { return Sk2d(fVec[0] * o.fVec[0], fVec[1] * o.fVec[1]); }
- M(Sk2d) divide(const Sk2d& o) const { return Sk2d(fVec[0] / o.fVec[0], fVec[1] / o.fVec[1]); }
-
- M(Sk2d) Min(const Sk2d& a, const Sk2d& b) {
- return Sk2d(SkTMin(a.fVec[0], b.fVec[0]), SkTMin(a.fVec[1], b.fVec[1]));
- }
- M(Sk2d) Max(const Sk2d& a, const Sk2d& b) {
- return Sk2d(SkTMax(a.fVec[0], b.fVec[0]), SkTMax(a.fVec[1], b.fVec[1]));
- }
-
- M(Sk2d) rsqrt() const { return Sk2d(1.0/::sqrt(fVec[0]), 1.0/::sqrt(fVec[1])); }
- M(Sk2d) sqrt() const { return Sk2d( ::sqrt(fVec[0]), ::sqrt(fVec[1])); }
-
- M(Sk2d) invert() const { return Sk2d(1.0 / fVec[0], 1.0 / fVec[1]); }
- M(Sk2d) approxInvert() const { return this->invert(); }
-#endif
-
-#undef M
-
-#endif
diff --git a/src/opts/Sk2x_none.h b/src/opts/Sk2x_none.h
deleted file mode 100644
index 2c68e736f4..0000000000
--- a/src/opts/Sk2x_none.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-// It is important _not_ to put header guards here.
-// This file will be intentionally included three times.
-
-#include "SkTypes.h" // Keep this before any #ifdef for skbug.com/3362
-
-#if defined(SK2X_PREAMBLE)
- #include "SkFloatingPoint.h"
- #include <math.h>
-
-#elif defined(SK2X_PRIVATE)
- T fVec[2];
-
-#else
-
-#define M(...) template <typename T> __VA_ARGS__ Sk2x<T>::
-
-M() Sk2x() {}
-M() Sk2x(T val) { fVec[0] = fVec[1] = val; }
-M() Sk2x(T a, T b) { fVec[0] = a; fVec[1] = b; }
-
-M(Sk2x<T>&) operator=(const Sk2x<T>& o) {
- fVec[0] = o.fVec[0];
- fVec[1] = o.fVec[1];
- return *this;
-}
-
-M(Sk2x<T>) Load(const T vals[2]) { return Sk2x<T>(vals[0], vals[1]); }
-M(void) store(T vals[2]) const { vals[0] = fVec[0]; vals[1] = fVec[1]; }
-
-M(Sk2x<T>) add(const Sk2x<T>& o) const {
- return Sk2x<T>(fVec[0] + o.fVec[0], fVec[1] + o.fVec[1]);
-}
-M(Sk2x<T>) subtract(const Sk2x<T>& o) const {
- return Sk2x<T>(fVec[0] - o.fVec[0], fVec[1] - o.fVec[1]);
-}
-M(Sk2x<T>) multiply(const Sk2x<T>& o) const {
- return Sk2x<T>(fVec[0] * o.fVec[0], fVec[1] * o.fVec[1]);
-}
-M(Sk2x<T>) divide(const Sk2x<T>& o) const {
- return Sk2x<T>(fVec[0] / o.fVec[0], fVec[1] / o.fVec[1]);
-}
-
-M(Sk2x<T>) Min(const Sk2x<T>& a, const Sk2x<T>& b) {
- return Sk2x<T>(SkTMin(a.fVec[0], b.fVec[0]), SkTMin(a.fVec[1], b.fVec[1]));
-}
-M(Sk2x<T>) Max(const Sk2x<T>& a, const Sk2x<T>& b) {
- return Sk2x<T>(SkTMax(a.fVec[0], b.fVec[0]), SkTMax(a.fVec[1], b.fVec[1]));
-}
-
-M(Sk2x<T>) invert() const { return Sk2x<T>((T)1.0 / fVec[0], (T)1.0 / fVec[1]); }
-M(Sk2x<T>) approxInvert() const { return this->invert(); }
-
-#undef M
-
-#define M template <> inline
-
-M Sk2f Sk2f::rsqrt() const { return Sk2f(sk_float_rsqrt(fVec[0]), sk_float_rsqrt(fVec[1])); }
-M Sk2f Sk2f:: sqrt() const { return Sk2f( sqrtf(fVec[0]), sqrtf(fVec[1])); }
-
-M Sk2d Sk2d::rsqrt() const { return Sk2d(1.0/::sqrt(fVec[0]), 1.0/::sqrt(fVec[1])); }
-M Sk2d Sk2d:: sqrt() const { return Sk2d( ::sqrt(fVec[0]), ::sqrt(fVec[1])); }
-
-#undef M
-
-#endif
diff --git a/src/opts/Sk2x_sse.h b/src/opts/Sk2x_sse.h
deleted file mode 100644
index 1136f1d856..0000000000
--- a/src/opts/Sk2x_sse.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-// It is important _not_ to put header guards here.
-// This file will be intentionally included three times.
-
-#include "SkTypes.h" // Keep this before any #ifdef for skbug.com/3362
-
-#if defined(SK2X_PREAMBLE)
- #include <immintrin.h>
- template <typename T> struct SkScalarToSIMD;
- template <> struct SkScalarToSIMD< float> { typedef __m128 Type; };
- template <> struct SkScalarToSIMD<double> { typedef __m128d Type; };
-
-
-#elif defined(SK2X_PRIVATE)
- typename SkScalarToSIMD<T>::Type fVec;
- /*implicit*/ Sk2x(const typename SkScalarToSIMD<T>::Type vec) { fVec = vec; }
-
-#else
-
-#define M(...) template <> inline __VA_ARGS__ Sk2x<float>::
-
-M() Sk2x() {}
-M() Sk2x(float val) { fVec = _mm_set1_ps(val); }
-M() Sk2x(float a, float b) { fVec = _mm_set_ps(b,a,b,a); }
-M(Sk2f&) operator=(const Sk2f& o) { fVec = o.fVec; return *this; }
-
-M(Sk2f) Load(const float vals[2]) {
- return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)vals));
-}
-M(void) store(float vals[2]) const { _mm_storel_pi((__m64*)vals, fVec); }
-
-M(Sk2f) add(const Sk2f& o) const { return _mm_add_ps(fVec, o.fVec); }
-M(Sk2f) subtract(const Sk2f& o) const { return _mm_sub_ps(fVec, o.fVec); }
-M(Sk2f) multiply(const Sk2f& o) const { return _mm_mul_ps(fVec, o.fVec); }
-M(Sk2f) divide(const Sk2f& o) const { return _mm_div_ps(fVec, o.fVec); }
-
-M(Sk2f) Min(const Sk2f& a, const Sk2f& b) { return _mm_min_ps(a.fVec, b.fVec); }
-M(Sk2f) Max(const Sk2f& a, const Sk2f& b) { return _mm_max_ps(a.fVec, b.fVec); }
-
-M(Sk2f) rsqrt() const { return _mm_rsqrt_ps(fVec); }
-M(Sk2f) sqrt() const { return _mm_sqrt_ps (fVec); }
-
-M(Sk2f) invert() const { return Sk2f(1.0f) / *this; }
-M(Sk2f) approxInvert() const { return _mm_rcp_ps(fVec); }
-
-#undef M
-
-#define M(...) template <> inline __VA_ARGS__ Sk2x<double>::
-
-M() Sk2x() {}
-M() Sk2x(double val) { fVec = _mm_set1_pd(val); }
-M() Sk2x(double a, double b) { fVec = _mm_set_pd(b, a); }
-M(Sk2d&) operator=(const Sk2d& o) { fVec = o.fVec; return *this; }
-
-M(Sk2d) Load(const double vals[2]) { return _mm_loadu_pd(vals); }
-M(void) store(double vals[2]) const { _mm_storeu_pd(vals, fVec); }
-
-M(Sk2d) add(const Sk2d& o) const { return _mm_add_pd(fVec, o.fVec); }
-M(Sk2d) subtract(const Sk2d& o) const { return _mm_sub_pd(fVec, o.fVec); }
-M(Sk2d) multiply(const Sk2d& o) const { return _mm_mul_pd(fVec, o.fVec); }
-M(Sk2d) divide(const Sk2d& o) const { return _mm_div_pd(fVec, o.fVec); }
-
-M(Sk2d) Min(const Sk2d& a, const Sk2d& b) { return _mm_min_pd(a.fVec, b.fVec); }
-M(Sk2d) Max(const Sk2d& a, const Sk2d& b) { return _mm_max_pd(a.fVec, b.fVec); }
-
-// There is no _mm_rsqrt_pd, so we do Sk2d::rsqrt() in floats.
-M(Sk2d) rsqrt() const { return _mm_cvtps_pd(_mm_rsqrt_ps(_mm_cvtpd_ps(fVec))); }
-M(Sk2d) sqrt() const { return _mm_sqrt_pd(fVec); }
-
-// No _mm_rcp_pd, so do Sk2d::approxInvert() in floats.
-M(Sk2d) invert() const { return Sk2d(1.0) / *this; }
-M(Sk2d) approxInvert() const { return _mm_cvtps_pd(_mm_rcp_ps(_mm_cvtpd_ps(fVec))); }
-
-#undef M
-
-#endif
diff --git a/src/opts/Sk4x_neon.h b/src/opts/Sk4x_neon.h
deleted file mode 100644
index b89c30fcb7..0000000000
--- a/src/opts/Sk4x_neon.h
+++ /dev/null
@@ -1,166 +0,0 @@
-// It is important _not_ to put header guards here.
-// This file will be intentionally included three times.
-
-#include "SkTypes.h" // Keep this before any #ifdef for skbug.com/3362
-
-#if defined(SK4X_PREAMBLE)
- #include <arm_neon.h>
-
- // Template metaprogramming to map scalar types to vector types.
- template <typename T> struct SkScalarToSIMD;
- template <> struct SkScalarToSIMD<float> { typedef float32x4_t Type; };
- template <> struct SkScalarToSIMD<int32_t> { typedef int32x4_t Type; };
-
-#elif defined(SK4X_PRIVATE)
- Sk4x(float32x4_t);
- Sk4x(int32x4_t);
-
- typename SkScalarToSIMD<T>::Type fVec;
-
-#else
-
-// Vector Constructors
-//template <> inline Sk4f::Sk4x(int32x4_t v) : fVec(vcvtq_f32_s32(v)) {}
-template <> inline Sk4f::Sk4x(float32x4_t v) : fVec(v) {}
-template <> inline Sk4i::Sk4x(int32x4_t v) : fVec(v) {}
-//template <> inline Sk4i::Sk4x(float32x4_t v) : fVec(vcvtq_s32_f32(v)) {}
-
-// Generic Methods
-template <typename T> Sk4x<T>::Sk4x() {}
-template <typename T> Sk4x<T>::Sk4x(const Sk4x& other) { *this = other; }
-template <typename T> Sk4x<T>& Sk4x<T>::operator=(const Sk4x<T>& other) {
- fVec = other.fVec;
- return *this;
-}
-
-// Sk4f Methods
-#define M(...) template <> inline __VA_ARGS__ Sk4f::
-
-M() Sk4x(float v) : fVec(vdupq_n_f32(v)) {}
-M() Sk4x(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }
-
-// As far as I can tell, it's not possible to provide an alignment hint to
-// NEON using intrinsics. However, I think it is possible at the assembly
-// level if we want to get into that.
-// TODO: Write our own aligned load and store.
-M(Sk4f) Load (const float fs[4]) { return vld1q_f32(fs); }
-M(Sk4f) LoadAligned(const float fs[4]) { return vld1q_f32(fs); }
-M(void) store (float fs[4]) const { vst1q_f32(fs, fVec); }
-M(void) storeAligned(float fs[4]) const { vst1q_f32 (fs, fVec); }
-
-template <>
-M(Sk4i) reinterpret<Sk4i>() const { return vreinterpretq_s32_f32(fVec); }
-
-template <>
-M(Sk4i) cast<Sk4i>() const { return vcvtq_s32_f32(fVec); }
-
-// We're going to skip allTrue(), anyTrue(), and bit-manipulators
-// for Sk4f. Code that calls them probably does so accidentally.
-// Ask msarett or mtklein to fill these in if you really need them.
-M(Sk4f) add (const Sk4f& o) const { return vaddq_f32(fVec, o.fVec); }
-M(Sk4f) subtract(const Sk4f& o) const { return vsubq_f32(fVec, o.fVec); }
-M(Sk4f) multiply(const Sk4f& o) const { return vmulq_f32(fVec, o.fVec); }
-
-M(Sk4f) divide (const Sk4f& o) const {
-#if defined(SK_CPU_ARM64)
- return vdivq_f32(fVec, o.fVec);
-#else
- float32x4_t est0 = vrecpeq_f32(o.fVec),
- est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0),
- est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1);
- return vmulq_f32(est2, fVec);
-#endif
-}
-
-M(Sk4f) rsqrt() const {
- float32x4_t est0 = vrsqrteq_f32(fVec),
- est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
- return est1;
-}
-
-M(Sk4f) sqrt() const {
-#if defined(SK_CPU_ARM64)
- return vsqrtq_f32(fVec);
-#else
- float32x4_t est1 = this->rsqrt().fVec,
- // An extra step of Newton's method to refine the estimate of 1/sqrt(this).
- est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
- return vmulq_f32(fVec, est2);
-#endif
-}
-
-M(Sk4i) equal (const Sk4f& o) const { return vreinterpretq_s32_u32(vceqq_f32(fVec, o.fVec)); }
-M(Sk4i) notEqual (const Sk4f& o) const { return vreinterpretq_s32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec))); }
-M(Sk4i) lessThan (const Sk4f& o) const { return vreinterpretq_s32_u32(vcltq_f32(fVec, o.fVec)); }
-M(Sk4i) greaterThan (const Sk4f& o) const { return vreinterpretq_s32_u32(vcgtq_f32(fVec, o.fVec)); }
-M(Sk4i) lessThanEqual (const Sk4f& o) const { return vreinterpretq_s32_u32(vcleq_f32(fVec, o.fVec)); }
-M(Sk4i) greaterThanEqual(const Sk4f& o) const { return vreinterpretq_s32_u32(vcgeq_f32(fVec, o.fVec)); }
-
-M(Sk4f) Min(const Sk4f& a, const Sk4f& b) { return vminq_f32(a.fVec, b.fVec); }
-M(Sk4f) Max(const Sk4f& a, const Sk4f& b) { return vmaxq_f32(a.fVec, b.fVec); }
-
-M(Sk4f) aacc() const { return vtrnq_f32(fVec, fVec).val[0]; }
-M(Sk4f) bbdd() const { return vtrnq_f32(fVec, fVec).val[1]; }
-M(Sk4f) badc() const { return vrev64q_f32(fVec); }
-
-// Sk4i Methods
-#undef M
-#define M(...) template <> inline __VA_ARGS__ Sk4i::
-
-M() Sk4x(int32_t v) : fVec(vdupq_n_s32(v)) {}
-M() Sk4x(int32_t a, int32_t b, int32_t c, int32_t d) { fVec = (int32x4_t) { a, b, c, d }; }
-
-// As far as I can tell, it's not possible to provide an alignment hint to
-// NEON using intrinsics. However, I think it is possible at the assembly
-// level if we want to get into that.
-M(Sk4i) Load (const int32_t is[4]) { return vld1q_s32(is); }
-M(Sk4i) LoadAligned(const int32_t is[4]) { return vld1q_s32(is); }
-M(void) store (int32_t is[4]) const { vst1q_s32(is, fVec); }
-M(void) storeAligned(int32_t is[4]) const { vst1q_s32 (is, fVec); }
-
-template <>
-M(Sk4f) reinterpret<Sk4f>() const { return vreinterpretq_f32_s32(fVec); }
-
-template <>
-M(Sk4f) cast<Sk4f>() const { return vcvtq_f32_s32(fVec); }
-
-M(bool) allTrue() const {
- int32_t a = vgetq_lane_s32(fVec, 0);
- int32_t b = vgetq_lane_s32(fVec, 1);
- int32_t c = vgetq_lane_s32(fVec, 2);
- int32_t d = vgetq_lane_s32(fVec, 3);
- return a & b & c & d;
-}
-M(bool) anyTrue() const {
- int32_t a = vgetq_lane_s32(fVec, 0);
- int32_t b = vgetq_lane_s32(fVec, 1);
- int32_t c = vgetq_lane_s32(fVec, 2);
- int32_t d = vgetq_lane_s32(fVec, 3);
- return a | b | c | d;
-}
-
-M(Sk4i) bitNot() const { return vmvnq_s32(fVec); }
-M(Sk4i) bitAnd(const Sk4i& o) const { return vandq_s32(fVec, o.fVec); }
-M(Sk4i) bitOr (const Sk4i& o) const { return vorrq_s32(fVec, o.fVec); }
-
-M(Sk4i) equal (const Sk4i& o) const { return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec)); }
-M(Sk4i) notEqual (const Sk4i& o) const { return vreinterpretq_s32_u32(vmvnq_u32(vceqq_s32(fVec, o.fVec))); }
-M(Sk4i) lessThan (const Sk4i& o) const { return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec)); }
-M(Sk4i) greaterThan (const Sk4i& o) const { return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec)); }
-M(Sk4i) lessThanEqual (const Sk4i& o) const { return vreinterpretq_s32_u32(vcleq_s32(fVec, o.fVec)); }
-M(Sk4i) greaterThanEqual(const Sk4i& o) const { return vreinterpretq_s32_u32(vcgeq_s32(fVec, o.fVec)); }
-
-M(Sk4i) add (const Sk4i& o) const { return vaddq_s32(fVec, o.fVec); }
-M(Sk4i) subtract(const Sk4i& o) const { return vsubq_s32(fVec, o.fVec); }
-M(Sk4i) multiply(const Sk4i& o) const { return vmulq_s32(fVec, o.fVec); }
-// NEON does not have integer reciprocal, sqrt, or division.
-M(Sk4i) Min(const Sk4i& a, const Sk4i& b) { return vminq_s32(a.fVec, b.fVec); }
-M(Sk4i) Max(const Sk4i& a, const Sk4i& b) { return vmaxq_s32(a.fVec, b.fVec); }
-
-M(Sk4i) aacc() const { return vtrnq_s32(fVec, fVec).val[0]; }
-M(Sk4i) bbdd() const { return vtrnq_s32(fVec, fVec).val[1]; }
-M(Sk4i) badc() const { return vrev64q_s32(fVec); }
-
-#undef M
-
-#endif
diff --git a/src/opts/Sk4x_none.h b/src/opts/Sk4x_none.h
deleted file mode 100644
index b477177026..0000000000
--- a/src/opts/Sk4x_none.h
+++ /dev/null
@@ -1,120 +0,0 @@
-// It is important _not_ to put header guards here.
-// This file will be intentionally included three times.
-
-#include "SkTypes.h" // Keep this before any #ifdef for skbug.com/3362
-
-#if defined(SK4X_PREAMBLE)
- #include "SkFloatingPoint.h"
- #include <math.h>
-
-#elif defined(SK4X_PRIVATE)
- typedef T Type;
- typedef T Vector[4];
-
- Vector fVec;
-
- template <int m, int a, int s, int k>
- static Sk4x Shuffle(const Sk4x&, const Sk4x&);
-
- void set(const T vals[4]) { for (int i = 0; i < 4; i++) { fVec[i] = vals[i]; } }
-
-#else
-
-#define M(...) template <typename T> __VA_ARGS__ Sk4x<T>::
-
-M() Sk4x() {}
-M() Sk4x(T v) { fVec[0] = fVec[1] = fVec[2] = fVec[3] = v; }
-M() Sk4x(T a, T b, T c, T d) { fVec[0] = a; fVec[1] = b; fVec[2] = c; fVec[3] = d; }
-
-M() Sk4x(const Sk4x<T>& other) { this->set(other.fVec); }
-M(Sk4x<T>&) operator=(const Sk4x<T>& other) { this->set(other.fVec); return *this; }
-
-M(Sk4x<T>) Load (const T vals[4]) { Sk4x r; r.set(vals); return r; }
-M(Sk4x<T>) LoadAligned(const T vals[4]) { return Load(vals); }
-
-M(void) store (T vals[4]) const { for (int i = 0; i < 4; i++) { vals[i] = fVec[i]; } }
-M(void) storeAligned(T vals[4]) const { this->store(vals); }
-
-M(template <typename Dst> Dst) reinterpret() const {
- Dst d;
- memcpy(&d.fVec, fVec, sizeof(fVec));
- return d;
-}
-M(template <typename Dst> Dst) cast() const {
- return Dst((typename Dst::Type)fVec[0],
- (typename Dst::Type)fVec[1],
- (typename Dst::Type)fVec[2],
- (typename Dst::Type)fVec[3]);
-}
-
-M(bool) allTrue() const { return fVec[0] && fVec[1] && fVec[2] && fVec[3]; }
-M(bool) anyTrue() const { return fVec[0] || fVec[1] || fVec[2] || fVec[3]; }
-
-M(Sk4x<T>) bitNot() const { return Sk4x(~fVec[0], ~fVec[1], ~fVec[2], ~fVec[3]); }
-
-#define BINOP(op) fVec[0] op other.fVec[0], \
- fVec[1] op other.fVec[1], \
- fVec[2] op other.fVec[2], \
- fVec[3] op other.fVec[3]
-M(Sk4x<T>) bitAnd(const Sk4x& other) const { return Sk4x(BINOP(&)); }
-M(Sk4x<T>) bitOr(const Sk4x& other) const { return Sk4x(BINOP(|)); }
-M(Sk4x<T>) add(const Sk4x<T>& other) const { return Sk4x(BINOP(+)); }
-M(Sk4x<T>) subtract(const Sk4x<T>& other) const { return Sk4x(BINOP(-)); }
-M(Sk4x<T>) multiply(const Sk4x<T>& other) const { return Sk4x(BINOP(*)); }
-M(Sk4x<T>) divide(const Sk4x<T>& other) const { return Sk4x(BINOP(/)); }
-#undef BINOP
-
-template<> inline Sk4f Sk4f::rsqrt() const {
- return Sk4f(sk_float_rsqrt(fVec[0]),
- sk_float_rsqrt(fVec[1]),
- sk_float_rsqrt(fVec[2]),
- sk_float_rsqrt(fVec[3]));
-}
-
-template<> inline Sk4f Sk4f::sqrt() const {
- return Sk4f(sqrtf(fVec[0]),
- sqrtf(fVec[1]),
- sqrtf(fVec[2]),
- sqrtf(fVec[3]));
-}
-
-#define BOOL_BINOP(op) fVec[0] op other.fVec[0] ? -1 : 0, \
- fVec[1] op other.fVec[1] ? -1 : 0, \
- fVec[2] op other.fVec[2] ? -1 : 0, \
- fVec[3] op other.fVec[3] ? -1 : 0
-M(Sk4i) equal(const Sk4x<T>& other) const { return Sk4i(BOOL_BINOP(==)); }
-M(Sk4i) notEqual(const Sk4x<T>& other) const { return Sk4i(BOOL_BINOP(!=)); }
-M(Sk4i) lessThan(const Sk4x<T>& other) const { return Sk4i(BOOL_BINOP( <)); }
-M(Sk4i) greaterThan(const Sk4x<T>& other) const { return Sk4i(BOOL_BINOP( >)); }
-M(Sk4i) lessThanEqual(const Sk4x<T>& other) const { return Sk4i(BOOL_BINOP(<=)); }
-M(Sk4i) greaterThanEqual(const Sk4x<T>& other) const { return Sk4i(BOOL_BINOP(>=)); }
-#undef BOOL_BINOP
-
-M(Sk4x<T>) Min(const Sk4x<T>& a, const Sk4x<T>& b) {
- return Sk4x(SkTMin(a.fVec[0], b.fVec[0]),
- SkTMin(a.fVec[1], b.fVec[1]),
- SkTMin(a.fVec[2], b.fVec[2]),
- SkTMin(a.fVec[3], b.fVec[3]));
-}
-
-M(Sk4x<T>) Max(const Sk4x<T>& a, const Sk4x<T>& b) {
- return Sk4x(SkTMax(a.fVec[0], b.fVec[0]),
- SkTMax(a.fVec[1], b.fVec[1]),
- SkTMax(a.fVec[2], b.fVec[2]),
- SkTMax(a.fVec[3], b.fVec[3]));
-}
-
-M(template <int m, int a, int s, int k> Sk4x<T>) Shuffle(const Sk4x<T>& x, const Sk4x<T>& y) {
- return Sk4x(m < 4 ? x.fVec[m] : y.fVec[m-4],
- a < 4 ? x.fVec[a] : y.fVec[a-4],
- s < 4 ? x.fVec[s] : y.fVec[s-4],
- k < 4 ? x.fVec[k] : y.fVec[k-4]);
-}
-
-M(Sk4x<T>) aacc() const { return Shuffle<0,0,2,2>(*this, *this); }
-M(Sk4x<T>) bbdd() const { return Shuffle<1,1,3,3>(*this, *this); }
-M(Sk4x<T>) badc() const { return Shuffle<1,0,3,2>(*this, *this); }
-
-#undef M
-
-#endif
diff --git a/src/opts/Sk4x_sse.h b/src/opts/Sk4x_sse.h
deleted file mode 100644
index ce452d08b6..0000000000
--- a/src/opts/Sk4x_sse.h
+++ /dev/null
@@ -1,177 +0,0 @@
-// It is important _not_ to put header guards here.
-// This file will be intentionally included three times.
-
-// Useful reading:
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/
-
-#include "SkTypes.h" // Keep this before any #ifdef for skbug.com/3362
-
-#if defined(SK4X_PREAMBLE)
- // Code in this file may assume SSE and SSE2.
- #include <emmintrin.h>
-
- // It must check for later instruction sets.
- #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
- #include <immintrin.h>
- #endif
-
- // A little bit of template metaprogramming to map
- // float to __m128 and int32_t to __m128i.
- template <typename T> struct SkScalarToSIMD;
- template <> struct SkScalarToSIMD<float> { typedef __m128 Type; };
- template <> struct SkScalarToSIMD<int32_t> { typedef __m128i Type; };
-
- // These are all free, zero instructions.
- // MSVC insists we use _mm_castA_B(a) instead of (B)a.
- static inline __m128 as_4f(__m128i v) { return _mm_castsi128_ps(v); }
- static inline __m128 as_4f(__m128 v) { return v ; }
- static inline __m128i as_4i(__m128i v) { return v ; }
- static inline __m128i as_4i(__m128 v) { return _mm_castps_si128(v); }
-
-#elif defined(SK4X_PRIVATE)
- // It'd be slightly faster to call _mm_cmpeq_epi32() on an unintialized register and itself,
- // but that has caused hard to debug issues when compilers recognize dealing with uninitialized
- // memory as undefined behavior that can be optimized away.
- static __m128i True() { return _mm_set1_epi8(~0); }
-
- // Leaving these implicit makes the rest of the code below a bit less noisy to read.
- Sk4x(__m128i);
- Sk4x(__m128);
-
- Sk4x andNot(const Sk4x&) const;
-
- typename SkScalarToSIMD<T>::Type fVec;
-
-#else//Method definitions.
-
-// Helps to get these in before anything else.
-template <> inline Sk4f::Sk4x(__m128i v) : fVec(as_4f(v)) {}
-template <> inline Sk4f::Sk4x(__m128 v) : fVec( v ) {}
-template <> inline Sk4i::Sk4x(__m128i v) : fVec( v ) {}
-template <> inline Sk4i::Sk4x(__m128 v) : fVec(as_4i(v)) {}
-
-// Next, methods whose implementation is the same for Sk4f and Sk4i.
-template <typename T> Sk4x<T>::Sk4x() {}
-template <typename T> Sk4x<T>::Sk4x(const Sk4x& other) { *this = other; }
-template <typename T> Sk4x<T>& Sk4x<T>::operator=(const Sk4x<T>& other) {
- fVec = other.fVec;
- return *this;
-}
-
-// We pun in these _mm_shuffle_* methods a little to use the fastest / most available methods.
-// They're all bit-preserving operations so it shouldn't matter.
-
-template <typename T>
-Sk4x<T> Sk4x<T>::aacc() const { return _mm_shuffle_epi32(as_4i(fVec), _MM_SHUFFLE(2,2,0,0)); }
-template <typename T>
-Sk4x<T> Sk4x<T>::bbdd() const { return _mm_shuffle_epi32(as_4i(fVec), _MM_SHUFFLE(3,3,1,1)); }
-template <typename T>
-Sk4x<T> Sk4x<T>::badc() const { return _mm_shuffle_epi32(as_4i(fVec), _MM_SHUFFLE(2,3,0,1)); }
-
-// Now we'll write all Sk4f specific methods. This M() macro will remove some noise.
-#define M(...) template <> inline __VA_ARGS__ Sk4f::
-
-M() Sk4x(float v) : fVec(_mm_set1_ps(v)) {}
-M() Sk4x(float a, float b, float c, float d) : fVec(_mm_set_ps(d,c,b,a)) {}
-
-M(Sk4f) Load (const float fs[4]) { return _mm_loadu_ps(fs); }
-M(Sk4f) LoadAligned(const float fs[4]) { return _mm_load_ps (fs); }
-
-M(void) store (float fs[4]) const { _mm_storeu_ps(fs, fVec); }
-M(void) storeAligned(float fs[4]) const { _mm_store_ps (fs, fVec); }
-
-template <> M(Sk4i) reinterpret<Sk4i>() const { return as_4i(fVec); }
-
-// cvttps truncates, same as (int) when positive.
-template <> M(Sk4i) cast<Sk4i>() const { return _mm_cvttps_epi32(fVec); }
-
-// We're going to try a little experiment here and skip allTrue(), anyTrue(), and bit-manipulators
-// for Sk4f. Code that calls them probably does so accidentally.
-// Ask mtklein to fill these in if you really need them.
-
-M(Sk4f) add (const Sk4f& o) const { return _mm_add_ps(fVec, o.fVec); }
-M(Sk4f) subtract(const Sk4f& o) const { return _mm_sub_ps(fVec, o.fVec); }
-M(Sk4f) multiply(const Sk4f& o) const { return _mm_mul_ps(fVec, o.fVec); }
-M(Sk4f) divide (const Sk4f& o) const { return _mm_div_ps(fVec, o.fVec); }
-
-M(Sk4f) rsqrt() const { return _mm_rsqrt_ps(fVec); }
-M(Sk4f) sqrt() const { return _mm_sqrt_ps( fVec); }
-
-M(Sk4i) equal (const Sk4f& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
-M(Sk4i) notEqual (const Sk4f& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
-M(Sk4i) lessThan (const Sk4f& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
-M(Sk4i) greaterThan (const Sk4f& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
-M(Sk4i) lessThanEqual (const Sk4f& o) const { return _mm_cmple_ps (fVec, o.fVec); }
-M(Sk4i) greaterThanEqual(const Sk4f& o) const { return _mm_cmpge_ps (fVec, o.fVec); }
-
-M(Sk4f) Min(const Sk4f& a, const Sk4f& b) { return _mm_min_ps(a.fVec, b.fVec); }
-M(Sk4f) Max(const Sk4f& a, const Sk4f& b) { return _mm_max_ps(a.fVec, b.fVec); }
-
-// Now we'll write all the Sk4i specific methods. Same deal for M().
-#undef M
-#define M(...) template <> inline __VA_ARGS__ Sk4i::
-
-M() Sk4x(int32_t v) : fVec(_mm_set1_epi32(v)) {}
-M() Sk4x(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_set_epi32(d,c,b,a)) {}
-
-M(Sk4i) Load (const int32_t is[4]) { return _mm_loadu_si128((const __m128i*)is); }
-M(Sk4i) LoadAligned(const int32_t is[4]) { return _mm_load_si128 ((const __m128i*)is); }
-
-M(void) store (int32_t is[4]) const { _mm_storeu_si128((__m128i*)is, fVec); }
-M(void) storeAligned(int32_t is[4]) const { _mm_store_si128 ((__m128i*)is, fVec); }
-
-template <>
-M(Sk4f) reinterpret<Sk4f>() const { return as_4f(fVec); }
-
-template <>
-M(Sk4f) cast<Sk4f>() const { return _mm_cvtepi32_ps(fVec); }
-
-M(bool) allTrue() const { return 0xf == _mm_movemask_ps(as_4f(fVec)); }
-M(bool) anyTrue() const { return 0x0 != _mm_movemask_ps(as_4f(fVec)); }
-
-M(Sk4i) bitNot() const { return _mm_xor_si128(fVec, True()); }
-M(Sk4i) bitAnd(const Sk4i& o) const { return _mm_and_si128(fVec, o.fVec); }
-M(Sk4i) bitOr (const Sk4i& o) const { return _mm_or_si128 (fVec, o.fVec); }
-
-M(Sk4i) equal (const Sk4i& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
-M(Sk4i) lessThan (const Sk4i& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); }
-M(Sk4i) greaterThan (const Sk4i& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); }
-M(Sk4i) notEqual (const Sk4i& o) const { return this-> equal(o).bitNot(); }
-M(Sk4i) lessThanEqual (const Sk4i& o) const { return this->greaterThan(o).bitNot(); }
-M(Sk4i) greaterThanEqual(const Sk4i& o) const { return this-> lessThan(o).bitNot(); }
-
-M(Sk4i) add (const Sk4i& o) const { return _mm_add_epi32(fVec, o.fVec); }
-M(Sk4i) subtract(const Sk4i& o) const { return _mm_sub_epi32(fVec, o.fVec); }
-
-// SSE doesn't have integer division. Let's see how far we can get without Sk4i::divide().
-
-// Sk4i's multiply(), Min(), and Max() all improve significantly with SSE4.1.
-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
- M(Sk4i) multiply(const Sk4i& o) const { return _mm_mullo_epi32(fVec, o.fVec); }
- M(Sk4i) Min(const Sk4i& a, const Sk4i& b) { return _mm_min_epi32(a.fVec, b.fVec); }
- M(Sk4i) Max(const Sk4i& a, const Sk4i& b) { return _mm_max_epi32(a.fVec, b.fVec); }
-#else
- M(Sk4i) multiply(const Sk4i& o) const {
- // First 2 32->64 bit multiplies.
- __m128i mul02 = _mm_mul_epu32(fVec, o.fVec),
- mul13 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
- // Now recombine the high bits of the two products.
- return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul02, _MM_SHUFFLE(0,0,2,0)),
- _mm_shuffle_epi32(mul13, _MM_SHUFFLE(0,0,2,0)));
- }
-
- M(Sk4i) andNot(const Sk4i& o) const { return _mm_andnot_si128(o.fVec, fVec); }
-
- M(Sk4i) Min(const Sk4i& a, const Sk4i& b) {
- Sk4i less = a.lessThan(b);
- return a.bitAnd(less).bitOr(b.andNot(less));
- }
- M(Sk4i) Max(const Sk4i& a, const Sk4i& b) {
- Sk4i less = a.lessThan(b);
- return b.bitAnd(less).bitOr(a.andNot(less));
- }
-#endif
-
-#undef M
-
-#endif//Method definitions.
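
Note: the removed Sk4x_sse.h above shows how 32-bit lane multiplication was emulated before SSE4.1's _mm_mullo_epi32: two _mm_mul_epu32 calls produce 64-bit products for the even and odd lanes, and the shuffles keep only the low 32 bits of each product before interleaving them back into lane order. A minimal standalone sketch of the same trick, assuming an SSE2 target (the helper name is hypothetical, not part of this patch):

    #include <emmintrin.h>

    // Low 32 bits of each 32x32 lane multiply, using only SSE2.
    static inline __m128i mullo_epi32_sse2(__m128i a, __m128i b) {
        __m128i mul02 = _mm_mul_epu32(a, b);                                        // lanes 0,2 -> 64-bit products
        __m128i mul13 = _mm_mul_epu32(_mm_srli_si128(a, 4), _mm_srli_si128(b, 4));  // lanes 1,3
        return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul02, _MM_SHUFFLE(0,0,2,0)),   // keep the low 32 bits of each
                                  _mm_shuffle_epi32(mul13, _MM_SHUFFLE(0,0,2,0)));  // product, back in lane order
    }

The low 32 bits are the same for signed and unsigned operands, so the unsigned _mm_mul_epu32 is safe here.
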
diff --git a/src/opts/SkNx_neon.h b/src/opts/SkNx_neon.h
new file mode 100644
index 0000000000..01ea67c5d7
--- /dev/null
+++ b/src/opts/SkNx_neon.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNx_neon_DEFINED
+#define SkNx_neon_DEFINED
+
+#include <arm_neon.h>
+
+template <>
+class SkNi<2, int32_t> {
+public:
+ SkNi(int32x2_t vec) : fVec(vec) {}
+
+ SkNi() {}
+ bool allTrue() const { return fVec[0] && fVec[1]; }
+ bool anyTrue() const { return fVec[0] || fVec[1]; }
+private:
+ int32x2_t fVec;
+};
+
+template <>
+class SkNi<4, int32_t> {
+public:
+ SkNi(int32x4_t vec) : fVec(vec) {}
+
+ SkNi() {}
+ bool allTrue() const { return fVec[0] && fVec[1] && fVec[2] && fVec[3]; }
+ bool anyTrue() const { return fVec[0] || fVec[1] || fVec[2] || fVec[3]; }
+private:
+ int32x4_t fVec;
+};
+
+template <>
+class SkNf<2, float> {
+ typedef SkNi<2, int32_t> Ni;
+public:
+ SkNf(float32x2_t vec) : fVec(vec) {}
+
+ SkNf() {}
+ explicit SkNf(float val) : fVec(vdup_n_f32(val)) {}
+ static SkNf Load(const float vals[2]) { return vld1_f32(vals); }
+ SkNf(float a, float b) { fVec = (float32x2_t) { a, b }; }
+
+ void store(float vals[2]) const { vst1_f32(vals, fVec); }
+
+ SkNf approxInvert() const {
+ float32x2_t est0 = vrecpe_f32(fVec),
+ est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
+ return est1;
+ }
+ SkNf invert() const {
+ float32x2_t est1 = this->approxInvert().fVec,
+ est2 = vmul_f32(vrecps_f32(est1, fVec), est1);
+ return est2;
+ }
+
+ SkNf operator + (const SkNf& o) const { return vadd_f32(fVec, o.fVec); }
+ SkNf operator - (const SkNf& o) const { return vsub_f32(fVec, o.fVec); }
+ SkNf operator * (const SkNf& o) const { return vmul_f32(fVec, o.fVec); }
+ SkNf operator / (const SkNf& o) const {
+ #if defined(SK_CPU_ARM64)
+ return vdiv_f32(fVec, o.fVec);
+ #else
+ return vmul_f32(fVec, o.invert().fVec);
+ #endif
+ }
+
+ Ni operator == (const SkNf& o) const { return vreinterpret_s32_u32(vceq_f32(fVec, o.fVec)); }
+ Ni operator < (const SkNf& o) const { return vreinterpret_s32_u32(vclt_f32(fVec, o.fVec)); }
+ Ni operator > (const SkNf& o) const { return vreinterpret_s32_u32(vcgt_f32(fVec, o.fVec)); }
+ Ni operator <= (const SkNf& o) const { return vreinterpret_s32_u32(vcle_f32(fVec, o.fVec)); }
+ Ni operator >= (const SkNf& o) const { return vreinterpret_s32_u32(vcge_f32(fVec, o.fVec)); }
+ Ni operator != (const SkNf& o) const {
+ return vreinterpret_s32_u32(vmvn_u32(vceq_f32(fVec, o.fVec)));
+ }
+
+ static SkNf Min(const SkNf& l, const SkNf& r) { return vmin_f32(l.fVec, r.fVec); }
+ static SkNf Max(const SkNf& l, const SkNf& r) { return vmax_f32(l.fVec, r.fVec); }
+
+ SkNf rsqrt() const {
+ float32x2_t est0 = vrsqrte_f32(fVec),
+ est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
+ return est1;
+ }
+
+ SkNf sqrt() const {
+ #if defined(SK_CPU_ARM64)
+ return vsqrt_f32(fVec);
+ #else
+ float32x2_t est1 = this->rsqrt().fVec,
+ // An extra step of Newton's method to refine the estimate of 1/sqrt(this).
+ est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
+ return vmul_f32(fVec, est2);
+ #endif
+ }
+
+ float operator[] (int k) const {
+ SkASSERT(0 <= k && k < 2);
+ return fVec[k];
+ }
+
+private:
+ float32x2_t fVec;
+};
+
+#if defined(SK_CPU_ARM64)
+template <>
+class SkNi<2, int64_t> {
+public:
+ SkNi(int64x2_t vec) : fVec(vec) {}
+
+ SkNi() {}
+ bool allTrue() const { return fVec[0] && fVec[1]; }
+ bool anyTrue() const { return fVec[0] || fVec[1]; }
+private:
+ int64x2_t fVec;
+};
+
+template <>
+class SkNf<2, double> {
+ typedef SkNi<2, int64_t> Ni;
+public:
+ SkNf(float64x2_t vec) : fVec(vec) {}
+
+ SkNf() {}
+ explicit SkNf(double val) : fVec(vdupq_n_f64(val)) {}
+ static SkNf Load(const double vals[2]) { return vld1q_f64(vals); }
+ SkNf(double a, double b) { fVec = (float64x2_t) { a, b }; }
+
+ void store(double vals[2]) const { vst1q_f64(vals, fVec); }
+
+ SkNf operator + (const SkNf& o) const { return vaddq_f64(fVec, o.fVec); }
+ SkNf operator - (const SkNf& o) const { return vsubq_f64(fVec, o.fVec); }
+ SkNf operator * (const SkNf& o) const { return vmulq_f64(fVec, o.fVec); }
+ SkNf operator / (const SkNf& o) const { return vdivq_f64(fVec, o.fVec); }
+
+ Ni operator == (const SkNf& o) const { return vreinterpretq_s64_u64(vceqq_f64(fVec, o.fVec)); }
+ Ni operator < (const SkNf& o) const { return vreinterpretq_s64_u64(vcltq_f64(fVec, o.fVec)); }
+ Ni operator > (const SkNf& o) const { return vreinterpretq_s64_u64(vcgtq_f64(fVec, o.fVec)); }
+ Ni operator <= (const SkNf& o) const { return vreinterpretq_s64_u64(vcleq_f64(fVec, o.fVec)); }
+ Ni operator >= (const SkNf& o) const { return vreinterpretq_s64_u64(vcgeq_f64(fVec, o.fVec)); }
+ Ni operator != (const SkNf& o) const {
+ return vreinterpretq_s64_u32(vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(fVec, o.fVec))));
+ }
+
+ static SkNf Min(const SkNf& l, const SkNf& r) { return vminq_f64(l.fVec, r.fVec); }
+ static SkNf Max(const SkNf& l, const SkNf& r) { return vmaxq_f64(l.fVec, r.fVec); }
+
+ SkNf sqrt() const { return vsqrtq_f64(fVec); }
+ SkNf rsqrt() const {
+ float64x2_t est0 = vrsqrteq_f64(fVec),
+ est1 = vmulq_f64(vrsqrtsq_f64(fVec, vmulq_f64(est0, est0)), est0);
+ return est1;
+ }
+
+ SkNf approxInvert() const {
+ float64x2_t est0 = vrecpeq_f64(fVec),
+ est1 = vmulq_f64(vrecpsq_f64(est0, fVec), est0);
+ return est1;
+ }
+
+ SkNf invert() const {
+ float64x2_t est1 = this->approxInvert().fVec,
+ est2 = vmulq_f64(vrecpsq_f64(est1, fVec), est1),
+ est3 = vmulq_f64(vrecpsq_f64(est2, fVec), est2);
+ return est3;
+ }
+
+ double operator[] (int k) const {
+ SkASSERT(0 <= k && k < 2);
+ return fVec[k];
+ }
+
+private:
+ float64x2_t fVec;
+};
+#endif//defined(SK_CPU_ARM64)
+
+template <>
+class SkNf<4, float> {
+ typedef SkNi<4, int32_t> Ni;
+public:
+ SkNf(float32x4_t vec) : fVec(vec) {}
+ float32x4_t vec() const { return fVec; }
+
+ SkNf() {}
+ explicit SkNf(float val) : fVec(vdupq_n_f32(val)) {}
+ static SkNf Load(const float vals[4]) { return vld1q_f32(vals); }
+ SkNf(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }
+
+ void store(float vals[4]) const { vst1q_f32(vals, fVec); }
+
+ SkNf approxInvert() const {
+ float32x4_t est0 = vrecpeq_f32(fVec),
+ est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
+ return est1;
+ }
+ SkNf invert() const {
+ float32x4_t est1 = this->approxInvert().fVec,
+ est2 = vmulq_f32(vrecpsq_f32(est1, fVec), est1);
+ return est2;
+ }
+
+ SkNf operator + (const SkNf& o) const { return vaddq_f32(fVec, o.fVec); }
+ SkNf operator - (const SkNf& o) const { return vsubq_f32(fVec, o.fVec); }
+ SkNf operator * (const SkNf& o) const { return vmulq_f32(fVec, o.fVec); }
+ SkNf operator / (const SkNf& o) const {
+ #if defined(SK_CPU_ARM64)
+ return vdivq_f32(fVec, o.fVec);
+ #else
+ return vmulq_f32(fVec, o.invert().fVec);
+ #endif
+ }
+
+ Ni operator == (const SkNf& o) const { return vreinterpretq_s32_u32(vceqq_f32(fVec, o.fVec)); }
+ Ni operator < (const SkNf& o) const { return vreinterpretq_s32_u32(vcltq_f32(fVec, o.fVec)); }
+ Ni operator > (const SkNf& o) const { return vreinterpretq_s32_u32(vcgtq_f32(fVec, o.fVec)); }
+ Ni operator <= (const SkNf& o) const { return vreinterpretq_s32_u32(vcleq_f32(fVec, o.fVec)); }
+ Ni operator >= (const SkNf& o) const { return vreinterpretq_s32_u32(vcgeq_f32(fVec, o.fVec)); }
+ Ni operator != (const SkNf& o) const {
+ return vreinterpretq_s32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec)));
+ }
+
+ static SkNf Min(const SkNf& l, const SkNf& r) { return vminq_f32(l.fVec, r.fVec); }
+ static SkNf Max(const SkNf& l, const SkNf& r) { return vmaxq_f32(l.fVec, r.fVec); }
+
+ SkNf rsqrt() const {
+ float32x4_t est0 = vrsqrteq_f32(fVec),
+ est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
+ return est1;
+ }
+
+ SkNf sqrt() const {
+ #if defined(SK_CPU_ARM64)
+ return vsqrtq_f32(fVec);
+ #else
+ float32x4_t est1 = this->rsqrt().fVec,
+ // An extra step of Newton's method to refine the estimate of 1/sqrt(this).
+ est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
+ return vmulq_f32(fVec, est2);
+ #endif
+ }
+
+ float operator[] (int k) const {
+ SkASSERT(0 <= k && k < 4);
+ return fVec[k];
+ }
+
+private:
+ float32x4_t fVec;
+};
+
+#endif//SkNx_neon_DEFINED
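
Note: the approxInvert()/invert() and rsqrt()/sqrt() methods above lean on NEON's estimate-and-refine intrinsics: vrecps_f32(x, a) returns 2 - a*x and vrsqrts_f32(a, x*x) returns (3 - a*x*x)/2, so multiplying either by the current estimate is one Newton-Raphson step. A scalar sketch of the reciprocal refinement, purely for illustration and not part of the patch:

    #include <cstdio>

    int main() {
        const float a = 7.0f;
        float est = 0.1f;                      // crude seed, standing in for vrecpe_f32()
        for (int i = 0; i < 3; i++) {
            est = est * (2.0f - a * est);      // same math as vmul_f32(vrecps_f32(est, a), est)
            std::printf("step %d: %.9f (1/a = %.9f)\n", i, est, 1.0f / a);
        }
        return 0;
    }

Each step roughly doubles the number of correct bits, which is why invert() takes one more refinement step than approxInvert().
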
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
new file mode 100644
index 0000000000..87754ad155
--- /dev/null
+++ b/src/opts/SkNx_sse.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNx_sse_DEFINED
+#define SkNx_sse_DEFINED
+
+// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
+#include <immintrin.h>
+
+template <>
+class SkNi<2, int32_t> {
+public:
+ SkNi(const __m128i& vec) : fVec(vec) {}
+
+ SkNi() {}
+ bool allTrue() const { return 0xff == (_mm_movemask_epi8(fVec) & 0xff); }
+ bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(fVec) & 0xff); }
+
+private:
+ __m128i fVec;
+};
+
+template <>
+class SkNi<4, int32_t> {
+public:
+ SkNi(const __m128i& vec) : fVec(vec) {}
+
+ SkNi() {}
+ bool allTrue() const { return 0xffff == _mm_movemask_epi8(fVec); }
+ bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(fVec); }
+
+private:
+ __m128i fVec;
+};
+
+template <>
+class SkNi<2, int64_t> {
+public:
+ SkNi(const __m128i& vec) : fVec(vec) {}
+
+ SkNi() {}
+ bool allTrue() const { return 0xffff == _mm_movemask_epi8(fVec); }
+ bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(fVec); }
+
+private:
+ __m128i fVec;
+};
+
+
+template <>
+class SkNf<2, float> {
+ typedef SkNi<2, int32_t> Ni;
+public:
+ SkNf(const __m128& vec) : fVec(vec) {}
+
+ SkNf() {}
+ explicit SkNf(float val) : fVec(_mm_set1_ps(val)) {}
+ static SkNf Load(const float vals[2]) {
+ return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)vals));
+ }
+ SkNf(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}
+
+ void store(float vals[2]) const { _mm_storel_pi((__m64*)vals, fVec); }
+
+ SkNf operator + (const SkNf& o) const { return _mm_add_ps(fVec, o.fVec); }
+ SkNf operator - (const SkNf& o) const { return _mm_sub_ps(fVec, o.fVec); }
+ SkNf operator * (const SkNf& o) const { return _mm_mul_ps(fVec, o.fVec); }
+ SkNf operator / (const SkNf& o) const { return _mm_div_ps(fVec, o.fVec); }
+
+ Ni operator == (const SkNf& o) const { return _mm_castps_si128(_mm_cmpeq_ps (fVec, o.fVec)); }
+ Ni operator != (const SkNf& o) const { return _mm_castps_si128(_mm_cmpneq_ps(fVec, o.fVec)); }
+ Ni operator < (const SkNf& o) const { return _mm_castps_si128(_mm_cmplt_ps (fVec, o.fVec)); }
+ Ni operator > (const SkNf& o) const { return _mm_castps_si128(_mm_cmpgt_ps (fVec, o.fVec)); }
+ Ni operator <= (const SkNf& o) const { return _mm_castps_si128(_mm_cmple_ps (fVec, o.fVec)); }
+ Ni operator >= (const SkNf& o) const { return _mm_castps_si128(_mm_cmpge_ps (fVec, o.fVec)); }
+
+ static SkNf Min(const SkNf& l, const SkNf& r) { return _mm_min_ps(l.fVec, r.fVec); }
+ static SkNf Max(const SkNf& l, const SkNf& r) { return _mm_max_ps(l.fVec, r.fVec); }
+
+ SkNf sqrt() const { return _mm_sqrt_ps (fVec); }
+ SkNf rsqrt() const { return _mm_rsqrt_ps(fVec); }
+
+ SkNf invert() const { return SkNf(1) / *this; }
+ SkNf approxInvert() const { return _mm_rcp_ps(fVec); }
+
+ float operator[] (int k) const {
+ SkASSERT(0 <= k && k < 2);
+ union { __m128 v; float fs[4]; } pun = {fVec};
+ return pun.fs[k];
+ }
+
+private:
+ __m128 fVec;
+};
+
+template <>
+class SkNf<2, double> {
+ typedef SkNi<2, int64_t> Ni;
+public:
+ SkNf(const __m128d& vec) : fVec(vec) {}
+
+ SkNf() {}
+ explicit SkNf(double val) : fVec( _mm_set1_pd(val) ) {}
+ static SkNf Load(const double vals[2]) { return _mm_loadu_pd(vals); }
+ SkNf(double a, double b) : fVec(_mm_setr_pd(a,b)) {}
+
+ void store(double vals[2]) const { _mm_storeu_pd(vals, fVec); }
+
+ SkNf operator + (const SkNf& o) const { return _mm_add_pd(fVec, o.fVec); }
+ SkNf operator - (const SkNf& o) const { return _mm_sub_pd(fVec, o.fVec); }
+ SkNf operator * (const SkNf& o) const { return _mm_mul_pd(fVec, o.fVec); }
+ SkNf operator / (const SkNf& o) const { return _mm_div_pd(fVec, o.fVec); }
+
+ Ni operator == (const SkNf& o) const { return _mm_castpd_si128(_mm_cmpeq_pd (fVec, o.fVec)); }
+ Ni operator != (const SkNf& o) const { return _mm_castpd_si128(_mm_cmpneq_pd(fVec, o.fVec)); }
+ Ni operator < (const SkNf& o) const { return _mm_castpd_si128(_mm_cmplt_pd (fVec, o.fVec)); }
+ Ni operator > (const SkNf& o) const { return _mm_castpd_si128(_mm_cmpgt_pd (fVec, o.fVec)); }
+ Ni operator <= (const SkNf& o) const { return _mm_castpd_si128(_mm_cmple_pd (fVec, o.fVec)); }
+ Ni operator >= (const SkNf& o) const { return _mm_castpd_si128(_mm_cmpge_pd (fVec, o.fVec)); }
+
+ static SkNf Min(const SkNf& l, const SkNf& r) { return _mm_min_pd(l.fVec, r.fVec); }
+ static SkNf Max(const SkNf& l, const SkNf& r) { return _mm_max_pd(l.fVec, r.fVec); }
+
+ SkNf sqrt() const { return _mm_sqrt_pd(fVec); }
+ SkNf rsqrt() const { return _mm_cvtps_pd(_mm_rsqrt_ps(_mm_cvtpd_ps(fVec))); }
+
+ SkNf invert() const { return SkNf(1) / *this; }
+ SkNf approxInvert() const { return _mm_cvtps_pd(_mm_rcp_ps(_mm_cvtpd_ps(fVec))); }
+
+ double operator[] (int k) const {
+ SkASSERT(0 <= k && k < 2);
+ union { __m128d v; double ds[2]; } pun = {fVec};
+ return pun.ds[k];
+ }
+
+private:
+ __m128d fVec;
+};
+
+template <>
+class SkNf<4, float> {
+ typedef SkNi<4, int32_t> Ni;
+public:
+ SkNf(const __m128& vec) : fVec(vec) {}
+ __m128 vec() const { return fVec; }
+
+ SkNf() {}
+ explicit SkNf(float val) : fVec( _mm_set1_ps(val) ) {}
+ static SkNf Load(const float vals[4]) { return _mm_loadu_ps(vals); }
+ SkNf(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}
+
+ void store(float vals[4]) const { _mm_storeu_ps(vals, fVec); }
+
+ SkNf operator + (const SkNf& o) const { return _mm_add_ps(fVec, o.fVec); }
+ SkNf operator - (const SkNf& o) const { return _mm_sub_ps(fVec, o.fVec); }
+ SkNf operator * (const SkNf& o) const { return _mm_mul_ps(fVec, o.fVec); }
+ SkNf operator / (const SkNf& o) const { return _mm_div_ps(fVec, o.fVec); }
+
+ Ni operator == (const SkNf& o) const { return _mm_castps_si128(_mm_cmpeq_ps (fVec, o.fVec)); }
+ Ni operator != (const SkNf& o) const { return _mm_castps_si128(_mm_cmpneq_ps(fVec, o.fVec)); }
+ Ni operator < (const SkNf& o) const { return _mm_castps_si128(_mm_cmplt_ps (fVec, o.fVec)); }
+ Ni operator > (const SkNf& o) const { return _mm_castps_si128(_mm_cmpgt_ps (fVec, o.fVec)); }
+ Ni operator <= (const SkNf& o) const { return _mm_castps_si128(_mm_cmple_ps (fVec, o.fVec)); }
+ Ni operator >= (const SkNf& o) const { return _mm_castps_si128(_mm_cmpge_ps (fVec, o.fVec)); }
+
+ static SkNf Min(const SkNf& l, const SkNf& r) { return _mm_min_ps(l.fVec, r.fVec); }
+ static SkNf Max(const SkNf& l, const SkNf& r) { return _mm_max_ps(l.fVec, r.fVec); }
+
+ SkNf sqrt() const { return _mm_sqrt_ps (fVec); }
+ SkNf rsqrt() const { return _mm_rsqrt_ps(fVec); }
+
+ SkNf invert() const { return SkNf(1) / *this; }
+ SkNf approxInvert() const { return _mm_rcp_ps(fVec); }
+
+ float operator[] (int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { __m128 v; float fs[4]; } pun = {fVec};
+ return pun.fs[k];
+ }
+
+private:
+ __m128 fVec;
+};
+
+
+#endif//SkNx_sse_DEFINED
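
Note: in these SSE specializations the float comparisons hand back SkNi masks, and allTrue()/anyTrue() reduce them with _mm_movemask_epi8 (the 2-lane case masks with 0xff so only the low 8 byte lanes count). A usage sketch, assuming an x86 build with the new SkNx.h on the include path and Sk4f as the 4-wide float typedef:

    #include "SkNx.h"

    static bool all_equal(const float a[4], const float b[4]) {
        // operator== returns an SkNi<4, int32_t> mask; in this SSE backend
        // allTrue() checks that _mm_movemask_epi8 saw all 16 byte lanes set.
        return (Sk4f::Load(a) == Sk4f::Load(b)).allTrue();
    }
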
diff --git a/src/opts/SkPMFloat_SSE2.h b/src/opts/SkPMFloat_SSE2.h
index fa920d75d6..231940d86e 100644
--- a/src/opts/SkPMFloat_SSE2.h
+++ b/src/opts/SkPMFloat_SSE2.h
@@ -5,11 +5,6 @@
* found in the LICENSE file.
*/
-inline SkPMFloat& SkPMFloat::operator=(const SkPMFloat& that) {
- fColors = that.fColors;
- return *this;
-}
-
// For SkPMFloat(SkPMColor), we widen our 8 bit components (fix8) to 8-bit components in 16 bits
// (fix8_16), then widen those to 8-bit-in-32-bits (fix8_32), and finally convert those to floats.
@@ -33,7 +28,7 @@ inline SkPMColor SkPMFloat::get() const {
inline SkPMColor SkPMFloat::clamped() const {
// We don't use _mm_cvtps_epi32, because we want precise control over how 0.5 rounds (up).
- __m128i fix8_32 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), fColors)),
+ __m128i fix8_32 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), fColors.vec())),
fix8_16 = _mm_packus_epi16(fix8_32, fix8_32),
fix8 = _mm_packus_epi16(fix8_16, fix8_16);
SkPMColor c = _mm_cvtsi128_si32(fix8);
@@ -43,7 +38,7 @@ inline SkPMColor SkPMFloat::clamped() const {
inline SkPMColor SkPMFloat::trunc() const {
// Basically, same as clamped(), but no rounding.
- __m128i fix8_32 = _mm_cvttps_epi32(fColors),
+ __m128i fix8_32 = _mm_cvttps_epi32(fColors.vec()),
fix8_16 = _mm_packus_epi16(fix8_32, fix8_32),
fix8 = _mm_packus_epi16(fix8_16, fix8_16);
SkPMColor c = _mm_cvtsi128_si32(fix8);
@@ -72,10 +67,10 @@ inline void SkPMFloat::ClampTo4PMColors(
SkPMColor colors[4]) {
// Same as _SSSE3.h's. We use 3 _mm_packus_epi16() where the naive loop uses 8.
// We don't use _mm_cvtps_epi32, because we want precise control over how 0.5 rounds (up).
- __m128i c0 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), a.fColors)),
- c1 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), b.fColors)),
- c2 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), c.fColors)),
- c3 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), d.fColors));
+ __m128i c0 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), a.fColors.vec())),
+ c1 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), b.fColors.vec())),
+ c2 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), c.fColors.vec())),
+ c3 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), d.fColors.vec()));
__m128i c3210 = _mm_packus_epi16(_mm_packus_epi16(c0, c1),
_mm_packus_epi16(c2, c3));
_mm_storeu_si128((__m128i*)colors, c3210);
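
Note: this back-end deliberately skips _mm_cvtps_epi32 and instead adds 0.5 and truncates with _mm_cvttps_epi32, so exact halves always round up rather than following the CPU's round-to-nearest-even default. A scalar illustration of the difference, assuming the default rounding mode (not part of the patch):

    #include <cmath>
    #include <cstdio>

    int main() {
        const float vals[] = {126.5f, 127.5f};
        for (float v : vals) {
            int half_up = (int)(v + 0.5f);          // the add-0.5-then-truncate choice
            int nearest = (int)std::nearbyint(v);   // what a plain convert-with-rounding would do
            std::printf("%.1f -> half up %d, nearest even %d\n", v, half_up, nearest);
        }
        return 0;
    }

126.5 rounds to 127 one way and 126 the other; 127.5 lands on 128 either way.
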
diff --git a/src/opts/SkPMFloat_SSSE3.h b/src/opts/SkPMFloat_SSSE3.h
index 6ff6929d01..390c71ce3f 100644
--- a/src/opts/SkPMFloat_SSSE3.h
+++ b/src/opts/SkPMFloat_SSSE3.h
@@ -5,11 +5,6 @@
* found in the LICENSE file.
*/
-inline SkPMFloat& SkPMFloat::operator=(const SkPMFloat& that) {
- fColors = that.fColors;
- return *this;
-}
-
// For SkPMFloat(SkPMColor), we widen our 8 bit components (fix8) to 8-bit components in 32 bits
// (fix8_32), then convert those to floats.
@@ -29,7 +24,7 @@ inline SkPMFloat::SkPMFloat(SkPMColor c) {
inline SkPMColor SkPMFloat::trunc() const {
const int _ = 255; // _ means to zero that byte.
- __m128i fix8_32 = _mm_cvttps_epi32(fColors),
+ __m128i fix8_32 = _mm_cvttps_epi32(fColors.vec()),
fix8 = _mm_shuffle_epi8(fix8_32, _mm_set_epi8(_,_,_,_, _,_,_,_, _,_,_,_, 12,8,4,0));
SkPMColor c = _mm_cvtsi128_si32(fix8);
SkPMColorAssert(c);
@@ -38,12 +33,12 @@ inline SkPMColor SkPMFloat::trunc() const {
inline SkPMColor SkPMFloat::get() const {
SkASSERT(this->isValid());
- return SkPMFloat(Sk4f(0.5f) + *this).trunc();
+ return SkPMFloat(Sk4s(0.5f) + *this).trunc();
}
inline SkPMColor SkPMFloat::clamped() const {
// We don't use _mm_cvtps_epi32, because we want precise control over how 0.5 rounds (up).
- __m128i fix8_32 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), fColors)),
+ __m128i fix8_32 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), fColors.vec())),
fix8_16 = _mm_packus_epi16(fix8_32, fix8_32),
fix8 = _mm_packus_epi16(fix8_16, fix8_16);
SkPMColor c = _mm_cvtsi128_si32(fix8);
@@ -75,10 +70,10 @@ inline void SkPMFloat::ClampTo4PMColors(
SkPMColor colors[4]) {
// Same as _SSE2.h's. We use 3 _mm_packus_epi16() where the naive loop uses 8.
// We don't use _mm_cvtps_epi32, because we want precise control over how 0.5 rounds (up).
- __m128i c0 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), a.fColors)),
- c1 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), b.fColors)),
- c2 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), c.fColors)),
- c3 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), d.fColors));
+ __m128i c0 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), a.fColors.vec())),
+ c1 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), b.fColors.vec())),
+ c2 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), c.fColors.vec())),
+ c3 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), d.fColors.vec()));
__m128i c3210 = _mm_packus_epi16(_mm_packus_epi16(c0, c1),
_mm_packus_epi16(c2, c3));
_mm_storeu_si128((__m128i*)colors, c3210);
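
Note: trunc() in this SSSE3 path collapses the four 32-bit lanes to bytes with a single _mm_shuffle_epi8: indices 0, 4, 8, 12 pick the low byte of each lane, and any index byte with its high bit set (255, the `_` in the source) zeroes that output byte. A standalone sketch, assuming an SSSE3-capable build (illustration only):

    #include <tmmintrin.h>
    #include <cstdio>

    int main() {
        __m128i lanes = _mm_setr_epi32(0x11, 0x22, 0x33, 0x44);
        const char Z = (char)255;  // high bit set => zero this output byte
        __m128i packed = _mm_shuffle_epi8(lanes,
                _mm_set_epi8(Z,Z,Z,Z, Z,Z,Z,Z, Z,Z,Z,Z, 12,8,4,0));
        std::printf("0x%08x\n", (unsigned)_mm_cvtsi128_si32(packed));  // prints 0x44332211
        return 0;
    }
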
diff --git a/src/opts/SkPMFloat_neon.h b/src/opts/SkPMFloat_neon.h
index e5b16f5e6f..41c553ebb9 100644
--- a/src/opts/SkPMFloat_neon.h
+++ b/src/opts/SkPMFloat_neon.h
@@ -5,11 +5,6 @@
* found in the LICENSE file.
*/
-inline SkPMFloat& SkPMFloat::operator=(const SkPMFloat& that) {
- fColors = that.fColors;
- return *this;
-}
-
// For SkPMFloat(SkPMColor), we widen our 8 bit components (fix8) to 8-bit components in 16 bits
// (fix8_16), then widen those to 8-bit-in-32-bits (fix8_32), and finally convert those to floats.
@@ -27,7 +22,7 @@ inline SkPMFloat::SkPMFloat(SkPMColor c) {
}
inline SkPMColor SkPMFloat::trunc() const {
- uint32x4_t fix8_32 = vcvtq_u32_f32(fColors); // vcvtq_u32_f32 truncates
+ uint32x4_t fix8_32 = vcvtq_u32_f32(fColors.vec()); // vcvtq_u32_f32 truncates
uint16x4_t fix8_16 = vmovn_u32(fix8_32);
uint8x8_t fix8 = vmovn_u16(vcombine_u16(fix8_16, vdup_n_u16(0)));
SkPMColor c = vget_lane_u32((uint32x2_t)fix8, 0);
@@ -37,11 +32,11 @@ inline SkPMColor SkPMFloat::trunc() const {
inline SkPMColor SkPMFloat::get() const {
SkASSERT(this->isValid());
- return SkPMFloat(Sk4f(0.5f) + *this).trunc();
+ return SkPMFloat(Sk4s(0.5f) + *this).trunc();
}
inline SkPMColor SkPMFloat::clamped() const {
- float32x4_t add_half = vaddq_f32(fColors, vdupq_n_f32(0.5f));
+ float32x4_t add_half = vaddq_f32(fColors.vec(), vdupq_n_f32(0.5f));
uint32x4_t fix8_32 = vcvtq_u32_f32(add_half); // vcvtq_u32_f32 truncates, so round manually
uint16x4_t fix8_16 = vqmovn_u32(fix8_32);
uint8x8_t fix8 = vqmovn_u16(vcombine_u16(fix8_16, vdup_n_u16(0)));
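
Note: the NEON clamped() path gets its clamping for free from the saturating narrows: vqmovn_u32 and vqmovn_u16 clamp each step to the narrower type's range, so any component that rounded above 255 comes out as 255. A scalar sketch of that saturation, for illustration only:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    static uint8_t narrow_saturating(uint32_t v) {
        // One lane's worth of what vqmovn does: clamp into the destination range.
        return (uint8_t)std::min<uint32_t>(v, 255);
    }

    int main() {
        std::printf("%d %d %d\n", narrow_saturating(12), narrow_saturating(255), narrow_saturating(300));
        return 0;
    }
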
diff --git a/src/opts/SkPMFloat_none.h b/src/opts/SkPMFloat_none.h
index 86516b1875..19557f11de 100644
--- a/src/opts/SkPMFloat_none.h
+++ b/src/opts/SkPMFloat_none.h
@@ -5,11 +5,6 @@
* found in the LICENSE file.
*/
-inline SkPMFloat& SkPMFloat::operator=(const SkPMFloat& that) {
- for (int i = 0; i < 4; i++) { fColor[i] = that.fColor[i]; }
- return *this;
-}
-
inline SkPMFloat::SkPMFloat(SkPMColor c) {
*this = SkPMFloat::FromARGB(SkGetPackedA32(c),
SkGetPackedR32(c),