author    Mike Klein <mtklein@chromium.org>              2016-10-19 09:21:11 -0400
committer Skia Commit-Bot <skia-commit-bot@chromium.org> 2016-10-19 15:56:30 +0000
commit7c78f3a863c620d722f02d00b88de5b3cde298a4 (patch)
tree64e2122bed852086d7811d101efd697d57e93568 /src
parent520ced63cf0750e207223169a31edb2a16e5ca96 (diff)
SkNx: use SK_ALWAYS_INLINE thoroughly.
MSVC's not so good at inlining, so tell it where to. It won't hurt the other compilers.

This has nothing directly to do with ODR safety: the anonymous namespaces and 'static' on freestanding functions provide the correctness we need there. But this change can help to mechanically prevent the sort of problems ODR violations can lead to.

I may follow up by extending this strategy to Sk4px, which is used to implement a lot of the legacy xfermodes.

BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search?issue=3608
CQ_INCLUDE_TRYBOTS=master.client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot
Change-Id: I927334c40910ce43da1fbabdf243c9cd5438bea6
Reviewed-on: https://skia-review.googlesource.com/3608
Reviewed-by: Matt Sarett <msarett@google.com>
Commit-Queue: Mike Klein <mtklein@chromium.org>
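The pattern the diff applies is mechanical: define a short local macro AI for SK_ALWAYS_INLINE, stamp it on every method and freestanding helper, and #undef it at the end of the header. A minimal self-contained sketch of the idea; Demo2f is illustrative rather than Skia's, and the SK_ALWAYS_INLINE fallback is an assumption about its usual definition:

    // Sketch only: Skia gets SK_ALWAYS_INLINE from its own headers; this
    // fallback assumes the conventional definition (__forceinline on MSVC,
    // the always_inline attribute elsewhere).
    #if !defined(SK_ALWAYS_INLINE)
        #if defined(_MSC_VER)
            #define SK_ALWAYS_INLINE __forceinline
        #else
            #define SK_ALWAYS_INLINE inline __attribute__((always_inline))
        #endif
    #endif

    #define AI SK_ALWAYS_INLINE

    namespace {  // internal linkage: each translation unit gets a private copy

    // Hypothetical 2-lane float type standing in for SkNx.
    struct Demo2f {
        float fLo, fHi;

        AI Demo2f(float v)            : fLo(v),  fHi(v)  {}
        AI Demo2f(float lo, float hi) : fLo(lo), fHi(hi) {}

        AI Demo2f operator+(const Demo2f& o) const {
            return { fLo + o.fLo, fHi + o.fHi };
        }
    };

    }  // namespace

    #undef AI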
Diffstat (limited to 'src')
-rw-r--r--  src/core/SkNx.h      | 215
-rw-r--r--  src/opts/SkNx_neon.h | 331
-rw-r--r--  src/opts/SkNx_sse.h  | 479
3 files changed, 509 insertions(+), 516 deletions(-)
diff --git a/src/core/SkNx.h b/src/core/SkNx.h
index 6d9af9fe47..ad0fc0d7c1 100644
--- a/src/core/SkNx.h
+++ b/src/core/SkNx.h
@@ -8,8 +8,6 @@
#ifndef SkNx_DEFINED
#define SkNx_DEFINED
-//#define SKNX_NO_SIMD
-
#include "SkScalar.h"
#include "SkTypes.h"
#include <limits>
@@ -21,9 +19,10 @@
template <int N, typename T> struct SkNx_abi { SkNx_abi<N/2,T> lo, hi; };
template < typename T> struct SkNx_abi<1,T> { T val; };
-namespace {
+// Every single SkNx method wants to be fully inlined. (We know better than MSVC).
+#define AI SK_ALWAYS_INLINE
-#define SI static inline
+namespace {
// The default SkNx<N,T> just proxies down to a pair of SkNx<N/2, T>.
template <int N, typename T>
@@ -32,40 +31,41 @@ struct SkNx {
Half fLo, fHi;
- SkNx() = default;
- SkNx(const Half& lo, const Half& hi) : fLo(lo), fHi(hi) {}
+ AI SkNx() = default;
+ AI SkNx(const Half& lo, const Half& hi) : fLo(lo), fHi(hi) {}
- SkNx(T v) : fLo(v), fHi(v) {}
+ AI SkNx(T v) : fLo(v), fHi(v) {}
- SkNx(T a, T b) : fLo(a) , fHi(b) { static_assert(N==2, ""); }
- SkNx(T a, T b, T c, T d) : fLo(a,b), fHi(c,d) { static_assert(N==4, ""); }
- SkNx(T a, T b, T c, T d, T e, T f, T g, T h) : fLo(a,b,c,d), fHi(e,f,g,h) {
+ AI SkNx(T a, T b) : fLo(a) , fHi(b) { static_assert(N==2, ""); }
+ AI SkNx(T a, T b, T c, T d) : fLo(a,b), fHi(c,d) { static_assert(N==4, ""); }
+ AI SkNx(T a, T b, T c, T d, T e, T f, T g, T h) : fLo(a,b,c,d), fHi(e,f,g,h) {
static_assert(N==8, "");
}
- SkNx(T a, T b, T c, T d, T e, T f, T g, T h,
- T i, T j, T k, T l, T m, T n, T o, T p) : fLo(a,b,c,d, e,f,g,h), fHi(i,j,k,l, m,n,o,p) {
+ AI SkNx(T a, T b, T c, T d, T e, T f, T g, T h,
+ T i, T j, T k, T l, T m, T n, T o, T p)
+ : fLo(a,b,c,d, e,f,g,h), fHi(i,j,k,l, m,n,o,p) {
static_assert(N==16, "");
}
- SkNx(const SkNx_abi<N,T>& a) : fLo(a.lo), fHi(a.hi) {}
- operator SkNx_abi<N,T>() const { return { (SkNx_abi<N/2,T>)fLo, (SkNx_abi<N/2,T>)fHi }; }
+ AI SkNx(const SkNx_abi<N,T>& a) : fLo(a.lo), fHi(a.hi) {}
+ AI operator SkNx_abi<N,T>() const { return { (SkNx_abi<N/2,T>)fLo, (SkNx_abi<N/2,T>)fHi }; }
- T operator[](int k) const {
+ AI T operator[](int k) const {
SkASSERT(0 <= k && k < N);
return k < N/2 ? fLo[k] : fHi[k-N/2];
}
- static SkNx Load(const void* vptr) {
+ AI static SkNx Load(const void* vptr) {
auto ptr = (const char*)vptr;
return { Half::Load(ptr), Half::Load(ptr + N/2*sizeof(T)) };
}
- void store(void* vptr) const {
+ AI void store(void* vptr) const {
auto ptr = (char*)vptr;
fLo.store(ptr);
fHi.store(ptr + N/2*sizeof(T));
}
- static void Load4(const void* vptr, SkNx* a, SkNx* b, SkNx* c, SkNx* d) {
+ AI static void Load4(const void* vptr, SkNx* a, SkNx* b, SkNx* c, SkNx* d) {
auto ptr = (const char*)vptr;
Half al, bl, cl, dl,
ah, bh, ch, dh;
@@ -76,55 +76,55 @@ struct SkNx {
*c = SkNx{cl, ch};
*d = SkNx{dl, dh};
}
- static void Store4(void* vptr, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
+ AI static void Store4(void* vptr, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
auto ptr = (char*)vptr;
Half::Store4(ptr, a.fLo, b.fLo, c.fLo, d.fLo);
Half::Store4(ptr + 4*N/2*sizeof(T), a.fHi, b.fHi, c.fHi, d.fHi);
}
- bool anyTrue() const { return fLo.anyTrue() || fHi.anyTrue(); }
- bool allTrue() const { return fLo.allTrue() && fHi.allTrue(); }
+ AI bool anyTrue() const { return fLo.anyTrue() || fHi.anyTrue(); }
+ AI bool allTrue() const { return fLo.allTrue() && fHi.allTrue(); }
- SkNx abs() const { return { fLo. abs(), fHi. abs() }; }
- SkNx sqrt() const { return { fLo. sqrt(), fHi. sqrt() }; }
- SkNx rsqrt() const { return { fLo. rsqrt(), fHi. rsqrt() }; }
- SkNx floor() const { return { fLo. floor(), fHi. floor() }; }
- SkNx invert() const { return { fLo.invert(), fHi.invert() }; }
+ AI SkNx abs() const { return { fLo. abs(), fHi. abs() }; }
+ AI SkNx sqrt() const { return { fLo. sqrt(), fHi. sqrt() }; }
+ AI SkNx rsqrt() const { return { fLo. rsqrt(), fHi. rsqrt() }; }
+ AI SkNx floor() const { return { fLo. floor(), fHi. floor() }; }
+ AI SkNx invert() const { return { fLo.invert(), fHi.invert() }; }
- SkNx operator!() const { return { !fLo, !fHi }; }
- SkNx operator-() const { return { -fLo, -fHi }; }
- SkNx operator~() const { return { ~fLo, ~fHi }; }
+ AI SkNx operator!() const { return { !fLo, !fHi }; }
+ AI SkNx operator-() const { return { -fLo, -fHi }; }
+ AI SkNx operator~() const { return { ~fLo, ~fHi }; }
- SkNx operator<<(int bits) const { return { fLo << bits, fHi << bits }; }
- SkNx operator>>(int bits) const { return { fLo >> bits, fHi >> bits }; }
+ AI SkNx operator<<(int bits) const { return { fLo << bits, fHi << bits }; }
+ AI SkNx operator>>(int bits) const { return { fLo >> bits, fHi >> bits }; }
- SkNx operator+(const SkNx& y) const { return { fLo + y.fLo, fHi + y.fHi }; }
- SkNx operator-(const SkNx& y) const { return { fLo - y.fLo, fHi - y.fHi }; }
- SkNx operator*(const SkNx& y) const { return { fLo * y.fLo, fHi * y.fHi }; }
- SkNx operator/(const SkNx& y) const { return { fLo / y.fLo, fHi / y.fHi }; }
+ AI SkNx operator+(const SkNx& y) const { return { fLo + y.fLo, fHi + y.fHi }; }
+ AI SkNx operator-(const SkNx& y) const { return { fLo - y.fLo, fHi - y.fHi }; }
+ AI SkNx operator*(const SkNx& y) const { return { fLo * y.fLo, fHi * y.fHi }; }
+ AI SkNx operator/(const SkNx& y) const { return { fLo / y.fLo, fHi / y.fHi }; }
- SkNx operator&(const SkNx& y) const { return { fLo & y.fLo, fHi & y.fHi }; }
- SkNx operator|(const SkNx& y) const { return { fLo | y.fLo, fHi | y.fHi }; }
- SkNx operator^(const SkNx& y) const { return { fLo ^ y.fLo, fHi ^ y.fHi }; }
+ AI SkNx operator&(const SkNx& y) const { return { fLo & y.fLo, fHi & y.fHi }; }
+ AI SkNx operator|(const SkNx& y) const { return { fLo | y.fLo, fHi | y.fHi }; }
+ AI SkNx operator^(const SkNx& y) const { return { fLo ^ y.fLo, fHi ^ y.fHi }; }
- SkNx operator==(const SkNx& y) const { return { fLo == y.fLo, fHi == y.fHi }; }
- SkNx operator!=(const SkNx& y) const { return { fLo != y.fLo, fHi != y.fHi }; }
- SkNx operator<=(const SkNx& y) const { return { fLo <= y.fLo, fHi <= y.fHi }; }
- SkNx operator>=(const SkNx& y) const { return { fLo >= y.fLo, fHi >= y.fHi }; }
- SkNx operator< (const SkNx& y) const { return { fLo < y.fLo, fHi < y.fHi }; }
- SkNx operator> (const SkNx& y) const { return { fLo > y.fLo, fHi > y.fHi }; }
+ AI SkNx operator==(const SkNx& y) const { return { fLo == y.fLo, fHi == y.fHi }; }
+ AI SkNx operator!=(const SkNx& y) const { return { fLo != y.fLo, fHi != y.fHi }; }
+ AI SkNx operator<=(const SkNx& y) const { return { fLo <= y.fLo, fHi <= y.fHi }; }
+ AI SkNx operator>=(const SkNx& y) const { return { fLo >= y.fLo, fHi >= y.fHi }; }
+ AI SkNx operator< (const SkNx& y) const { return { fLo < y.fLo, fHi < y.fHi }; }
+ AI SkNx operator> (const SkNx& y) const { return { fLo > y.fLo, fHi > y.fHi }; }
- SkNx saturatedAdd(const SkNx& y) const {
+ AI SkNx saturatedAdd(const SkNx& y) const {
return { fLo.saturatedAdd(y.fLo), fHi.saturatedAdd(y.fHi) };
}
- SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
return { fLo.thenElse(t.fLo, e.fLo), fHi.thenElse(t.fHi, e.fHi) };
}
- static SkNx Min(const SkNx& x, const SkNx& y) {
+ AI static SkNx Min(const SkNx& x, const SkNx& y) {
return { Half::Min(x.fLo, y.fLo), Half::Min(x.fHi, y.fHi) };
}
- static SkNx Max(const SkNx& x, const SkNx& y) {
+ AI static SkNx Max(const SkNx& x, const SkNx& y) {
return { Half::Max(x.fLo, y.fLo), Half::Max(x.fHi, y.fHi) };
}
};
@@ -134,33 +134,33 @@ template <typename T>
struct SkNx<1,T> {
T fVal;
- SkNx() = default;
- SkNx(T v) : fVal(v) {}
+ AI SkNx() = default;
+ AI SkNx(T v) : fVal(v) {}
- SkNx(const SkNx_abi<1,T>& a) : fVal(a.val) {}
- operator SkNx_abi<1,T>() const { return { fVal }; }
+ AI SkNx(const SkNx_abi<1,T>& a) : fVal(a.val) {}
+ AI operator SkNx_abi<1,T>() const { return { fVal }; }
// Android complains about unused parameters, so we guard it
- T operator[](int SkDEBUGCODE(k)) const {
+ AI T operator[](int SkDEBUGCODE(k)) const {
SkASSERT(k == 0);
return fVal;
}
- static SkNx Load(const void* ptr) {
+ AI static SkNx Load(const void* ptr) {
SkNx v;
memcpy(&v, ptr, sizeof(T));
return v;
}
- void store(void* ptr) const { memcpy(ptr, &fVal, sizeof(T)); }
+ AI void store(void* ptr) const { memcpy(ptr, &fVal, sizeof(T)); }
- static void Load4(const void* vptr, SkNx* a, SkNx* b, SkNx* c, SkNx* d) {
+ AI static void Load4(const void* vptr, SkNx* a, SkNx* b, SkNx* c, SkNx* d) {
auto ptr = (const char*)vptr;
*a = Load(ptr + 0*sizeof(T));
*b = Load(ptr + 1*sizeof(T));
*c = Load(ptr + 2*sizeof(T));
*d = Load(ptr + 3*sizeof(T));
}
- static void Store4(void* vptr, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
+ AI static void Store4(void* vptr, const SkNx& a, const SkNx& b, const SkNx& c, const SkNx& d) {
auto ptr = (char*)vptr;
a.store(ptr + 0*sizeof(T));
b.store(ptr + 1*sizeof(T));
@@ -168,65 +168,67 @@ struct SkNx<1,T> {
d.store(ptr + 3*sizeof(T));
}
- bool anyTrue() const { return fVal != 0; }
- bool allTrue() const { return fVal != 0; }
+ AI bool anyTrue() const { return fVal != 0; }
+ AI bool allTrue() const { return fVal != 0; }
- SkNx abs() const { return Abs(fVal); }
- SkNx sqrt() const { return Sqrt(fVal); }
- SkNx rsqrt() const { return T(1) / this->sqrt(); }
- SkNx floor() const { return Floor(fVal); }
- SkNx invert() const { return T(1) / *this; }
+ AI SkNx abs() const { return Abs(fVal); }
+ AI SkNx sqrt() const { return Sqrt(fVal); }
+ AI SkNx rsqrt() const { return T(1) / this->sqrt(); }
+ AI SkNx floor() const { return Floor(fVal); }
+ AI SkNx invert() const { return T(1) / *this; }
- SkNx operator!() const { return !fVal; }
- SkNx operator-() const { return -fVal; }
- SkNx operator~() const { return FromBits(~ToBits(fVal)); }
+ AI SkNx operator!() const { return !fVal; }
+ AI SkNx operator-() const { return -fVal; }
+ AI SkNx operator~() const { return FromBits(~ToBits(fVal)); }
- SkNx operator<<(int bits) const { return fVal << bits; }
- SkNx operator>>(int bits) const { return fVal >> bits; }
+ AI SkNx operator<<(int bits) const { return fVal << bits; }
+ AI SkNx operator>>(int bits) const { return fVal >> bits; }
- SkNx operator+(const SkNx& y) const { return fVal + y.fVal; }
- SkNx operator-(const SkNx& y) const { return fVal - y.fVal; }
- SkNx operator*(const SkNx& y) const { return fVal * y.fVal; }
- SkNx operator/(const SkNx& y) const { return fVal / y.fVal; }
+ AI SkNx operator+(const SkNx& y) const { return fVal + y.fVal; }
+ AI SkNx operator-(const SkNx& y) const { return fVal - y.fVal; }
+ AI SkNx operator*(const SkNx& y) const { return fVal * y.fVal; }
+ AI SkNx operator/(const SkNx& y) const { return fVal / y.fVal; }
- SkNx operator&(const SkNx& y) const { return FromBits(ToBits(fVal) & ToBits(y.fVal)); }
- SkNx operator|(const SkNx& y) const { return FromBits(ToBits(fVal) | ToBits(y.fVal)); }
- SkNx operator^(const SkNx& y) const { return FromBits(ToBits(fVal) ^ ToBits(y.fVal)); }
+ AI SkNx operator&(const SkNx& y) const { return FromBits(ToBits(fVal) & ToBits(y.fVal)); }
+ AI SkNx operator|(const SkNx& y) const { return FromBits(ToBits(fVal) | ToBits(y.fVal)); }
+ AI SkNx operator^(const SkNx& y) const { return FromBits(ToBits(fVal) ^ ToBits(y.fVal)); }
- SkNx operator==(const SkNx& y) const { return FromBits(fVal == y.fVal ? ~0 : 0); }
- SkNx operator!=(const SkNx& y) const { return FromBits(fVal != y.fVal ? ~0 : 0); }
- SkNx operator<=(const SkNx& y) const { return FromBits(fVal <= y.fVal ? ~0 : 0); }
- SkNx operator>=(const SkNx& y) const { return FromBits(fVal >= y.fVal ? ~0 : 0); }
- SkNx operator< (const SkNx& y) const { return FromBits(fVal < y.fVal ? ~0 : 0); }
- SkNx operator> (const SkNx& y) const { return FromBits(fVal > y.fVal ? ~0 : 0); }
+ AI SkNx operator==(const SkNx& y) const { return FromBits(fVal == y.fVal ? ~0 : 0); }
+ AI SkNx operator!=(const SkNx& y) const { return FromBits(fVal != y.fVal ? ~0 : 0); }
+ AI SkNx operator<=(const SkNx& y) const { return FromBits(fVal <= y.fVal ? ~0 : 0); }
+ AI SkNx operator>=(const SkNx& y) const { return FromBits(fVal >= y.fVal ? ~0 : 0); }
+ AI SkNx operator< (const SkNx& y) const { return FromBits(fVal < y.fVal ? ~0 : 0); }
+ AI SkNx operator> (const SkNx& y) const { return FromBits(fVal > y.fVal ? ~0 : 0); }
- static SkNx Min(const SkNx& x, const SkNx& y) { return x.fVal < y.fVal ? x : y; }
- static SkNx Max(const SkNx& x, const SkNx& y) { return x.fVal > y.fVal ? x : y; }
+ AI static SkNx Min(const SkNx& x, const SkNx& y) { return x.fVal < y.fVal ? x : y; }
+ AI static SkNx Max(const SkNx& x, const SkNx& y) { return x.fVal > y.fVal ? x : y; }
- SkNx saturatedAdd(const SkNx& y) const {
+ AI SkNx saturatedAdd(const SkNx& y) const {
static_assert(std::is_unsigned<T>::value, "");
T sum = fVal + y.fVal;
return sum < fVal ? std::numeric_limits<T>::max() : sum;
}
- SkNx thenElse(const SkNx& t, const SkNx& e) const { return fVal != 0 ? t : e; }
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const { return fVal != 0 ? t : e; }
private:
// Helper functions to choose the right float/double methods. (In <cmath> madness lies...)
- static float Abs(float val) { return ::fabsf(val); }
- static float Sqrt(float val) { return ::sqrtf(val); }
- static float Floor(float val) { return ::floorf(val); }
+ AI static float Abs(float val) { return ::fabsf(val); }
+ AI static float Sqrt(float val) { return ::sqrtf(val); }
+ AI static float Floor(float val) { return ::floorf(val); }
- static double Abs(double val) { return ::fabs(val); }
- static double Sqrt(double val) { return ::sqrt(val); }
- static double Floor(double val) { return ::floor(val); }
+ AI static double Abs(double val) { return ::fabs(val); }
+ AI static double Sqrt(double val) { return ::sqrt(val); }
+ AI static double Floor(double val) { return ::floor(val); }
// Helper functions for working with floats/doubles as bit patterns.
- template <typename U> static U ToBits(U v) { return v; }
- static int32_t ToBits(float v) { int32_t bits; memcpy(&bits, &v, sizeof(v)); return bits; }
- static int64_t ToBits(double v) { int64_t bits; memcpy(&bits, &v, sizeof(v)); return bits; }
+ template <typename U>
+ AI static U ToBits(U v) { return v; }
+ AI static int32_t ToBits(float v) { int32_t bits; memcpy(&bits, &v, sizeof(v)); return bits; }
+ AI static int64_t ToBits(double v) { int64_t bits; memcpy(&bits, &v, sizeof(v)); return bits; }
- template <typename Bits> static T FromBits(Bits bits) {
+ template <typename Bits>
+ AI static T FromBits(Bits bits) {
static_assert(std::is_pod<T >::value &&
std::is_pod<Bits>::value &&
sizeof(T) <= sizeof(Bits), "");
@@ -237,7 +239,7 @@ private:
};
// Allow scalars on the left or right of binary operators, and things like +=, &=, etc.
-#define V template <int N, typename T> SI SkNx<N,T>
+#define V template <int N, typename T> AI static SkNx<N,T>
V operator+ (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) + y; }
V operator- (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) - y; }
V operator* (T x, const SkNx<N,T>& y) { return SkNx<N,T>(x) * y; }
@@ -288,14 +290,14 @@ private:
// SkNx<N,T> ~~> SkNx<N/2,T> + SkNx<N/2,T>
template <int N, typename T>
-SI void SkNx_split(const SkNx<N,T>& v, SkNx<N/2,T>* lo, SkNx<N/2,T>* hi) {
+AI static void SkNx_split(const SkNx<N,T>& v, SkNx<N/2,T>* lo, SkNx<N/2,T>* hi) {
*lo = v.fLo;
*hi = v.fHi;
}
// SkNx<N/2,T> + SkNx<N/2,T> ~~> SkNx<N,T>
template <int N, typename T>
-SI SkNx<N*2,T> SkNx_join(const SkNx<N,T>& lo, const SkNx<N,T>& hi) {
+AI static SkNx<N*2,T> SkNx_join(const SkNx<N,T>& lo, const SkNx<N,T>& hi) {
return { lo, hi };
}
@@ -306,22 +308,22 @@ SI SkNx<N*2,T> SkNx_join(const SkNx<N,T>& lo, const SkNx<N,T>& hi) {
// SkNx_shuffle<2,1,2,1,2,1,2,1>(v) ~~> {B,G,B,G,B,G,B,G}
// SkNx_shuffle<3,3,3,3>(v) ~~> {A,A,A,A}
template <int... Ix, int N, typename T>
-SI SkNx<sizeof...(Ix),T> SkNx_shuffle(const SkNx<N,T>& v) {
+AI static SkNx<sizeof...(Ix),T> SkNx_shuffle(const SkNx<N,T>& v) {
return { v[Ix]... };
}
// Cast from SkNx<N, Src> to SkNx<N, Dst>, as if you called static_cast<Dst>(Src).
template <typename Dst, typename Src, int N>
-SI SkNx<N,Dst> SkNx_cast(const SkNx<N,Src>& v) {
+AI static SkNx<N,Dst> SkNx_cast(const SkNx<N,Src>& v) {
return { SkNx_cast<Dst>(v.fLo), SkNx_cast<Dst>(v.fHi) };
}
template <typename Dst, typename Src>
-SI SkNx<1,Dst> SkNx_cast(const SkNx<1,Src>& v) {
+AI static SkNx<1,Dst> SkNx_cast(const SkNx<1,Src>& v) {
return static_cast<Dst>(v.fVal);
}
template <int N, typename T>
-SI SkNx<N,T> SkNx_fma(const SkNx<N,T>& f, const SkNx<N,T>& m, const SkNx<N,T>& a) {
+AI static SkNx<N,T> SkNx_fma(const SkNx<N,T>& f, const SkNx<N,T>& m, const SkNx<N,T>& a) {
return f*m+a;
}
@@ -356,7 +358,7 @@ typedef SkNx<4, uint32_t> Sk4u;
#include "../opts/SkNx_neon.h"
#else
-SI Sk4i Sk4f_round(const Sk4f& x) {
+AI static Sk4i Sk4f_round(const Sk4f& x) {
return { (int) lrintf (x[0]),
(int) lrintf (x[1]),
(int) lrintf (x[2]),
@@ -365,10 +367,11 @@ SI Sk4i Sk4f_round(const Sk4f& x) {
#endif
-SI void Sk4f_ToBytes(uint8_t p[16], const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
+AI static void Sk4f_ToBytes(uint8_t p[16],
+ const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
SkNx_cast<uint8_t>(SkNx_join(SkNx_join(a,b), SkNx_join(c,d))).store(p);
}
-#undef SI
+#undef AI
#endif//SkNx_DEFINED
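Before moving on to the backend headers, a hedged illustration (not Skia code) of the ODR hazard the commit message alludes to, and of why the anonymous namespaces and 'static' carry the correctness while always-inline is only a mechanical safety net. It reuses the SK_ALWAYS_INLINE fallback from the sketch above; suppose this header is included from a.cpp built with -mavx2 and from b.cpp built with plain -msse2:

    #if defined(__AVX2__)
        #define IMPL "avx2"
    #else
        #define IMPL "sse2"
    #endif

    // Risky: plain 'inline' has external linkage, so the linker keeps only one
    // of the two differing bodies. That is an ODR violation, and the -msse2
    // caller can wind up executing AVX2 instructions.
    inline const char* which_impl_risky() { return IMPL; }

    // What SkNx does: internal linkage gives each translation unit its own
    // private copy (the correctness), and forced inlining leaves no
    // out-of-line symbol for the linker to merge anyway (the safety net).
    namespace {
        SK_ALWAYS_INLINE const char* which_impl_safe() { return IMPL; }
    }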
diff --git a/src/opts/SkNx_neon.h b/src/opts/SkNx_neon.h
index abdebe2c2f..b5d89891d1 100644
--- a/src/opts/SkNx_neon.h
+++ b/src/opts/SkNx_neon.h
@@ -20,7 +20,7 @@ namespace {
// - roundtrip through integers via truncation
// - subtract 1 if that's too big (possible for negative values).
// This restricts the domain of our inputs to a maximum somewhere around 2^31. Seems plenty big.
-static inline float32x4_t armv7_vrndmq_f32(float32x4_t v) {
+AI static float32x4_t armv7_vrndmq_f32(float32x4_t v) {
auto roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
auto too_big = vcgtq_f32(roundtrip, v);
return vsubq_f32(roundtrip, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1)));
@@ -29,25 +29,25 @@ static inline float32x4_t armv7_vrndmq_f32(float32x4_t v) {
template <>
class SkNx<2, float> {
public:
- SkNx(float32x2_t vec) : fVec(vec) {}
+ AI SkNx(float32x2_t vec) : fVec(vec) {}
- SkNx() {}
- SkNx(float val) : fVec(vdup_n_f32(val)) {}
- static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }
- SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; }
+ AI SkNx() {}
+ AI SkNx(float val) : fVec(vdup_n_f32(val)) {}
+ AI SkNx(float a, float b) { fVec = (float32x2_t) { a, b }; }
- void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }
+ AI static SkNx Load(const void* ptr) { return vld1_f32((const float*)ptr); }
+ AI void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }
- SkNx invert() const {
+ AI SkNx invert() const {
float32x2_t est0 = vrecpe_f32(fVec),
est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
return est1;
}
- SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); }
- SkNx operator * (const SkNx& o) const { return vmul_f32(fVec, o.fVec); }
- SkNx operator / (const SkNx& o) const {
+ AI SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return vmul_f32(fVec, o.fVec); }
+ AI SkNx operator / (const SkNx& o) const {
#if defined(SK_CPU_ARM64)
return vdiv_f32(fVec, o.fVec);
#else
@@ -58,24 +58,24 @@ public:
#endif
}
- SkNx operator == (const SkNx& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); }
- SkNx operator < (const SkNx& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); }
- SkNx operator > (const SkNx& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); }
- SkNx operator <= (const SkNx& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); }
- SkNx operator >= (const SkNx& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); }
- SkNx operator != (const SkNx& o) const {
+ AI SkNx operator==(const SkNx& o) const { return vreinterpret_f32_u32(vceq_f32(fVec, o.fVec)); }
+ AI SkNx operator <(const SkNx& o) const { return vreinterpret_f32_u32(vclt_f32(fVec, o.fVec)); }
+ AI SkNx operator >(const SkNx& o) const { return vreinterpret_f32_u32(vcgt_f32(fVec, o.fVec)); }
+ AI SkNx operator<=(const SkNx& o) const { return vreinterpret_f32_u32(vcle_f32(fVec, o.fVec)); }
+ AI SkNx operator>=(const SkNx& o) const { return vreinterpret_f32_u32(vcge_f32(fVec, o.fVec)); }
+ AI SkNx operator!=(const SkNx& o) const {
return vreinterpret_f32_u32(vmvn_u32(vceq_f32(fVec, o.fVec)));
}
- static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); }
- static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); }
+ AI static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); }
+ AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); }
- SkNx rsqrt() const {
+ AI SkNx rsqrt() const {
float32x2_t est0 = vrsqrte_f32(fVec);
return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
}
- SkNx sqrt() const {
+ AI SkNx sqrt() const {
#if defined(SK_CPU_ARM64)
return vsqrt_f32(fVec);
#else
@@ -86,17 +86,17 @@ public:
#endif
}
- float operator[](int k) const {
+ AI float operator[](int k) const {
SkASSERT(0 <= k && k < 2);
union { float32x2_t v; float fs[2]; } pun = {fVec};
return pun.fs[k&1];
}
- bool allTrue() const {
+ AI bool allTrue() const {
auto v = vreinterpret_u32_f32(fVec);
return vget_lane_u32(v,0) && vget_lane_u32(v,1);
}
- bool anyTrue() const {
+ AI bool anyTrue() const {
auto v = vreinterpret_u32_f32(fVec);
return vget_lane_u32(v,0) || vget_lane_u32(v,1);
}
@@ -107,26 +107,26 @@ public:
template <>
class SkNx<4, float> {
public:
- SkNx(float32x4_t vec) : fVec(vec) {}
+ AI SkNx(float32x4_t vec) : fVec(vec) {}
- SkNx() {}
- SkNx(float val) : fVec(vdupq_n_f32(val)) {}
- SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }
+ AI SkNx() {}
+ AI SkNx(float val) : fVec(vdupq_n_f32(val)) {}
+ AI SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }
- SkNx(const SkNx_abi<4,float>& a) : fVec(a.vec) {}
- operator SkNx_abi<4,float>() const { return { fVec }; }
+ AI SkNx(const SkNx_abi<4,float>& a) : fVec(a.vec) {}
+ AI operator SkNx_abi<4,float>() const { return { fVec }; }
- static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); }
- void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); }
+ AI static SkNx Load(const void* ptr) { return vld1q_f32((const float*)ptr); }
+ AI void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); }
- static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
+ AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
float32x4x4_t rgba = vld4q_f32((const float*) ptr);
*r = rgba.val[0];
*g = rgba.val[1];
*b = rgba.val[2];
*a = rgba.val[3];
}
- static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
+ AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
float32x4x4_t rgba = {{
r.fVec,
g.fVec,
@@ -136,16 +136,16 @@ public:
vst4q_f32((float*) dst, rgba);
}
- SkNx invert() const {
+ AI SkNx invert() const {
float32x4_t est0 = vrecpeq_f32(fVec),
est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
return est1;
}
- SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); }
- SkNx operator * (const SkNx& o) const { return vmulq_f32(fVec, o.fVec); }
- SkNx operator / (const SkNx& o) const {
+ AI SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return vmulq_f32(fVec, o.fVec); }
+ AI SkNx operator / (const SkNx& o) const {
#if defined(SK_CPU_ARM64)
return vdivq_f32(fVec, o.fVec);
#else
@@ -156,20 +156,20 @@ public:
#endif
}
- SkNx operator==(const SkNx& o) const { return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec)); }
- SkNx operator <(const SkNx& o) const { return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec)); }
- SkNx operator >(const SkNx& o) const { return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec)); }
- SkNx operator<=(const SkNx& o) const { return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec)); }
- SkNx operator>=(const SkNx& o) const { return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec)); }
- SkNx operator!=(const SkNx& o) const {
+ AI SkNx operator==(const SkNx& o) const {return vreinterpretq_f32_u32(vceqq_f32(fVec, o.fVec));}
+ AI SkNx operator <(const SkNx& o) const {return vreinterpretq_f32_u32(vcltq_f32(fVec, o.fVec));}
+ AI SkNx operator >(const SkNx& o) const {return vreinterpretq_f32_u32(vcgtq_f32(fVec, o.fVec));}
+ AI SkNx operator<=(const SkNx& o) const {return vreinterpretq_f32_u32(vcleq_f32(fVec, o.fVec));}
+ AI SkNx operator>=(const SkNx& o) const {return vreinterpretq_f32_u32(vcgeq_f32(fVec, o.fVec));}
+ AI SkNx operator!=(const SkNx& o) const {
return vreinterpretq_f32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec)));
}
- static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); }
- static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); }
+ AI static SkNx Min(const SkNx& l, const SkNx& r) { return vminq_f32(l.fVec, r.fVec); }
+ AI static SkNx Max(const SkNx& l, const SkNx& r) { return vmaxq_f32(l.fVec, r.fVec); }
- SkNx abs() const { return vabsq_f32(fVec); }
- SkNx floor() const {
+ AI SkNx abs() const { return vabsq_f32(fVec); }
+ AI SkNx floor() const {
#if defined(SK_CPU_ARM64)
return vrndmq_f32(fVec);
#else
@@ -178,12 +178,12 @@ public:
}
- SkNx rsqrt() const {
+ AI SkNx rsqrt() const {
float32x4_t est0 = vrsqrteq_f32(fVec);
return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
}
- SkNx sqrt() const {
+ AI SkNx sqrt() const {
#if defined(SK_CPU_ARM64)
return vsqrtq_f32(fVec);
#else
@@ -194,24 +194,24 @@ public:
#endif
}
- float operator[](int k) const {
+ AI float operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { float32x4_t v; float fs[4]; } pun = {fVec};
return pun.fs[k&3];
}
- bool allTrue() const {
+ AI bool allTrue() const {
auto v = vreinterpretq_u32_f32(fVec);
return vgetq_lane_u32(v,0) && vgetq_lane_u32(v,1)
&& vgetq_lane_u32(v,2) && vgetq_lane_u32(v,3);
}
- bool anyTrue() const {
+ AI bool anyTrue() const {
auto v = vreinterpretq_u32_f32(fVec);
return vgetq_lane_u32(v,0) || vgetq_lane_u32(v,1)
|| vgetq_lane_u32(v,2) || vgetq_lane_u32(v,3);
}
- SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
return vbslq_f32(vreinterpretq_u32_f32(fVec), t.fVec, e.fVec);
}
@@ -224,18 +224,18 @@ public:
template <>
class SkNx<4, uint16_t> {
public:
- SkNx(const uint16x4_t& vec) : fVec(vec) {}
+ AI SkNx(const uint16x4_t& vec) : fVec(vec) {}
- SkNx() {}
- SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {}
- SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
+ AI SkNx() {}
+ AI SkNx(uint16_t val) : fVec(vdup_n_u16(val)) {}
+ AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
fVec = (uint16x4_t) { a,b,c,d };
}
- static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); }
- void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); }
+ AI static SkNx Load(const void* ptr) { return vld1_u16((const uint16_t*)ptr); }
+ AI void store(void* ptr) const { vst1_u16((uint16_t*)ptr, fVec); }
- static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
+ AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
uint16x4x4_t rgba = vld4_u16((const uint16_t*)ptr);
*r = rgba.val[0];
*g = rgba.val[1];
@@ -243,7 +243,7 @@ public:
*a = rgba.val[3];
}
- static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
+ AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
uint16x4x4_t rgba = {{
r.fVec,
g.fVec,
@@ -253,22 +253,22 @@ public:
vst4_u16((uint16_t*) dst, rgba);
}
- SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); }
- SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); }
+ AI SkNx operator + (const SkNx& o) const { return vadd_u16(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return vsub_u16(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return vmul_u16(fVec, o.fVec); }
- SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
- SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
+ AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
+ AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
- static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); }
+ AI static SkNx Min(const SkNx& a, const SkNx& b) { return vmin_u16(a.fVec, b.fVec); }
- uint16_t operator[](int k) const {
+ AI uint16_t operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { uint16x4_t v; uint16_t us[4]; } pun = {fVec};
return pun.us[k&3];
}
- SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
return vbsl_u16(fVec, t.fVec, e.fVec);
}
@@ -278,35 +278,35 @@ public:
template <>
class SkNx<8, uint16_t> {
public:
- SkNx(const uint16x8_t& vec) : fVec(vec) {}
+ AI SkNx(const uint16x8_t& vec) : fVec(vec) {}
- SkNx() {}
- SkNx(uint16_t val) : fVec(vdupq_n_u16(val)) {}
- static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); }
+ AI SkNx() {}
+ AI SkNx(uint16_t val) : fVec(vdupq_n_u16(val)) {}
+ AI static SkNx Load(const void* ptr) { return vld1q_u16((const uint16_t*)ptr); }
- SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
- uint16_t e, uint16_t f, uint16_t g, uint16_t h) {
+ AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
+ uint16_t e, uint16_t f, uint16_t g, uint16_t h) {
fVec = (uint16x8_t) { a,b,c,d, e,f,g,h };
}
- void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); }
+ AI void store(void* ptr) const { vst1q_u16((uint16_t*)ptr, fVec); }
- SkNx operator + (const SkNx& o) const { return vaddq_u16(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return vsubq_u16(fVec, o.fVec); }
- SkNx operator * (const SkNx& o) const { return vmulq_u16(fVec, o.fVec); }
+ AI SkNx operator + (const SkNx& o) const { return vaddq_u16(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return vsubq_u16(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return vmulq_u16(fVec, o.fVec); }
- SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
- SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
+ AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
+ AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
- static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); }
+ AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u16(a.fVec, b.fVec); }
- uint16_t operator[](int k) const {
+ AI uint16_t operator[](int k) const {
SkASSERT(0 <= k && k < 8);
union { uint16x8_t v; uint16_t us[8]; } pun = {fVec};
return pun.us[k&7];
}
- SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
return vbslq_u16(fVec, t.fVec, e.fVec);
}
@@ -318,19 +318,19 @@ class SkNx<4, uint8_t> {
public:
typedef uint32_t __attribute__((aligned(1))) unaligned_uint32_t;
- SkNx(const uint8x8_t& vec) : fVec(vec) {}
+ AI SkNx(const uint8x8_t& vec) : fVec(vec) {}
- SkNx() {}
- SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
+ AI SkNx() {}
+ AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d) {
fVec = (uint8x8_t){a,b,c,d, 0,0,0,0};
}
- static SkNx Load(const void* ptr) {
+ AI static SkNx Load(const void* ptr) {
return (uint8x8_t)vld1_dup_u32((const unaligned_uint32_t*)ptr);
}
- void store(void* ptr) const {
+ AI void store(void* ptr) const {
return vst1_lane_u32((unaligned_uint32_t*)ptr, (uint32x2_t)fVec, 0);
}
- uint8_t operator[](int k) const {
+ AI uint8_t operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { uint8x8_t v; uint8_t us[8]; } pun = {fVec};
return pun.us[k&3];
@@ -344,36 +344,35 @@ public:
template <>
class SkNx<16, uint8_t> {
public:
- SkNx(const uint8x16_t& vec) : fVec(vec) {}
-
- SkNx() {}
- SkNx(uint8_t val) : fVec(vdupq_n_u8(val)) {}
- static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); }
-
- SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
- uint8_t e, uint8_t f, uint8_t g, uint8_t h,
- uint8_t i, uint8_t j, uint8_t k, uint8_t l,
- uint8_t m, uint8_t n, uint8_t o, uint8_t p) {
+ AI SkNx(const uint8x16_t& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint8_t val) : fVec(vdupq_n_u8(val)) {}
+ AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
+ uint8_t e, uint8_t f, uint8_t g, uint8_t h,
+ uint8_t i, uint8_t j, uint8_t k, uint8_t l,
+ uint8_t m, uint8_t n, uint8_t o, uint8_t p) {
fVec = (uint8x16_t) { a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p };
}
- void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); }
+ AI static SkNx Load(const void* ptr) { return vld1q_u8((const uint8_t*)ptr); }
+ AI void store(void* ptr) const { vst1q_u8((uint8_t*)ptr, fVec); }
- SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); }
+ AI SkNx saturatedAdd(const SkNx& o) const { return vqaddq_u8(fVec, o.fVec); }
- SkNx operator + (const SkNx& o) const { return vaddq_u8(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return vsubq_u8(fVec, o.fVec); }
+ AI SkNx operator + (const SkNx& o) const { return vaddq_u8(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return vsubq_u8(fVec, o.fVec); }
- static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); }
- SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); }
+ AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u8(a.fVec, b.fVec); }
+ AI SkNx operator < (const SkNx& o) const { return vcltq_u8(fVec, o.fVec); }
- uint8_t operator[](int k) const {
+ AI uint8_t operator[](int k) const {
SkASSERT(0 <= k && k < 16);
union { uint8x16_t v; uint8_t us[16]; } pun = {fVec};
return pun.us[k&15];
}
- SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
return vbslq_u8(fVec, t.fVec, e.fVec);
}
@@ -383,52 +382,52 @@ public:
template <>
class SkNx<4, int32_t> {
public:
- SkNx(const int32x4_t& vec) : fVec(vec) {}
+ AI SkNx(const int32x4_t& vec) : fVec(vec) {}
- SkNx() {}
- SkNx(int32_t v) {
+ AI SkNx() {}
+ AI SkNx(int32_t v) {
fVec = vdupq_n_s32(v);
}
- SkNx(int32_t a, int32_t b, int32_t c, int32_t d) {
+ AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) {
fVec = (int32x4_t){a,b,c,d};
}
- static SkNx Load(const void* ptr) {
+ AI static SkNx Load(const void* ptr) {
return vld1q_s32((const int32_t*)ptr);
}
- void store(void* ptr) const {
+ AI void store(void* ptr) const {
return vst1q_s32((int32_t*)ptr, fVec);
}
- int32_t operator[](int k) const {
+ AI int32_t operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { int32x4_t v; int32_t is[4]; } pun = {fVec};
return pun.is[k&3];
}
- SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); }
- SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); }
+ AI SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); }
- SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); }
- SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); }
- SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); }
+ AI SkNx operator & (const SkNx& o) const { return vandq_s32(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return vorrq_s32(fVec, o.fVec); }
+ AI SkNx operator ^ (const SkNx& o) const { return veorq_s32(fVec, o.fVec); }
- SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
- SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
+ AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
+ AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
- SkNx operator == (const SkNx& o) const {
+ AI SkNx operator == (const SkNx& o) const {
return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec));
}
- SkNx operator < (const SkNx& o) const {
+ AI SkNx operator < (const SkNx& o) const {
return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec));
}
- SkNx operator > (const SkNx& o) const {
+ AI SkNx operator > (const SkNx& o) const {
return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec));
}
- static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); }
+ AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); }
// TODO as needed
- SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
return vbslq_s32(vreinterpretq_u32_s32(fVec), t.fVec, e.fVec);
}
@@ -438,84 +437,84 @@ public:
template <>
class SkNx<4, uint32_t> {
public:
- SkNx(const uint32x4_t& vec) : fVec(vec) {}
+ AI SkNx(const uint32x4_t& vec) : fVec(vec) {}
- SkNx() {}
- SkNx(uint32_t v) {
+ AI SkNx() {}
+ AI SkNx(uint32_t v) {
fVec = vdupq_n_u32(v);
}
- SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
+ AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
fVec = (uint32x4_t){a,b,c,d};
}
- static SkNx Load(const void* ptr) {
+ AI static SkNx Load(const void* ptr) {
return vld1q_u32((const uint32_t*)ptr);
}
- void store(void* ptr) const {
+ AI void store(void* ptr) const {
return vst1q_u32((uint32_t*)ptr, fVec);
}
- uint32_t operator[](int k) const {
+ AI uint32_t operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { uint32x4_t v; uint32_t us[4]; } pun = {fVec};
return pun.us[k&3];
}
- SkNx operator + (const SkNx& o) const { return vaddq_u32(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return vsubq_u32(fVec, o.fVec); }
- SkNx operator * (const SkNx& o) const { return vmulq_u32(fVec, o.fVec); }
+ AI SkNx operator + (const SkNx& o) const { return vaddq_u32(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return vsubq_u32(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return vmulq_u32(fVec, o.fVec); }
- SkNx operator & (const SkNx& o) const { return vandq_u32(fVec, o.fVec); }
- SkNx operator | (const SkNx& o) const { return vorrq_u32(fVec, o.fVec); }
- SkNx operator ^ (const SkNx& o) const { return veorq_u32(fVec, o.fVec); }
+ AI SkNx operator & (const SkNx& o) const { return vandq_u32(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return vorrq_u32(fVec, o.fVec); }
+ AI SkNx operator ^ (const SkNx& o) const { return veorq_u32(fVec, o.fVec); }
- SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
- SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
+ AI SkNx operator << (int bits) const { return fVec << SkNx(bits).fVec; }
+ AI SkNx operator >> (int bits) const { return fVec >> SkNx(bits).fVec; }
- SkNx operator == (const SkNx& o) const { return vceqq_u32(fVec, o.fVec); }
- SkNx operator < (const SkNx& o) const { return vcltq_u32(fVec, o.fVec); }
- SkNx operator > (const SkNx& o) const { return vcgtq_u32(fVec, o.fVec); }
+ AI SkNx operator == (const SkNx& o) const { return vceqq_u32(fVec, o.fVec); }
+ AI SkNx operator < (const SkNx& o) const { return vcltq_u32(fVec, o.fVec); }
+ AI SkNx operator > (const SkNx& o) const { return vcgtq_u32(fVec, o.fVec); }
- static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u32(a.fVec, b.fVec); }
+ AI static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_u32(a.fVec, b.fVec); }
// TODO as needed
- SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
return vbslq_u32(fVec, t.fVec, e.fVec);
}
uint32x4_t fVec;
};
-template<> inline Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
+template<> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
return vcvtq_s32_f32(src.fVec);
}
-template<> inline Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
+template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
return vcvtq_f32_s32(src.fVec);
}
-template<> inline Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
+template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
return SkNx_cast<float>(Sk4i::Load(&src));
}
-template<> inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
+template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
return vqmovn_u32(vcvtq_u32_f32(src.fVec));
}
-template<> inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
+template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
return vcvtq_f32_u32(vmovl_u16(src.fVec));
}
-template<> inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
+template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
uint32x4_t _32 = vcvtq_u32_f32(src.fVec);
uint16x4_t _16 = vqmovn_u32(_32);
return vqmovn_u16(vcombine_u16(_16, _16));
}
-template<> inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
+template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
uint16x8_t _16 = vmovl_u8 (src.fVec) ;
uint32x4_t _32 = vmovl_u16(vget_low_u16(_16));
return vcvtq_f32_u32(_32);
}
-template<> inline Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
+template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
Sk8f ab, cd;
SkNx_split(src, &ab, &cd);
@@ -528,32 +527,32 @@ template<> inline Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
(uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0];
}
-template<> inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
+template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
return vget_low_u16(vmovl_u8(src.fVec));
}
-template<> inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
+template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
return vmovn_u16(vcombine_u16(src.fVec, src.fVec));
}
-template<> inline Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
+template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
uint16x4_t _16 = vqmovun_s32(src.fVec);
return vqmovn_u16(vcombine_u16(_16, _16));
}
-template<> inline Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
+template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
return vreinterpretq_s32_u32(vmovl_u16(src.fVec));
}
-template<> inline Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
+template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
return vmovn_u32(vreinterpretq_u32_s32(src.fVec));
}
-template<> /*static*/ inline Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
+template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
return vreinterpretq_s32_u32(src.fVec);
}
-static inline Sk4i Sk4f_round(const Sk4f& x) {
+AI static Sk4i Sk4f_round(const Sk4f& x) {
return vcvtq_s32_f32((x + 0.5f).fVec);
}
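From the caller's side nothing changes: code written against the portable Sk4f API picks up whichever backend was included (NEON above, SSE below), and with AI each call should flatten into straight-line vector instructions. A small usage sketch; scale_rgba is a hypothetical helper, not part of this commit:

    #include "SkNx.h"   // pulls in SkNx_neon.h or SkNx_sse.h as appropriate

    // Hypothetical helper: multiply four float channels by a scalar.
    static void scale_rgba(float rgba[4], float s) {
        Sk4f v = Sk4f::Load(rgba);   // vld1q_f32 on NEON, _mm_loadu_ps on SSE
        (v * s).store(rgba);         // vmulq_f32+vst1q_f32 / _mm_mul_ps+_mm_storeu_ps
    }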
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
index b79fe9a884..a4594115e0 100644
--- a/src/opts/SkNx_sse.h
+++ b/src/opts/SkNx_sse.h
@@ -25,44 +25,44 @@ namespace {
template <>
class SkNx<2, float> {
public:
- SkNx(const __m128& vec) : fVec(vec) {}
+ AI SkNx(const __m128& vec) : fVec(vec) {}
- SkNx() {}
- SkNx(float val) : fVec(_mm_set1_ps(val)) {}
- static SkNx Load(const void* ptr) {
+ AI SkNx() {}
+ AI SkNx(float val) : fVec(_mm_set1_ps(val)) {}
+ AI static SkNx Load(const void* ptr) {
return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)ptr));
}
- SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}
+ AI SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}
- void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }
+ AI void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }
- SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
- SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
- SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
+ AI SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }
- SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
- SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
- SkNx operator < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
- SkNx operator > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
- SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
- SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }
+ AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
+ AI SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
+ AI SkNx operator < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
+ AI SkNx operator > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
+ AI SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
+ AI SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }
- static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
- static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }
+ AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
+ AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }
- SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
- SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
- SkNx invert() const { return _mm_rcp_ps(fVec); }
+ AI SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
+ AI SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
+ AI SkNx invert() const { return _mm_rcp_ps(fVec); }
- float operator[](int k) const {
+ AI float operator[](int k) const {
SkASSERT(0 <= k && k < 2);
union { __m128 v; float fs[4]; } pun = {fVec};
return pun.fs[k&1];
}
- bool allTrue() const { return 0xff == (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
- bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
+ AI bool allTrue() const { return 0xff == (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
+ AI bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
__m128 fVec;
};
@@ -70,19 +70,19 @@ public:
template <>
class SkNx<4, float> {
public:
- SkNx(const __m128& vec) : fVec(vec) {}
+ AI SkNx(const __m128& vec) : fVec(vec) {}
- SkNx() {}
- SkNx(float val) : fVec( _mm_set1_ps(val) ) {}
- SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}
+ AI SkNx() {}
+ AI SkNx(float val) : fVec( _mm_set1_ps(val) ) {}
+ AI SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}
- SkNx(const SkNx_abi<4,float>& a) : fVec(a.vec) {}
- operator SkNx_abi<4,float>() const { return { fVec }; }
+ AI SkNx(const SkNx_abi<4,float>& a) : fVec(a.vec) {}
+ AI operator SkNx_abi<4,float>() const { return { fVec }; }
- static SkNx Load(const void* ptr) { return _mm_loadu_ps((const float*)ptr); }
- void store(void* ptr) const { _mm_storeu_ps((float*)ptr, fVec); }
+ AI static SkNx Load(const void* ptr) { return _mm_loadu_ps((const float*)ptr); }
+ AI void store(void* ptr) const { _mm_storeu_ps((float*)ptr, fVec); }
- static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
+ AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
__m128 v0 = _mm_loadu_ps(((float*)ptr) + 0),
v1 = _mm_loadu_ps(((float*)ptr) + 4),
v2 = _mm_loadu_ps(((float*)ptr) + 8),
@@ -93,7 +93,7 @@ public:
*b = v2;
*a = v3;
}
- static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
+ AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
__m128 v0 = r.fVec,
v1 = g.fVec,
v2 = b.fVec,
@@ -105,23 +105,23 @@ public:
_mm_storeu_ps(((float*) dst) + 12, v3);
}
- SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
- SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
- SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
+ AI SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }
- SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
- SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
- SkNx operator < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
- SkNx operator > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
- SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
- SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }
+ AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
+ AI SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
+ AI SkNx operator < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
+ AI SkNx operator > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
+ AI SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
+ AI SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }
- static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
- static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }
+ AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
+ AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }
- SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
- SkNx floor() const {
+ AI SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
+ AI SkNx floor() const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
return _mm_floor_ps(fVec);
#else
@@ -136,20 +136,20 @@ public:
#endif
}
- SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
- SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
- SkNx invert() const { return _mm_rcp_ps(fVec); }
+ AI SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
+ AI SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
+ AI SkNx invert() const { return _mm_rcp_ps(fVec); }
- float operator[](int k) const {
+ AI float operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { __m128 v; float fs[4]; } pun = {fVec};
return pun.fs[k&3];
}
- bool allTrue() const { return 0xffff == _mm_movemask_epi8(_mm_castps_si128(fVec)); }
- bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(_mm_castps_si128(fVec)); }
+ AI bool allTrue() const { return 0xffff == _mm_movemask_epi8(_mm_castps_si128(fVec)); }
+ AI bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(_mm_castps_si128(fVec)); }
- SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
return _mm_blendv_ps(e.fVec, t.fVec, fVec);
#else
@@ -164,42 +164,42 @@ public:
template <>
class SkNx<4, int32_t> {
public:
- SkNx(const __m128i& vec) : fVec(vec) {}
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
- SkNx() {}
- SkNx(int32_t val) : fVec(_mm_set1_epi32(val)) {}
- static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
- SkNx(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}
+ AI SkNx() {}
+ AI SkNx(int32_t val) : fVec(_mm_set1_epi32(val)) {}
+ AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
+ AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}
- void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
+ AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
- SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
- SkNx operator * (const SkNx& o) const {
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const {
__m128i mul20 = _mm_mul_epu32(fVec, o.fVec),
mul31 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)),
_mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0)));
}
- SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
- SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
- SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }
+ AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
+ AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }
- SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
- SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }
+ AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
+ AI SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }
- SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
- SkNx operator < (const SkNx& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); }
- SkNx operator > (const SkNx& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); }
+ AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
+ AI SkNx operator < (const SkNx& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); }
+ AI SkNx operator > (const SkNx& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); }
- int32_t operator[](int k) const {
+ AI int32_t operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { __m128i v; int32_t is[4]; } pun = {fVec};
return pun.is[k&3];
}
- SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
#else
@@ -214,36 +214,36 @@ public:
template <>
class SkNx<4, uint32_t> {
public:
- SkNx(const __m128i& vec) : fVec(vec) {}
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
- SkNx() {}
- SkNx(uint32_t val) : fVec(_mm_set1_epi32(val)) {}
- static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
- SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}
+ AI SkNx() {}
+ AI SkNx(uint32_t val) : fVec(_mm_set1_epi32(val)) {}
+ AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
+ AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}
- void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
+ AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
- SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
    // Not quite sure how best to implement operator * in SSE2. We probably don't use it.
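
Presumably the Sk4i sequence above would carry over unchanged if it were ever needed: the low 32 bits of a two's-complement product are sign-agnostic, so a hypothetical unsigned operator * (not in this patch) could reuse it verbatim:

    AI SkNx operator * (const SkNx& o) const {
        __m128i mul20 = _mm_mul_epu32(fVec, o.fVec),
                mul31 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
        return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)),
                                  _mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0)));
    }
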
- SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
- SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
- SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }
+ AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
+ AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }
- SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
- SkNx operator >> (int bits) const { return _mm_srli_epi32(fVec, bits); }
+ AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
+ AI SkNx operator >> (int bits) const { return _mm_srli_epi32(fVec, bits); }
- SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
+ AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
    // operator < and > take a little extra fiddling to make them work for unsigned ints.
- uint32_t operator[](int k) const {
+ AI uint32_t operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { __m128i v; uint32_t us[4]; } pun = {fVec};
return pun.us[k&3];
}
- SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
#else
@@ -259,16 +259,17 @@ public:
template <>
class SkNx<4, uint16_t> {
public:
- SkNx(const __m128i& vec) : fVec(vec) {}
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
- SkNx() {}
- SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
- SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) : fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}
+ AI SkNx() {}
+ AI SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
+ AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d)
+ : fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}
- static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
- void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }
+ AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
+ AI void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }
- static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
+ AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
__m128i lo = _mm_loadu_si128(((__m128i*)ptr) + 0),
hi = _mm_loadu_si128(((__m128i*)ptr) + 1);
__m128i even = _mm_unpacklo_epi16(lo, hi), // r0 r2 g0 g2 b0 b2 a0 a2
@@ -280,7 +281,7 @@ public:
*b = ba;
*a = _mm_srli_si128(ba, 8);
}
- static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
+ AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
__m128i rg = _mm_unpacklo_epi16(r.fVec, g.fVec);
__m128i ba = _mm_unpacklo_epi16(b.fVec, a.fVec);
__m128i lo = _mm_unpacklo_epi32(rg, ba);
@@ -289,14 +290,14 @@ public:
_mm_storeu_si128(((__m128i*) dst) + 1, hi);
}
- SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
- SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
- SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
- SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }
+ AI SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
+ AI SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }
- uint16_t operator[](int k) const {
+ AI uint16_t operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { __m128i v; uint16_t us[8]; } pun = {fVec};
return pun.us[k&3];
@@ -308,17 +309,18 @@ public:
template <>
class SkNx<8, uint16_t> {
public:
- SkNx(const __m128i& vec) : fVec(vec) {}
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
- SkNx() {}
- SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
- SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
- uint16_t e, uint16_t f, uint16_t g, uint16_t h) : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}
+ AI SkNx() {}
+ AI SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
+ AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
+ uint16_t e, uint16_t f, uint16_t g, uint16_t h)
+ : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}
- static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
- void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
+ AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
+ AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
- static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
+ AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
// TODO: AVX2 version
__m128i _01 = _mm_loadu_si128(((__m128i*)ptr) + 0),
_23 = _mm_loadu_si128(((__m128i*)ptr) + 1),
@@ -340,7 +342,7 @@ public:
*b = _mm_unpacklo_epi64(ba0123, ba4567);
*a = _mm_unpackhi_epi64(ba0123, ba4567);
}
- static void Store4(void* ptr, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
+ AI static void Store4(void* ptr, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
// TODO: AVX2 version
__m128i rg0123 = _mm_unpacklo_epi16(r.fVec, g.fVec), // r0 g0 r1 g1 r2 g2 r3 g3
rg4567 = _mm_unpackhi_epi16(r.fVec, g.fVec), // r4 g4 r5 g5 r6 g6 r7 g7
@@ -353,14 +355,14 @@ public:
_mm_storeu_si128((__m128i*)ptr + 3, _mm_unpackhi_epi32(rg4567, ba4567));
}
- SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
- SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
- SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
- SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }
+ AI SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
+ AI SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }
- static SkNx Min(const SkNx& a, const SkNx& b) {
+ AI static SkNx Min(const SkNx& a, const SkNx& b) {
        // There's no unsigned _mm_min_epu16, so we bias the values into a range where the
        // signed version, _mm_min_epi16, orders them correctly, then bias back.
const uint16_t top = 0x8000; // Keep this separate from _mm_set1_epi16 or MSVC will whine.
@@ -369,12 +371,12 @@ public:
_mm_sub_epi8(b.fVec, top_8x)));
}
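
The byte-wise epi8 add/sub used here is safe even on 16-bit lanes: adding or subtracting 0x8000 only flips the top bit of each lane, so no carry ever crosses a byte boundary. The bias trick itself, in scalar form (illustrative only):

    static inline uint16_t min_u16_via_s16(uint16_t a, uint16_t b) {
        // Biasing by 0x8000 maps unsigned 16-bit order onto signed 16-bit order.
        int16_t sa = (int16_t)(a - 0x8000),
                sb = (int16_t)(b - 0x8000);
        return (uint16_t)((sa < sb ? sa : sb) + 0x8000);
    }
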
- SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
_mm_andnot_si128(fVec, e.fVec));
}
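
Without SSE4.1's blend, thenElse is the classic mask-select identity; per lane it computes (mask & t) | (~mask & e), assuming the mask is all-zeros or all-ones as the compares above produce:

    static inline uint16_t select_lane(uint16_t mask, uint16_t t, uint16_t e) {
        return (uint16_t)((mask & t) | (~mask & e));
    }
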
- uint16_t operator[](int k) const {
+ AI uint16_t operator[](int k) const {
SkASSERT(0 <= k && k < 8);
union { __m128i v; uint16_t us[8]; } pun = {fVec};
return pun.us[k&7];
@@ -386,16 +388,16 @@ public:
template <>
class SkNx<4, uint8_t> {
public:
- SkNx() {}
- SkNx(const __m128i& vec) : fVec(vec) {}
- SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
+ AI SkNx() {}
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
+ AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
: fVec(_mm_setr_epi8(a,b,c,d, 0,0,0,0, 0,0,0,0, 0,0,0,0)) {}
- static SkNx Load(const void* ptr) { return _mm_cvtsi32_si128(*(const int*)ptr); }
- void store(void* ptr) const { *(int*)ptr = _mm_cvtsi128_si32(fVec); }
+ AI static SkNx Load(const void* ptr) { return _mm_cvtsi32_si128(*(const int*)ptr); }
+ AI void store(void* ptr) const { *(int*)ptr = _mm_cvtsi128_si32(fVec); }
- uint8_t operator[](int k) const {
+ AI uint8_t operator[](int k) const {
SkASSERT(0 <= k && k < 4);
union { __m128i v; uint8_t us[16]; } pun = {fVec};
return pun.us[k&3];
@@ -409,38 +411,38 @@ public:
template <>
class SkNx<16, uint8_t> {
public:
- SkNx(const __m128i& vec) : fVec(vec) {}
-
- SkNx() {}
- SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
- static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
- SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
- uint8_t e, uint8_t f, uint8_t g, uint8_t h,
- uint8_t i, uint8_t j, uint8_t k, uint8_t l,
- uint8_t m, uint8_t n, uint8_t o, uint8_t p)
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
+ AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
+ AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
+ uint8_t e, uint8_t f, uint8_t g, uint8_t h,
+ uint8_t i, uint8_t j, uint8_t k, uint8_t l,
+ uint8_t m, uint8_t n, uint8_t o, uint8_t p)
: fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p)) {}
- void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
+ AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }
- SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }
+ AI SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }
- SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
- SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }
- static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
- SkNx operator < (const SkNx& o) const {
+ AI static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
+ AI SkNx operator < (const SkNx& o) const {
// There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
auto flip = _mm_set1_epi8(char(0x80));
return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
}
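
XOR-ing 0x80 into each byte maps unsigned order onto signed order, the same bias idea as the uint16 Min above. A scalar equivalent for one lane (illustrative):

    static inline bool lt_u8_via_s8(uint8_t a, uint8_t b) {
        return (int8_t)(a ^ 0x80) < (int8_t)(b ^ 0x80);
    }
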
- uint8_t operator[](int k) const {
+ AI uint8_t operator[](int k) const {
SkASSERT(0 <= k && k < 16);
union { __m128i v; uint8_t us[16]; } pun = {fVec};
return pun.us[k&15];
}
- SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
_mm_andnot_si128(fVec, e.fVec));
}
@@ -450,31 +452,22 @@ public:
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2
-// There are two different SkNx<8, uint8_t>, SkNx<8, int32_t>, SkNx<8, uint32_t>, SkNx<8, float>:
-// - the default paired SkNx<4, ...> versions used without AVX2
-// - the native AVX2 versions.
-// It is important that we don't call methods for one from the other.
-// Usually these methods inline, but they don't always in Debug builds.
-// For now, try to fix this by marking all the AVX2 versions as always-inline.
-// We may want or need to extend this strategy to all SkNx methods.
-#define I SK_ALWAYS_INLINE
-
template <>
class SkNx<8, uint8_t> {
public:
- I SkNx(const __m128i& vec) : fVec(vec) {}
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
- I SkNx() {}
- I SkNx(uint8_t v) : fVec(_mm_set1_epi8(v)) {}
- I SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
- uint8_t e, uint8_t f, uint8_t g, uint8_t h)
+ AI SkNx() {}
+ AI SkNx(uint8_t v) : fVec(_mm_set1_epi8(v)) {}
+ AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
+ uint8_t e, uint8_t f, uint8_t g, uint8_t h)
: fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, 0,0,0,0, 0,0,0,0)) {}
- I static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
- I void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }
+ AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
+ AI void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }
- I uint8_t operator[](int k) const {
+ AI uint8_t operator[](int k) const {
SkASSERT(0 <= k && k < 8);
union { __m128i v; uint8_t us[16]; } pun = {fVec};
return pun.us[k&7];
@@ -486,29 +479,29 @@ public:
template <>
class SkNx<8, int32_t> {
public:
- I SkNx(const __m256i& vec) : fVec(vec) {}
+ AI SkNx(const __m256i& vec) : fVec(vec) {}
- I SkNx() {}
- I SkNx(int32_t v) : fVec(_mm256_set1_epi32(v)) {}
- I SkNx(int32_t a, int32_t b, int32_t c, int32_t d,
- int32_t e, int32_t f, int32_t g, int32_t h)
+ AI SkNx() {}
+ AI SkNx(int32_t v) : fVec(_mm256_set1_epi32(v)) {}
+ AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d,
+ int32_t e, int32_t f, int32_t g, int32_t h)
: fVec(_mm256_setr_epi32(a,b,c,d, e,f,g,h)) {}
- I static SkNx Load(const void* ptr) { return _mm256_loadu_si256((const __m256i*)ptr); }
- I void store(void* ptr) const { _mm256_storeu_si256((__m256i*)ptr, fVec); }
+ AI static SkNx Load(const void* ptr) { return _mm256_loadu_si256((const __m256i*)ptr); }
+ AI void store(void* ptr) const { _mm256_storeu_si256((__m256i*)ptr, fVec); }
- I SkNx operator + (const SkNx& o) const { return _mm256_add_epi32(fVec, o.fVec); }
- I SkNx operator - (const SkNx& o) const { return _mm256_sub_epi32(fVec, o.fVec); }
- I SkNx operator * (const SkNx& o) const { return _mm256_mullo_epi32(fVec, o.fVec); }
+ AI SkNx operator + (const SkNx& o) const { return _mm256_add_epi32(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm256_sub_epi32(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return _mm256_mullo_epi32(fVec, o.fVec); }
- I SkNx operator & (const SkNx& o) const { return _mm256_and_si256(fVec, o.fVec); }
- I SkNx operator | (const SkNx& o) const { return _mm256_or_si256(fVec, o.fVec); }
- I SkNx operator ^ (const SkNx& o) const { return _mm256_xor_si256(fVec, o.fVec); }
+ AI SkNx operator & (const SkNx& o) const { return _mm256_and_si256(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return _mm256_or_si256(fVec, o.fVec); }
+ AI SkNx operator ^ (const SkNx& o) const { return _mm256_xor_si256(fVec, o.fVec); }
- I SkNx operator << (int bits) const { return _mm256_slli_epi32(fVec, bits); }
- I SkNx operator >> (int bits) const { return _mm256_srai_epi32(fVec, bits); }
+ AI SkNx operator << (int bits) const { return _mm256_slli_epi32(fVec, bits); }
+ AI SkNx operator >> (int bits) const { return _mm256_srai_epi32(fVec, bits); }
- I int32_t operator[](int k) const {
+ AI int32_t operator[](int k) const {
SkASSERT(0 <= k && k < 8);
union { __m256i v; int32_t is[8]; } pun = {fVec};
return pun.is[k&7];
@@ -520,29 +513,29 @@ public:
template <>
class SkNx<8, uint32_t> {
public:
- I SkNx(const __m256i& vec) : fVec(vec) {}
+ AI SkNx(const __m256i& vec) : fVec(vec) {}
- I SkNx() {}
- I SkNx(uint32_t v) : fVec(_mm256_set1_epi32(v)) {}
- I SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
- uint32_t e, uint32_t f, uint32_t g, uint32_t h)
+ AI SkNx() {}
+ AI SkNx(uint32_t v) : fVec(_mm256_set1_epi32(v)) {}
+ AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
+ uint32_t e, uint32_t f, uint32_t g, uint32_t h)
: fVec(_mm256_setr_epi32(a,b,c,d, e,f,g,h)) {}
- I static SkNx Load(const void* ptr) { return _mm256_loadu_si256((const __m256i*)ptr); }
- I void store(void* ptr) const { _mm256_storeu_si256((__m256i*)ptr, fVec); }
+ AI static SkNx Load(const void* ptr) { return _mm256_loadu_si256((const __m256i*)ptr); }
+ AI void store(void* ptr) const { _mm256_storeu_si256((__m256i*)ptr, fVec); }
- I SkNx operator + (const SkNx& o) const { return _mm256_add_epi32(fVec, o.fVec); }
- I SkNx operator - (const SkNx& o) const { return _mm256_sub_epi32(fVec, o.fVec); }
- I SkNx operator * (const SkNx& o) const { return _mm256_mullo_epi32(fVec, o.fVec); }
+ AI SkNx operator + (const SkNx& o) const { return _mm256_add_epi32(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm256_sub_epi32(fVec, o.fVec); }
+ AI SkNx operator * (const SkNx& o) const { return _mm256_mullo_epi32(fVec, o.fVec); }
- I SkNx operator & (const SkNx& o) const { return _mm256_and_si256(fVec, o.fVec); }
- I SkNx operator | (const SkNx& o) const { return _mm256_or_si256(fVec, o.fVec); }
- I SkNx operator ^ (const SkNx& o) const { return _mm256_xor_si256(fVec, o.fVec); }
+ AI SkNx operator & (const SkNx& o) const { return _mm256_and_si256(fVec, o.fVec); }
+ AI SkNx operator | (const SkNx& o) const { return _mm256_or_si256(fVec, o.fVec); }
+ AI SkNx operator ^ (const SkNx& o) const { return _mm256_xor_si256(fVec, o.fVec); }
- I SkNx operator << (int bits) const { return _mm256_slli_epi32(fVec, bits); }
- I SkNx operator >> (int bits) const { return _mm256_srli_epi32(fVec, bits); }
+ AI SkNx operator << (int bits) const { return _mm256_slli_epi32(fVec, bits); }
+ AI SkNx operator >> (int bits) const { return _mm256_srli_epi32(fVec, bits); }
- I uint32_t operator[](int k) const {
+ AI uint32_t operator[](int k) const {
SkASSERT(0 <= k && k < 8);
union { __m256i v; uint32_t us[8]; } pun = {fVec};
return pun.us[k&7];
@@ -554,101 +547,99 @@ public:
template <>
class SkNx<8, float> {
public:
- I SkNx(const __m256& vec) : fVec(vec) {}
+ AI SkNx(const __m256& vec) : fVec(vec) {}
- I SkNx() {}
- I SkNx(float val) : fVec(_mm256_set1_ps(val)) {}
- I SkNx(float a, float b, float c, float d,
- float e, float f, float g, float h) : fVec(_mm256_setr_ps(a,b,c,d,e,f,g,h)) {}
+ AI SkNx() {}
+ AI SkNx(float val) : fVec(_mm256_set1_ps(val)) {}
+ AI SkNx(float a, float b, float c, float d,
+ float e, float f, float g, float h) : fVec(_mm256_setr_ps(a,b,c,d,e,f,g,h)) {}
- SkNx(const SkNx_abi<8,float>& a) : fVec(a.vec) {}
- operator SkNx_abi<8,float>() const { return { fVec }; }
+ AI SkNx(const SkNx_abi<8,float>& a) : fVec(a.vec) {}
+ AI operator SkNx_abi<8,float>() const { return { fVec }; }
- I static SkNx Load(const void* ptr) { return _mm256_loadu_ps((const float*)ptr); }
- I void store(void* ptr) const { _mm256_storeu_ps((float*)ptr, fVec); }
+ AI static SkNx Load(const void* ptr) { return _mm256_loadu_ps((const float*)ptr); }
+ AI void store(void* ptr) const { _mm256_storeu_ps((float*)ptr, fVec); }
- I SkNx operator+(const SkNx& o) const { return _mm256_add_ps(fVec, o.fVec); }
- I SkNx operator-(const SkNx& o) const { return _mm256_sub_ps(fVec, o.fVec); }
- I SkNx operator*(const SkNx& o) const { return _mm256_mul_ps(fVec, o.fVec); }
- I SkNx operator/(const SkNx& o) const { return _mm256_div_ps(fVec, o.fVec); }
+ AI SkNx operator+(const SkNx& o) const { return _mm256_add_ps(fVec, o.fVec); }
+ AI SkNx operator-(const SkNx& o) const { return _mm256_sub_ps(fVec, o.fVec); }
+ AI SkNx operator*(const SkNx& o) const { return _mm256_mul_ps(fVec, o.fVec); }
+ AI SkNx operator/(const SkNx& o) const { return _mm256_div_ps(fVec, o.fVec); }
- I SkNx operator==(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_EQ_OQ); }
- I SkNx operator!=(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_NEQ_OQ); }
- I SkNx operator <(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_LT_OQ); }
- I SkNx operator >(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_GT_OQ); }
- I SkNx operator<=(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_LE_OQ); }
- I SkNx operator>=(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_GE_OQ); }
+ AI SkNx operator==(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_EQ_OQ); }
+ AI SkNx operator!=(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_NEQ_OQ); }
+ AI SkNx operator <(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_LT_OQ); }
+ AI SkNx operator >(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_GT_OQ); }
+ AI SkNx operator<=(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_LE_OQ); }
+ AI SkNx operator>=(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_GE_OQ); }
- I static SkNx Min(const SkNx& l, const SkNx& r) { return _mm256_min_ps(l.fVec, r.fVec); }
- I static SkNx Max(const SkNx& l, const SkNx& r) { return _mm256_max_ps(l.fVec, r.fVec); }
+ AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm256_min_ps(l.fVec, r.fVec); }
+ AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm256_max_ps(l.fVec, r.fVec); }
- I SkNx sqrt() const { return _mm256_sqrt_ps (fVec); }
- I SkNx rsqrt() const { return _mm256_rsqrt_ps(fVec); }
- I SkNx invert() const { return _mm256_rcp_ps (fVec); }
+ AI SkNx sqrt() const { return _mm256_sqrt_ps (fVec); }
+ AI SkNx rsqrt() const { return _mm256_rsqrt_ps(fVec); }
+ AI SkNx invert() const { return _mm256_rcp_ps (fVec); }
- I float operator[](int k) const {
+ AI float operator[](int k) const {
SkASSERT(0 <= k && k < 8);
union { __m256 v; float fs[8]; } pun = {fVec};
return pun.fs[k&7];
}
- I SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
return _mm256_blendv_ps(e.fVec, t.fVec, fVec);
}
__m256 fVec;
};
- static I void SkNx_split(const Sk8f& v, Sk4f* lo, Sk4f* hi) {
+ AI static void SkNx_split(const Sk8f& v, Sk4f* lo, Sk4f* hi) {
*lo = _mm256_extractf128_ps(v.fVec, 0);
*hi = _mm256_extractf128_ps(v.fVec, 1);
}
- static I Sk8f SkNx_join(const Sk4f& lo, const Sk4f& hi) {
+ AI static Sk8f SkNx_join(const Sk4f& lo, const Sk4f& hi) {
return _mm256_insertf128_ps(_mm256_castps128_ps256(lo.fVec), hi.fVec, 1);
}
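
A quick usage sketch, assuming the Sk8f/Sk4f aliases in scope here: split then join is the identity, with the low half coming out first:

    Sk8f v(0,1,2,3,4,5,6,7);
    Sk4f lo, hi;
    SkNx_split(v, &lo, &hi);     // lo = {0,1,2,3}, hi = {4,5,6,7}
    Sk8f w = SkNx_join(lo, hi);  // w matches v lane for lane
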
- static I Sk8f SkNx_fma(const Sk8f& a, const Sk8f& b, const Sk8f& c) {
+ AI static Sk8f SkNx_fma(const Sk8f& a, const Sk8f& b, const Sk8f& c) {
return _mm256_fmadd_ps(a.fVec, b.fVec, c.fVec);
}
- template<> /*static*/ I Sk8f SkNx_cast<float>(const Sk8b& src) {
+ template<> AI /*static*/ Sk8f SkNx_cast<float>(const Sk8b& src) {
return _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(src.fVec));
}
- template<> /*static*/ I Sk8f SkNx_cast<float>(const Sk8i& src) {
+ template<> AI /*static*/ Sk8f SkNx_cast<float>(const Sk8i& src) {
return _mm256_cvtepi32_ps(src.fVec);
}
- template<> /*static*/ I Sk8i SkNx_cast<int>(const Sk8f& src) {
+ template<> AI /*static*/ Sk8i SkNx_cast<int>(const Sk8f& src) {
return _mm256_cvttps_epi32(src.fVec);
}
- template<> /*static*/ I Sk8i SkNx_cast<int>(const Sk8h& src) {
+ template<> AI /*static*/ Sk8i SkNx_cast<int>(const Sk8h& src) {
return _mm256_cvtepu16_epi32(src.fVec);
}
- template<> /*static*/ I Sk8h SkNx_cast<uint16_t>(const Sk8i& src) {
+ template<> AI /*static*/ Sk8h SkNx_cast<uint16_t>(const Sk8i& src) {
__m128i lo = _mm256_extractf128_si256(src.fVec, 0),
hi = _mm256_extractf128_si256(src.fVec, 1);
return _mm_packus_epi32(lo, hi);
}
-#undef I
-
#endif
-template<> /*static*/ inline Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
+template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
return _mm_cvtepi32_ps(src.fVec);
}
-template<> /*static*/ inline Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
+template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
return SkNx_cast<float>(Sk4i::Load(&src));
}
-template <> /*static*/ inline Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
+template <> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
return _mm_cvttps_epi32(src.fVec);
}
-template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
+template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
#if 0 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
// TODO: This seems to be causing code generation problems. Investigate?
return _mm_packus_epi32(src.fVec);
@@ -663,11 +654,11 @@ template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src)
#endif
}
-template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
+template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
return SkNx_cast<uint16_t>(SkNx_cast<int32_t>(src));
}
-template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
+template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
auto _32 = _mm_cvttps_epi32(src.fVec);
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
const int _ = ~0;
@@ -678,7 +669,7 @@ template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
#endif
}
-template<> /*static*/ inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
+template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
const int _ = ~0;
auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
@@ -689,12 +680,12 @@ template<> /*static*/ inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
return _mm_cvtepi32_ps(_32);
}
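
Both byte<->float casts lean on a pshufb detail: any selector byte with its high bit set writes a zero, which is why the ~0 lanes in the masks above act as padding. A hedged sketch of the widening step (hypothetical helper name):

    #include <tmmintrin.h>  // SSSE3 _mm_shuffle_epi8

    static inline __m128i widen_u8x4_to_u32x4(__m128i v) {
        const char _ = ~0;  // high bit set => pshufb zeroes that output byte
        return _mm_shuffle_epi8(v, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
    }
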
-template<> /*static*/ inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
+template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
return _mm_cvtepi32_ps(_32);
}
-template<> /*static*/ inline Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
+template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
Sk8f ab, cd;
SkNx_split(src, &ab, &cd);
@@ -708,27 +699,27 @@ template<> /*static*/ inline Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
_mm_cvttps_epi32(d.fVec)));
}
-template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
+template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
}
-template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
+template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
return _mm_packus_epi16(src.fVec, src.fVec);
}
-template<> /*static*/ inline Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
+template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
}
-template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
+template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
return _mm_packus_epi16(_mm_packus_epi16(src.fVec, src.fVec), src.fVec);
}
-template<> /*static*/ inline Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
+template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
return src.fVec;
}
-static inline Sk4i Sk4f_round(const Sk4f& x) {
+AI static Sk4i Sk4f_round(const Sk4f& x) {
return _mm_cvtps_epi32(x.fVec);
}
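
One subtlety: Sk4f_round rounds, while the SkNx_cast<int32_t, float> specialization above truncates. Under the default round-to-nearest-even mode (an assumption; MXCSR controls it), the two differ on ties and on negatives, as this illustrative check shows:

    #include <emmintrin.h>
    #include <cstdio>

    int main() {
        __m128 x = _mm_setr_ps(0.5f, 1.5f, -0.5f, 2.7f);
        union { __m128i v; int i[4]; } r = {_mm_cvtps_epi32 (x)},  // {0, 2, 0, 3}
                                       t = {_mm_cvttps_epi32(x)};  // {0, 1, 0, 2}
        printf("round: %d %d %d %d\n", r.i[0], r.i[1], r.i[2], r.i[3]);
        printf("trunc: %d %d %d %d\n", t.i[0], t.i[1], t.i[2], t.i[3]);
        return 0;
    }
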