path: root/src/opts
author    mtklein <mtklein@chromium.org>    2016-03-21 10:04:46 -0700
committer Commit bot <commit-bot@chromium.org>    2016-03-21 10:04:46 -0700
commit    f8f90e4a85638faa18e7b4133cfe4d1ff5b1b23e (patch)
tree      f9288bc101e5c87ac35a29d595ba09a750297c8e /src/opts
parent    04cdc4b61879849df63e883e68eecafe6510f423 (diff)
SkNx refresh
- rearrange a bit
- fewer macros
- hooks for all operators
- add left and right scalar operator overrides
- add +=, &=, <<=, etc.
- add SkNx_split() and SkNx_join()
- simplify the many rsqrt() and invert() options to just what we actually use

This refactoring pointed out that our float <-> int NEON conversions were not specialized, so I've implemented them. It seems nice that this is an error rather than silently falling back to serial code.

It's unclear to me whether split/join want to be external functions, static methods, or non-static methods (SkNx_join(), Sk4f::Join(), x.join()). Time will tell?

BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1812233003
CQ_EXTRA_TRYBOTS=client.skia.android:Test-Android-GCC-Nexus5-CPU-NEON-Arm7-Release-Trybot;client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot

Review URL: https://codereview.chromium.org/1812233003
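For orientation, here is a rough sketch of how the new pieces compose. The SkNx_split() calling convention is taken from the hunks in this CL; the SkNx_join() signature, the Sk8f splat constructor, and the scalar/compound-assignment lines are assumptions based on the bullet list above, not code copied from the tree.

    #include "SkNx.h"

    static void sknx_refresh_sketch() {
        Sk8f v(1.0f);                  // splat constructor
        Sk4f lo, hi;
        SkNx_split(v, &lo, &hi);       // halve the lane count: lo = lanes 0-3, hi = lanes 4-7
        Sk8f w = SkNx_join(lo, hi);    // assumed inverse of SkNx_split()

        Sk4f x(1, 2, 3, 4);
        x += Sk4f(0.5f);               // new compound assignment
        x = 2.0f * x;                  // new left-scalar operator override
        (void)w; (void)x;
    }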
Diffstat (limited to 'src/opts')
-rw-r--r--  src/opts/SkNx_neon.h       | 114
-rw-r--r--  src/opts/SkNx_sse.h        |  39
-rw-r--r--  src/opts/SkXfermode_opts.h |   4
3 files changed, 98 insertions, 59 deletions
diff --git a/src/opts/SkNx_neon.h b/src/opts/SkNx_neon.h
index 52b2e73798..66f8074e40 100644
--- a/src/opts/SkNx_neon.h
+++ b/src/opts/SkNx_neon.h
@@ -54,16 +54,11 @@ public:
void store(void* ptr) const { vst1_f32((float*)ptr, fVec); }
- SkNx approxInvert() const {
+ SkNx invert() const {
float32x2_t est0 = vrecpe_f32(fVec),
est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
return est1;
}
- SkNx invert() const {
- float32x2_t est1 = this->approxInvert().fVec,
- est2 = vmul_f32(vrecps_f32(est1, fVec), est1);
- return est2;
- }
SkNx operator + (const SkNx& o) const { return vadd_f32(fVec, o.fVec); }
SkNx operator - (const SkNx& o) const { return vsub_f32(fVec, o.fVec); }
@@ -72,7 +67,10 @@ public:
#if defined(SK_CPU_ARM64)
return vdiv_f32(fVec, o.fVec);
#else
- return vmul_f32(fVec, o.invert().fVec);
+ float32x2_t est0 = vrecpe_f32(o.fVec),
+ est1 = vmul_f32(vrecps_f32(est0, o.fVec), est0),
+ est2 = vmul_f32(vrecps_f32(est1, o.fVec), est1);
+ return vmul_f32(fVec, est2);
#endif
}
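On 32-bit ARM there is no vector divide instruction, so the non-ARM64 branch above multiplies by a refined reciprocal estimate: vrecpe_f32 produces a rough 1/x, and vrecps_f32(x, e) computes 2 - x*e, so each est step is one Newton-Raphson iteration. A scalar model of the sequence, for illustration only (not Skia code):

    #include <cstdio>

    // e' = e * (2 - x*e) converges quadratically to 1/x, so two steps take a
    // rough vrecpe-style estimate to near single-precision accuracy.
    static float recip_two_steps(float x, float est) {
        est = est * (2.0f - x * est);   // est1
        est = est * (2.0f - x * est);   // est2
        return est;
    }

    int main() {
        float x = 3.0f, est = 0.3f;     // 0.3 stands in for a vrecpe_f32 estimate
        printf("%.7f\n", recip_two_steps(x, est));   // prints ~0.3333333
        return 0;
    }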
@@ -88,21 +86,19 @@ public:
static SkNx Min(const SkNx& l, const SkNx& r) { return vmin_f32(l.fVec, r.fVec); }
static SkNx Max(const SkNx& l, const SkNx& r) { return vmax_f32(l.fVec, r.fVec); }
- SkNx rsqrt0() const { return vrsqrte_f32(fVec); }
- SkNx rsqrt1() const {
- float32x2_t est0 = this->rsqrt0().fVec;
+ SkNx rsqrt() const {
+ float32x2_t est0 = vrsqrte_f32(fVec);
return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
}
- SkNx rsqrt2() const {
- float32x2_t est1 = this->rsqrt1().fVec;
- return vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
- }
SkNx sqrt() const {
#if defined(SK_CPU_ARM64)
return vsqrt_f32(fVec);
#else
- return *this * this->rsqrt2();
+ float32x2_t est0 = vrsqrte_f32(fVec),
+ est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0),
+ est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
+ return vmul_f32(fVec, est2);
#endif
}
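The non-ARM64 sqrt() above plays the same game for 1/sqrt(x): vrsqrte_f32 gives a rough estimate, and vrsqrts_f32(a, b) computes (3 - a*b)/2, so each step is the Newton-Raphson iteration e' = e * (3 - x*e*e)/2; sqrt(x) then falls out as x * (1/sqrt(x)). A scalar model (illustration only):

    // Two refinement steps mirror est1/est2 above; rsqrt() stops after the first.
    static float sqrt_via_rsqrt(float x, float est) {
        est = est * (3.0f - x * est * est) * 0.5f;   // est1
        est = est * (3.0f - x * est * est) * 0.5f;   // est2
        return x * est;                              // sqrt(x) = x * (1/sqrt(x))
    }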
@@ -135,16 +131,11 @@ public:
SkNx(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }
void store(void* ptr) const { vst1q_f32((float*)ptr, fVec); }
- SkNx approxInvert() const {
+ SkNx invert() const {
float32x4_t est0 = vrecpeq_f32(fVec),
est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
return est1;
}
- SkNx invert() const {
- float32x4_t est1 = this->approxInvert().fVec,
- est2 = vmulq_f32(vrecpsq_f32(est1, fVec), est1);
- return est2;
- }
SkNx operator + (const SkNx& o) const { return vaddq_f32(fVec, o.fVec); }
SkNx operator - (const SkNx& o) const { return vsubq_f32(fVec, o.fVec); }
@@ -153,7 +144,10 @@ public:
#if defined(SK_CPU_ARM64)
return vdivq_f32(fVec, o.fVec);
#else
- return vmulq_f32(fVec, o.invert().fVec);
+ float32x4_t est0 = vrecpeq_f32(o.fVec),
+ est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0),
+ est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1);
+ return vmulq_f32(fVec, est2);
#endif
}
@@ -179,21 +173,19 @@ public:
}
- SkNx rsqrt0() const { return vrsqrteq_f32(fVec); }
- SkNx rsqrt1() const {
- float32x4_t est0 = this->rsqrt0().fVec;
+ SkNx rsqrt() const {
+ float32x4_t est0 = vrsqrteq_f32(fVec);
return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
}
- SkNx rsqrt2() const {
- float32x4_t est1 = this->rsqrt1().fVec;
- return vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
- }
SkNx sqrt() const {
#if defined(SK_CPU_ARM64)
return vsqrtq_f32(fVec);
#else
- return *this * this->rsqrt2();
+ float32x4_t est0 = vrsqrteq_f32(fVec),
+ est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0),
+ est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
+ return vmulq_f32(fVec, est2);
#endif
}
@@ -364,10 +356,55 @@ public:
uint8x16_t fVec;
};
+template <>
+class SkNx<4, int> {
+public:
+ SkNx(const int32x4_t& vec) : fVec(vec) {}
+
+ SkNx() {}
+ SkNx(int v) {
+ fVec = vdupq_n_s32(v);
+ }
+ SkNx(int a, int b, int c, int d) {
+ fVec = (int32x4_t){a,b,c,d};
+ }
+ static SkNx Load(const void* ptr) {
+ return vld1q_s32((const int32_t*)ptr);
+ }
+ void store(void* ptr) const {
+ return vst1q_s32((int32_t*)ptr, fVec);
+ }
+ int operator[](int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { int32x4_t v; int is[4]; } pun = {fVec};
+ return pun.is[k&3];
+ }
+
+ SkNx operator + (const SkNx& o) const { return vaddq_s32(fVec, o.fVec); }
+ SkNx operator - (const SkNx& o) const { return vsubq_s32(fVec, o.fVec); }
+ SkNx operator * (const SkNx& o) const { return vmulq_s32(fVec, o.fVec); }
+
+ SkNx operator << (int bits) const { SHIFT32(vshlq_n_s32, fVec, bits); }
+ SkNx operator >> (int bits) const { SHIFT32(vshrq_n_s32, fVec, bits); }
+
+ static SkNx Min(const SkNx& a, const SkNx& b) { return vminq_s32(a.fVec, b.fVec); }
+ // TODO as needed
+
+ int32x4_t fVec;
+};
+
#undef SHIFT32
#undef SHIFT16
#undef SHIFT8
+template<> inline Sk4i SkNx_cast<int, float>(const Sk4f& src) {
return vcvtq_s32_f32(src.fVec);
+}
+template<> inline Sk4f SkNx_cast<float, int>(const Sk4i& src) {
+ return vcvtq_f32_s32(src.fVec);
+}
+
template<> inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
return vqmovn_u32(vcvtq_u32_f32(src.fVec));
}
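The two new specializations above make Sk4f <-> Sk4i conversion explicit on NEON rather than a missing-specialization error. Note that vcvtq_s32_f32 truncates toward zero. A usage sketch, assuming SkNx.h is included and that the one-type-argument SkNx_cast<D>(src) wrapper deduces the source type as elsewhere in SkNx:

    Sk4f f(-1.9f, -0.5f, 0.5f, 1.9f);
    Sk4i i = SkNx_cast<int>(f);     // {-1, 0, 0, 1}: truncation toward zero
    Sk4f g = SkNx_cast<float>(i);   // {-1.0f, 0.0f, 0.0f, 1.0f}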
@@ -388,12 +425,17 @@ template<> inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
return vcvtq_f32_u32(_32);
}
-static inline void Sk4f_ToBytes(uint8_t bytes[16],
- const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
- vst1q_u8(bytes, vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec),
- (uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0],
- vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec),
- (uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0]);
+template<> inline Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
+ Sk8f ab, cd;
+ SkNx_split(src, &ab, &cd);
+
+ Sk4f a,b,c,d;
+ SkNx_split(ab, &a, &b);
+ SkNx_split(cd, &c, &d);
+ return vuzpq_u8(vuzpq_u8((uint8x16_t)vcvtq_u32_f32(a.fVec),
+ (uint8x16_t)vcvtq_u32_f32(b.fVec)).val[0],
+ vuzpq_u8((uint8x16_t)vcvtq_u32_f32(c.fVec),
+ (uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0];
}
template<> inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
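This hunk folds the old Sk4f_ToBytes() helper into the generic cast framework: the implementation recovers the four Sk4f quarters with SkNx_split() before the same uzp narrowing dance. A hypothetical call site, assuming SkNx_join() composes as the inverse of SkNx_split():

    // Where code used to call Sk4f_ToBytes(bytes, a, b, c, d):
    Sk16f all = SkNx_join(SkNx_join(a, b), SkNx_join(c, d));
    SkNx_cast<uint8_t>(all).store(bytes);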
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
index 34b58c10ed..80c7f0e9ae 100644
--- a/src/opts/SkNx_sse.h
+++ b/src/opts/SkNx_sse.h
@@ -52,13 +52,9 @@ public:
static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }
- SkNx sqrt () const { return _mm_sqrt_ps (fVec); }
- SkNx rsqrt0() const { return _mm_rsqrt_ps(fVec); }
- SkNx rsqrt1() const { return this->rsqrt0(); }
- SkNx rsqrt2() const { return this->rsqrt1(); }
-
- SkNx invert() const { return SkNx(1) / *this; }
- SkNx approxInvert() const { return _mm_rcp_ps(fVec); }
+ SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
+ SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
+ SkNx invert() const { return _mm_rcp_ps(fVec); }
float operator[](int k) const {
SkASSERT(0 <= k && k < 2);
@@ -103,13 +99,9 @@ public:
SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
SkNx floor() const { return sse2_mm_floor_ps(fVec); }
- SkNx sqrt () const { return _mm_sqrt_ps (fVec); }
- SkNx rsqrt0() const { return _mm_rsqrt_ps(fVec); }
- SkNx rsqrt1() const { return this->rsqrt0(); }
- SkNx rsqrt2() const { return this->rsqrt1(); }
-
- SkNx invert() const { return SkNx(1) / *this; }
- SkNx approxInvert() const { return _mm_rcp_ps(fVec); }
+ SkNx sqrt() const { return _mm_sqrt_ps (fVec); }
+ SkNx rsqrt() const { return _mm_rsqrt_ps(fVec); }
+ SkNx invert() const { return _mm_rcp_ps(fVec); }
float operator[](int k) const {
SkASSERT(0 <= k && k < 4);
@@ -346,13 +338,18 @@ template<> /*static*/ inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
return _mm_cvtepi32_ps(_32);
}
-static inline void Sk4f_ToBytes(uint8_t bytes[16],
- const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
- _mm_storeu_si128((__m128i*)bytes,
- _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
- _mm_cvttps_epi32(b.fVec)),
- _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
- _mm_cvttps_epi32(d.fVec))));
+template<> /*static*/ inline Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
+ Sk8f ab, cd;
+ SkNx_split(src, &ab, &cd);
+
+ Sk4f a,b,c,d;
+ SkNx_split(ab, &a, &b);
+ SkNx_split(cd, &c, &d);
+
+ return _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
+ _mm_cvttps_epi32(b.fVec)),
+ _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
+ _mm_cvttps_epi32(d.fVec)));
}
template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
diff --git a/src/opts/SkXfermode_opts.h b/src/opts/SkXfermode_opts.h
index 8b64d0b74a..54f906e4f6 100644
--- a/src/opts/SkXfermode_opts.h
+++ b/src/opts/SkXfermode_opts.h
@@ -133,7 +133,7 @@ XFERMODE(ColorDodge) {
auto srcover = s + d*isa,
dstover = d + s*ida,
- otherwise = sa * Sk4f::Min(da, (d*sa)*(sa-s).approxInvert()) + s*ida + d*isa;
+ otherwise = sa * Sk4f::Min(da, (d*sa)*(sa-s).invert()) + s*ida + d*isa;
// Order matters here, preferring d==0 over s==sa.
auto colors = (d == Sk4f(0)).thenElse(dstover,
@@ -149,7 +149,7 @@ XFERMODE(ColorBurn) {
auto srcover = s + d*isa,
dstover = d + s*ida,
- otherwise = sa*(da-Sk4f::Min(da, (da-d)*sa*s.approxInvert())) + s*ida + d*isa;
+ otherwise = sa*(da-Sk4f::Min(da, (da-d)*sa*s.invert())) + s*ida + d*isa;
// Order matters here, preferring d==da over s==0.
auto colors = (d == da).thenElse(dstover,