author    mtklein <mtklein@chromium.org>            2015-03-30 10:50:27 -0700
committer Commit bot <commit-bot@chromium.org>      2015-03-30 10:50:27 -0700
commit    c9adb05b64fa0bfadf9d1a782afcda470da68c9e (patch)
tree      6413cc149b70ae36181e9f0789246b9db24447f0 /src/opts
parent    23ac62c83a49d675a38f1c20462b5537f3c8af01 (diff)
Refactor Sk2x<T> + Sk4x<T> into SkNf<N,T> and SkNi<N,T>
The primary feature this delivers is SkNf and SkNd for arbitrary power-of-two N. Non-specialized types or types larger than 128 bits should now Just Work (and we can drop in a specialization to make them faster).

Sk4s is now just a typedef for SkNf<4, SkScalar>; Sk4d is SkNf<4, double>, Sk2f is SkNf<2, float>, etc.

This also makes implementing new specializations easier and more encapsulated. We're now using template specialization, which means the specialized versions don't have to leak out so much from SkNx_sse.h and SkNx_neon.h.

This design leaves us room to grow up, e.g. to SkNf<8, SkScalar> == Sk8s, and to grow down too, to things like SkNi<8, uint16_t> == Sk8h.

To simplify things, I've stripped away most APIs (swizzles, casts, reinterpret_casts) that no one's using yet. I will happily add them back if they seem useful.

You shouldn't feel bad about using any of the typedefs Sk4s, Sk4f, Sk4d, Sk2s, Sk2f, Sk2d, Sk4i, etc. Here's how you should feel:
  - Sk4f, Sk4s, Sk2d: feel awesome
  - Sk2f, Sk2s, Sk4d: feel pretty good

No public API changes.

TBR=reed@google.com
BUG=skia:3592

Review URL: https://codereview.chromium.org/1048593002
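For readers skimming the diff below, here is a minimal, hypothetical sketch of the layering this change describes: a generic SkNf<N,T> built recursively from two halves, with the public typedefs mapped onto it, and per-width template specializations (the SkNx_sse.h and SkNx_neon.h files in this diff) overriding the generic version for the sizes the CPU handles natively. The generic fallback and the SkScalar typedef shown here are illustrative assumptions, not code from this change (the diff below is limited to src/opts):

    // Hypothetical sketch, not part of this CL: a generic SkNf<N,T> that splits
    // into two halves of width N/2.  Specializations for particular N and T
    // (e.g. SkNf<4, float> in SkNx_sse.h / SkNx_neon.h below) replace it.

    typedef float SkScalar;   // assumption for this sketch: SkScalar is float

    template <int N, typename T>
    class SkNf {
    public:
        SkNf() {}
        explicit SkNf(T val) : fLo(val), fHi(val) {}
        static SkNf Load(const T vals[N]) {
            return SkNf(SkNf<N/2, T>::Load(vals), SkNf<N/2, T>::Load(vals + N/2));
        }
        void store(T vals[N]) const { fLo.store(vals); fHi.store(vals + N/2); }

        SkNf operator + (const SkNf& o) const { return SkNf(fLo + o.fLo, fHi + o.fHi); }
        SkNf operator * (const SkNf& o) const { return SkNf(fLo * o.fLo, fHi * o.fHi); }

    private:
        SkNf(const SkNf<N/2, T>& lo, const SkNf<N/2, T>& hi) : fLo(lo), fHi(hi) {}
        SkNf<N/2, T> fLo, fHi;   // e.g. an SkNf<8, float> is just two SkNf<4, float>s
    };

    // The recursion bottoms out at a single scalar lane.
    template <typename T>
    class SkNf<1, T> {
    public:
        SkNf() {}
        explicit SkNf(T val) : fVal(val) {}
        static SkNf Load(const T vals[1]) { return SkNf(vals[0]); }
        void store(T vals[1]) const { vals[0] = fVal; }
        SkNf operator + (const SkNf& o) const { return SkNf(fVal + o.fVal); }
        SkNf operator * (const SkNf& o) const { return SkNf(fVal * o.fVal); }
    private:
        T fVal;
    };

    // The typedefs described above then map the familiar names onto SkNf<N,T>:
    typedef SkNf<2, float>    Sk2f;
    typedef SkNf<2, double>   Sk2d;
    typedef SkNf<4, float>    Sk4f;
    typedef SkNf<4, double>   Sk4d;
    typedef SkNf<2, SkScalar> Sk2s;
    typedef SkNf<4, SkScalar> Sk4s;   // same type as Sk4f when SkScalar is float

Under that scheme, code written against Sk4f::Load(...), operator+, store(), etc. behaves the same whether or not a fast specialization exists for the current CPU; a specialization only changes how the lanes are stored and operated on.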
Diffstat (limited to 'src/opts')
-rw-r--r--  src/opts/Sk2x_neon.h        | 158
-rw-r--r--  src/opts/Sk2x_none.h        |  72
-rw-r--r--  src/opts/Sk2x_sse.h         |  82
-rw-r--r--  src/opts/Sk4x_neon.h        | 166
-rw-r--r--  src/opts/Sk4x_none.h        | 120
-rw-r--r--  src/opts/Sk4x_sse.h         | 177
-rw-r--r--  src/opts/SkNx_neon.h        | 257
-rw-r--r--  src/opts/SkNx_sse.h         | 190
-rw-r--r--  src/opts/SkPMFloat_SSE2.h   |  17
-rw-r--r--  src/opts/SkPMFloat_SSSE3.h  |  19
-rw-r--r--  src/opts/SkPMFloat_neon.h   |  11
-rw-r--r--  src/opts/SkPMFloat_none.h   |   5
12 files changed, 463 insertions, 811 deletions
diff --git a/src/opts/Sk2x_neon.h b/src/opts/Sk2x_neon.h
deleted file mode 100644
index 8e6e46164b..0000000000
--- a/src/opts/Sk2x_neon.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-// It is important _not_ to put header guards here.
-// This file will be intentionally included three times.
-
-#include "SkTypes.h" // Keep this before any #ifdef for skbug.com/3362
-
-#if defined(SK2X_PREAMBLE)
- #include <arm_neon.h>
- #include <math.h>
- template <typename T> struct SkScalarToSIMD;
- template <> struct SkScalarToSIMD< float> { typedef float32x2_t Type; };
- #if defined(SK_CPU_ARM64)
- template <> struct SkScalarToSIMD<double> { typedef float64x2_t Type; };
- #else
- template <> struct SkScalarToSIMD<double> { typedef double Type[2]; };
- #endif
-
-
-#elif defined(SK2X_PRIVATE)
- typename SkScalarToSIMD<T>::Type fVec;
- /*implicit*/ Sk2x(const typename SkScalarToSIMD<T>::Type vec) { fVec = vec; }
-
-#else
-
-#define M(...) template <> inline __VA_ARGS__ Sk2x<float>::
-
-M() Sk2x() {}
-M() Sk2x(float val) { fVec = vdup_n_f32(val); }
-M() Sk2x(float a, float b) { fVec = (float32x2_t) { a, b }; }
-M(Sk2f&) operator=(const Sk2f& o) { fVec = o.fVec; return *this; }
-
-M(Sk2f) Load(const float vals[2]) { return vld1_f32(vals); }
-M(void) store(float vals[2]) const { vst1_f32(vals, fVec); }
-
-M(Sk2f) approxInvert() const {
- float32x2_t est0 = vrecpe_f32(fVec),
- est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
- return est1;
-}
-
-M(Sk2f) invert() const {
- float32x2_t est1 = this->approxInvert().fVec,
- est2 = vmul_f32(vrecps_f32(est1, fVec), est1);
- return est2;
-}
-
-M(Sk2f) add(const Sk2f& o) const { return vadd_f32(fVec, o.fVec); }
-M(Sk2f) subtract(const Sk2f& o) const { return vsub_f32(fVec, o.fVec); }
-M(Sk2f) multiply(const Sk2f& o) const { return vmul_f32(fVec, o.fVec); }
-M(Sk2f) divide(const Sk2f& o) const {
-#if defined(SK_CPU_ARM64)
- return vdiv_f32(fVec, o.fVec);
-#else
- return vmul_f32(fVec, o.invert().fVec);
-#endif
-}
-
-M(Sk2f) Min(const Sk2f& a, const Sk2f& b) { return vmin_f32(a.fVec, b.fVec); }
-M(Sk2f) Max(const Sk2f& a, const Sk2f& b) { return vmax_f32(a.fVec, b.fVec); }
-
-M(Sk2f) rsqrt() const {
- float32x2_t est0 = vrsqrte_f32(fVec),
- est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
- return est1;
-}
-M(Sk2f) sqrt() const {
-#if defined(SK_CPU_ARM64)
- return vsqrt_f32(fVec);
-#else
- float32x2_t est1 = this->rsqrt().fVec,
- // An extra step of Newton's method to refine the estimate of 1/sqrt(this).
- est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
- return vmul_f32(fVec, est2);
-#endif
-}
-
-#undef M
-
-#define M(...) template <> inline __VA_ARGS__ Sk2x<double>::
-
-#if defined(SK_CPU_ARM64)
- M() Sk2x() {}
- M() Sk2x(double val) { fVec = vdupq_n_f64(val); }
- M() Sk2x(double a, double b) { fVec = (float64x2_t) { a, b }; }
- M(Sk2d&) operator=(const Sk2d& o) { fVec = o.fVec; return *this; }
-
- M(Sk2d) Load(const double vals[2]) { return vld1q_f64(vals); }
- M(void) store(double vals[2]) const { vst1q_f64(vals, fVec); }
-
- M(Sk2d) add(const Sk2d& o) const { return vaddq_f64(fVec, o.fVec); }
- M(Sk2d) subtract(const Sk2d& o) const { return vsubq_f64(fVec, o.fVec); }
- M(Sk2d) multiply(const Sk2d& o) const { return vmulq_f64(fVec, o.fVec); }
- M(Sk2d) divide(const Sk2d& o) const { return vdivq_f64(fVec, o.fVec); }
-
- M(Sk2d) Min(const Sk2d& a, const Sk2d& b) { return vminq_f64(a.fVec, b.fVec); }
- M(Sk2d) Max(const Sk2d& a, const Sk2d& b) { return vmaxq_f64(a.fVec, b.fVec); }
-
- M(Sk2d) rsqrt() const {
- float64x2_t est0 = vrsqrteq_f64(fVec),
- est1 = vmulq_f64(vrsqrtsq_f64(fVec, vmulq_f64(est0, est0)), est0);
- return est1;
- }
- M(Sk2d) sqrt() const { return vsqrtq_f64(fVec); }
-
- M(Sk2d) approxInvert() const {
- float64x2_t est0 = vrecpeq_f64(fVec),
- est1 = vmulq_f64(vrecpsq_f64(est0, fVec), est0);
- return est1;
- }
-
- M(Sk2d) invert() const {
- float64x2_t est1 = this->approxInvert().fVec,
- est2 = vmulq_f64(vrecpsq_f64(est1, fVec), est1),
- est3 = vmulq_f64(vrecpsq_f64(est2, fVec), est2);
- return est3;
- }
-
-#else // Scalar implementation for 32-bit chips, which don't have float64x2_t.
- M() Sk2x() {}
- M() Sk2x(double val) { fVec[0] = fVec[1] = val; }
- M() Sk2x(double a, double b) { fVec[0] = a; fVec[1] = b; }
- M(Sk2d&) operator=(const Sk2d& o) {
- fVec[0] = o.fVec[0];
- fVec[1] = o.fVec[1];
- return *this;
- }
-
- M(Sk2d) Load(const double vals[2]) { return Sk2d(vals[0], vals[1]); }
- M(void) store(double vals[2]) const { vals[0] = fVec[0]; vals[1] = fVec[1]; }
-
- M(Sk2d) add(const Sk2d& o) const { return Sk2d(fVec[0] + o.fVec[0], fVec[1] + o.fVec[1]); }
- M(Sk2d) subtract(const Sk2d& o) const { return Sk2d(fVec[0] - o.fVec[0], fVec[1] - o.fVec[1]); }
- M(Sk2d) multiply(const Sk2d& o) const { return Sk2d(fVec[0] * o.fVec[0], fVec[1] * o.fVec[1]); }
- M(Sk2d) divide(const Sk2d& o) const { return Sk2d(fVec[0] / o.fVec[0], fVec[1] / o.fVec[1]); }
-
- M(Sk2d) Min(const Sk2d& a, const Sk2d& b) {
- return Sk2d(SkTMin(a.fVec[0], b.fVec[0]), SkTMin(a.fVec[1], b.fVec[1]));
- }
- M(Sk2d) Max(const Sk2d& a, const Sk2d& b) {
- return Sk2d(SkTMax(a.fVec[0], b.fVec[0]), SkTMax(a.fVec[1], b.fVec[1]));
- }
-
- M(Sk2d) rsqrt() const { return Sk2d(1.0/::sqrt(fVec[0]), 1.0/::sqrt(fVec[1])); }
- M(Sk2d) sqrt() const { return Sk2d( ::sqrt(fVec[0]), ::sqrt(fVec[1])); }
-
- M(Sk2d) invert() const { return Sk2d(1.0 / fVec[0], 1.0 / fVec[1]); }
- M(Sk2d) approxInvert() const { return this->invert(); }
-#endif
-
-#undef M
-
-#endif
diff --git a/src/opts/Sk2x_none.h b/src/opts/Sk2x_none.h
deleted file mode 100644
index 2c68e736f4..0000000000
--- a/src/opts/Sk2x_none.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-// It is important _not_ to put header guards here.
-// This file will be intentionally included three times.
-
-#include "SkTypes.h" // Keep this before any #ifdef for skbug.com/3362
-
-#if defined(SK2X_PREAMBLE)
- #include "SkFloatingPoint.h"
- #include <math.h>
-
-#elif defined(SK2X_PRIVATE)
- T fVec[2];
-
-#else
-
-#define M(...) template <typename T> __VA_ARGS__ Sk2x<T>::
-
-M() Sk2x() {}
-M() Sk2x(T val) { fVec[0] = fVec[1] = val; }
-M() Sk2x(T a, T b) { fVec[0] = a; fVec[1] = b; }
-
-M(Sk2x<T>&) operator=(const Sk2x<T>& o) {
- fVec[0] = o.fVec[0];
- fVec[1] = o.fVec[1];
- return *this;
-}
-
-M(Sk2x<T>) Load(const T vals[2]) { return Sk2x<T>(vals[0], vals[1]); }
-M(void) store(T vals[2]) const { vals[0] = fVec[0]; vals[1] = fVec[1]; }
-
-M(Sk2x<T>) add(const Sk2x<T>& o) const {
- return Sk2x<T>(fVec[0] + o.fVec[0], fVec[1] + o.fVec[1]);
-}
-M(Sk2x<T>) subtract(const Sk2x<T>& o) const {
- return Sk2x<T>(fVec[0] - o.fVec[0], fVec[1] - o.fVec[1]);
-}
-M(Sk2x<T>) multiply(const Sk2x<T>& o) const {
- return Sk2x<T>(fVec[0] * o.fVec[0], fVec[1] * o.fVec[1]);
-}
-M(Sk2x<T>) divide(const Sk2x<T>& o) const {
- return Sk2x<T>(fVec[0] / o.fVec[0], fVec[1] / o.fVec[1]);
-}
-
-M(Sk2x<T>) Min(const Sk2x<T>& a, const Sk2x<T>& b) {
- return Sk2x<T>(SkTMin(a.fVec[0], b.fVec[0]), SkTMin(a.fVec[1], b.fVec[1]));
-}
-M(Sk2x<T>) Max(const Sk2x<T>& a, const Sk2x<T>& b) {
- return Sk2x<T>(SkTMax(a.fVec[0], b.fVec[0]), SkTMax(a.fVec[1], b.fVec[1]));
-}
-
-M(Sk2x<T>) invert() const { return Sk2x<T>((T)1.0 / fVec[0], (T)1.0 / fVec[1]); }
-M(Sk2x<T>) approxInvert() const { return this->invert(); }
-
-#undef M
-
-#define M template <> inline
-
-M Sk2f Sk2f::rsqrt() const { return Sk2f(sk_float_rsqrt(fVec[0]), sk_float_rsqrt(fVec[1])); }
-M Sk2f Sk2f:: sqrt() const { return Sk2f( sqrtf(fVec[0]), sqrtf(fVec[1])); }
-
-M Sk2d Sk2d::rsqrt() const { return Sk2d(1.0/::sqrt(fVec[0]), 1.0/::sqrt(fVec[1])); }
-M Sk2d Sk2d:: sqrt() const { return Sk2d( ::sqrt(fVec[0]), ::sqrt(fVec[1])); }
-
-#undef M
-
-#endif
diff --git a/src/opts/Sk2x_sse.h b/src/opts/Sk2x_sse.h
deleted file mode 100644
index 1136f1d856..0000000000
--- a/src/opts/Sk2x_sse.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-// It is important _not_ to put header guards here.
-// This file will be intentionally included three times.
-
-#include "SkTypes.h" // Keep this before any #ifdef for skbug.com/3362
-
-#if defined(SK2X_PREAMBLE)
- #include <immintrin.h>
- template <typename T> struct SkScalarToSIMD;
- template <> struct SkScalarToSIMD< float> { typedef __m128 Type; };
- template <> struct SkScalarToSIMD<double> { typedef __m128d Type; };
-
-
-#elif defined(SK2X_PRIVATE)
- typename SkScalarToSIMD<T>::Type fVec;
- /*implicit*/ Sk2x(const typename SkScalarToSIMD<T>::Type vec) { fVec = vec; }
-
-#else
-
-#define M(...) template <> inline __VA_ARGS__ Sk2x<float>::
-
-M() Sk2x() {}
-M() Sk2x(float val) { fVec = _mm_set1_ps(val); }
-M() Sk2x(float a, float b) { fVec = _mm_set_ps(b,a,b,a); }
-M(Sk2f&) operator=(const Sk2f& o) { fVec = o.fVec; return *this; }
-
-M(Sk2f) Load(const float vals[2]) {
- return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)vals));
-}
-M(void) store(float vals[2]) const { _mm_storel_pi((__m64*)vals, fVec); }
-
-M(Sk2f) add(const Sk2f& o) const { return _mm_add_ps(fVec, o.fVec); }
-M(Sk2f) subtract(const Sk2f& o) const { return _mm_sub_ps(fVec, o.fVec); }
-M(Sk2f) multiply(const Sk2f& o) const { return _mm_mul_ps(fVec, o.fVec); }
-M(Sk2f) divide(const Sk2f& o) const { return _mm_div_ps(fVec, o.fVec); }
-
-M(Sk2f) Min(const Sk2f& a, const Sk2f& b) { return _mm_min_ps(a.fVec, b.fVec); }
-M(Sk2f) Max(const Sk2f& a, const Sk2f& b) { return _mm_max_ps(a.fVec, b.fVec); }
-
-M(Sk2f) rsqrt() const { return _mm_rsqrt_ps(fVec); }
-M(Sk2f) sqrt() const { return _mm_sqrt_ps (fVec); }
-
-M(Sk2f) invert() const { return Sk2f(1.0f) / *this; }
-M(Sk2f) approxInvert() const { return _mm_rcp_ps(fVec); }
-
-#undef M
-
-#define M(...) template <> inline __VA_ARGS__ Sk2x<double>::
-
-M() Sk2x() {}
-M() Sk2x(double val) { fVec = _mm_set1_pd(val); }
-M() Sk2x(double a, double b) { fVec = _mm_set_pd(b, a); }
-M(Sk2d&) operator=(const Sk2d& o) { fVec = o.fVec; return *this; }
-
-M(Sk2d) Load(const double vals[2]) { return _mm_loadu_pd(vals); }
-M(void) store(double vals[2]) const { _mm_storeu_pd(vals, fVec); }
-
-M(Sk2d) add(const Sk2d& o) const { return _mm_add_pd(fVec, o.fVec); }
-M(Sk2d) subtract(const Sk2d& o) const { return _mm_sub_pd(fVec, o.fVec); }
-M(Sk2d) multiply(const Sk2d& o) const { return _mm_mul_pd(fVec, o.fVec); }
-M(Sk2d) divide(const Sk2d& o) const { return _mm_div_pd(fVec, o.fVec); }
-
-M(Sk2d) Min(const Sk2d& a, const Sk2d& b) { return _mm_min_pd(a.fVec, b.fVec); }
-M(Sk2d) Max(const Sk2d& a, const Sk2d& b) { return _mm_max_pd(a.fVec, b.fVec); }
-
-// There is no _mm_rsqrt_pd, so we do Sk2d::rsqrt() in floats.
-M(Sk2d) rsqrt() const { return _mm_cvtps_pd(_mm_rsqrt_ps(_mm_cvtpd_ps(fVec))); }
-M(Sk2d) sqrt() const { return _mm_sqrt_pd(fVec); }
-
-// No _mm_rcp_pd, so do Sk2d::approxInvert() in floats.
-M(Sk2d) invert() const { return Sk2d(1.0) / *this; }
-M(Sk2d) approxInvert() const { return _mm_cvtps_pd(_mm_rcp_ps(_mm_cvtpd_ps(fVec))); }
-
-#undef M
-
-#endif
diff --git a/src/opts/Sk4x_neon.h b/src/opts/Sk4x_neon.h
deleted file mode 100644
index b89c30fcb7..0000000000
--- a/src/opts/Sk4x_neon.h
+++ /dev/null
@@ -1,166 +0,0 @@
-// It is important _not_ to put header guards here.
-// This file will be intentionally included three times.
-
-#include "SkTypes.h" // Keep this before any #ifdef for skbug.com/3362
-
-#if defined(SK4X_PREAMBLE)
- #include <arm_neon.h>
-
- // Template metaprogramming to map scalar types to vector types.
- template <typename T> struct SkScalarToSIMD;
- template <> struct SkScalarToSIMD<float> { typedef float32x4_t Type; };
- template <> struct SkScalarToSIMD<int32_t> { typedef int32x4_t Type; };
-
-#elif defined(SK4X_PRIVATE)
- Sk4x(float32x4_t);
- Sk4x(int32x4_t);
-
- typename SkScalarToSIMD<T>::Type fVec;
-
-#else
-
-// Vector Constructors
-//template <> inline Sk4f::Sk4x(int32x4_t v) : fVec(vcvtq_f32_s32(v)) {}
-template <> inline Sk4f::Sk4x(float32x4_t v) : fVec(v) {}
-template <> inline Sk4i::Sk4x(int32x4_t v) : fVec(v) {}
-//template <> inline Sk4i::Sk4x(float32x4_t v) : fVec(vcvtq_s32_f32(v)) {}
-
-// Generic Methods
-template <typename T> Sk4x<T>::Sk4x() {}
-template <typename T> Sk4x<T>::Sk4x(const Sk4x& other) { *this = other; }
-template <typename T> Sk4x<T>& Sk4x<T>::operator=(const Sk4x<T>& other) {
- fVec = other.fVec;
- return *this;
-}
-
-// Sk4f Methods
-#define M(...) template <> inline __VA_ARGS__ Sk4f::
-
-M() Sk4x(float v) : fVec(vdupq_n_f32(v)) {}
-M() Sk4x(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }
-
-// As far as I can tell, it's not possible to provide an alignment hint to
-// NEON using intrinsics. However, I think it is possible at the assembly
-// level if we want to get into that.
-// TODO: Write our own aligned load and store.
-M(Sk4f) Load (const float fs[4]) { return vld1q_f32(fs); }
-M(Sk4f) LoadAligned(const float fs[4]) { return vld1q_f32(fs); }
-M(void) store (float fs[4]) const { vst1q_f32(fs, fVec); }
-M(void) storeAligned(float fs[4]) const { vst1q_f32 (fs, fVec); }
-
-template <>
-M(Sk4i) reinterpret<Sk4i>() const { return vreinterpretq_s32_f32(fVec); }
-
-template <>
-M(Sk4i) cast<Sk4i>() const { return vcvtq_s32_f32(fVec); }
-
-// We're going to skip allTrue(), anyTrue(), and bit-manipulators
-// for Sk4f. Code that calls them probably does so accidentally.
-// Ask msarett or mtklein to fill these in if you really need them.
-M(Sk4f) add (const Sk4f& o) const { return vaddq_f32(fVec, o.fVec); }
-M(Sk4f) subtract(const Sk4f& o) const { return vsubq_f32(fVec, o.fVec); }
-M(Sk4f) multiply(const Sk4f& o) const { return vmulq_f32(fVec, o.fVec); }
-
-M(Sk4f) divide (const Sk4f& o) const {
-#if defined(SK_CPU_ARM64)
- return vdivq_f32(fVec, o.fVec);
-#else
- float32x4_t est0 = vrecpeq_f32(o.fVec),
- est1 = vmulq_f32(vrecpsq_f32(est0, o.fVec), est0),
- est2 = vmulq_f32(vrecpsq_f32(est1, o.fVec), est1);
- return vmulq_f32(est2, fVec);
-#endif
-}
-
-M(Sk4f) rsqrt() const {
- float32x4_t est0 = vrsqrteq_f32(fVec),
- est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
- return est1;
-}
-
-M(Sk4f) sqrt() const {
-#if defined(SK_CPU_ARM64)
- return vsqrtq_f32(fVec);
-#else
- float32x4_t est1 = this->rsqrt().fVec,
- // An extra step of Newton's method to refine the estimate of 1/sqrt(this).
- est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
- return vmulq_f32(fVec, est2);
-#endif
-}
-
-M(Sk4i) equal (const Sk4f& o) const { return vreinterpretq_s32_u32(vceqq_f32(fVec, o.fVec)); }
-M(Sk4i) notEqual (const Sk4f& o) const { return vreinterpretq_s32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec))); }
-M(Sk4i) lessThan (const Sk4f& o) const { return vreinterpretq_s32_u32(vcltq_f32(fVec, o.fVec)); }
-M(Sk4i) greaterThan (const Sk4f& o) const { return vreinterpretq_s32_u32(vcgtq_f32(fVec, o.fVec)); }
-M(Sk4i) lessThanEqual (const Sk4f& o) const { return vreinterpretq_s32_u32(vcleq_f32(fVec, o.fVec)); }
-M(Sk4i) greaterThanEqual(const Sk4f& o) const { return vreinterpretq_s32_u32(vcgeq_f32(fVec, o.fVec)); }
-
-M(Sk4f) Min(const Sk4f& a, const Sk4f& b) { return vminq_f32(a.fVec, b.fVec); }
-M(Sk4f) Max(const Sk4f& a, const Sk4f& b) { return vmaxq_f32(a.fVec, b.fVec); }
-
-M(Sk4f) aacc() const { return vtrnq_f32(fVec, fVec).val[0]; }
-M(Sk4f) bbdd() const { return vtrnq_f32(fVec, fVec).val[1]; }
-M(Sk4f) badc() const { return vrev64q_f32(fVec); }
-
-// Sk4i Methods
-#undef M
-#define M(...) template <> inline __VA_ARGS__ Sk4i::
-
-M() Sk4x(int32_t v) : fVec(vdupq_n_s32(v)) {}
-M() Sk4x(int32_t a, int32_t b, int32_t c, int32_t d) { fVec = (int32x4_t) { a, b, c, d }; }
-
-// As far as I can tell, it's not possible to provide an alignment hint to
-// NEON using intrinsics. However, I think it is possible at the assembly
-// level if we want to get into that.
-M(Sk4i) Load (const int32_t is[4]) { return vld1q_s32(is); }
-M(Sk4i) LoadAligned(const int32_t is[4]) { return vld1q_s32(is); }
-M(void) store (int32_t is[4]) const { vst1q_s32(is, fVec); }
-M(void) storeAligned(int32_t is[4]) const { vst1q_s32 (is, fVec); }
-
-template <>
-M(Sk4f) reinterpret<Sk4f>() const { return vreinterpretq_f32_s32(fVec); }
-
-template <>
-M(Sk4f) cast<Sk4f>() const { return vcvtq_f32_s32(fVec); }
-
-M(bool) allTrue() const {
- int32_t a = vgetq_lane_s32(fVec, 0);
- int32_t b = vgetq_lane_s32(fVec, 1);
- int32_t c = vgetq_lane_s32(fVec, 2);
- int32_t d = vgetq_lane_s32(fVec, 3);
- return a & b & c & d;
-}
-M(bool) anyTrue() const {
- int32_t a = vgetq_lane_s32(fVec, 0);
- int32_t b = vgetq_lane_s32(fVec, 1);
- int32_t c = vgetq_lane_s32(fVec, 2);
- int32_t d = vgetq_lane_s32(fVec, 3);
- return a | b | c | d;
-}
-
-M(Sk4i) bitNot() const { return vmvnq_s32(fVec); }
-M(Sk4i) bitAnd(const Sk4i& o) const { return vandq_s32(fVec, o.fVec); }
-M(Sk4i) bitOr (const Sk4i& o) const { return vorrq_s32(fVec, o.fVec); }
-
-M(Sk4i) equal (const Sk4i& o) const { return vreinterpretq_s32_u32(vceqq_s32(fVec, o.fVec)); }
-M(Sk4i) notEqual (const Sk4i& o) const { return vreinterpretq_s32_u32(vmvnq_u32(vceqq_s32(fVec, o.fVec))); }
-M(Sk4i) lessThan (const Sk4i& o) const { return vreinterpretq_s32_u32(vcltq_s32(fVec, o.fVec)); }
-M(Sk4i) greaterThan (const Sk4i& o) const { return vreinterpretq_s32_u32(vcgtq_s32(fVec, o.fVec)); }
-M(Sk4i) lessThanEqual (const Sk4i& o) const { return vreinterpretq_s32_u32(vcleq_s32(fVec, o.fVec)); }
-M(Sk4i) greaterThanEqual(const Sk4i& o) const { return vreinterpretq_s32_u32(vcgeq_s32(fVec, o.fVec)); }
-
-M(Sk4i) add (const Sk4i& o) const { return vaddq_s32(fVec, o.fVec); }
-M(Sk4i) subtract(const Sk4i& o) const { return vsubq_s32(fVec, o.fVec); }
-M(Sk4i) multiply(const Sk4i& o) const { return vmulq_s32(fVec, o.fVec); }
-// NEON does not have integer reciprocal, sqrt, or division.
-M(Sk4i) Min(const Sk4i& a, const Sk4i& b) { return vminq_s32(a.fVec, b.fVec); }
-M(Sk4i) Max(const Sk4i& a, const Sk4i& b) { return vmaxq_s32(a.fVec, b.fVec); }
-
-M(Sk4i) aacc() const { return vtrnq_s32(fVec, fVec).val[0]; }
-M(Sk4i) bbdd() const { return vtrnq_s32(fVec, fVec).val[1]; }
-M(Sk4i) badc() const { return vrev64q_s32(fVec); }
-
-#undef M
-
-#endif
diff --git a/src/opts/Sk4x_none.h b/src/opts/Sk4x_none.h
deleted file mode 100644
index b477177026..0000000000
--- a/src/opts/Sk4x_none.h
+++ /dev/null
@@ -1,120 +0,0 @@
-// It is important _not_ to put header guards here.
-// This file will be intentionally included three times.
-
-#include "SkTypes.h" // Keep this before any #ifdef for skbug.com/3362
-
-#if defined(SK4X_PREAMBLE)
- #include "SkFloatingPoint.h"
- #include <math.h>
-
-#elif defined(SK4X_PRIVATE)
- typedef T Type;
- typedef T Vector[4];
-
- Vector fVec;
-
- template <int m, int a, int s, int k>
- static Sk4x Shuffle(const Sk4x&, const Sk4x&);
-
- void set(const T vals[4]) { for (int i = 0; i < 4; i++) { fVec[i] = vals[i]; } }
-
-#else
-
-#define M(...) template <typename T> __VA_ARGS__ Sk4x<T>::
-
-M() Sk4x() {}
-M() Sk4x(T v) { fVec[0] = fVec[1] = fVec[2] = fVec[3] = v; }
-M() Sk4x(T a, T b, T c, T d) { fVec[0] = a; fVec[1] = b; fVec[2] = c; fVec[3] = d; }
-
-M() Sk4x(const Sk4x<T>& other) { this->set(other.fVec); }
-M(Sk4x<T>&) operator=(const Sk4x<T>& other) { this->set(other.fVec); return *this; }
-
-M(Sk4x<T>) Load (const T vals[4]) { Sk4x r; r.set(vals); return r; }
-M(Sk4x<T>) LoadAligned(const T vals[4]) { return Load(vals); }
-
-M(void) store (T vals[4]) const { for (int i = 0; i < 4; i++) { vals[i] = fVec[i]; } }
-M(void) storeAligned(T vals[4]) const { this->store(vals); }
-
-M(template <typename Dst> Dst) reinterpret() const {
- Dst d;
- memcpy(&d.fVec, fVec, sizeof(fVec));
- return d;
-}
-M(template <typename Dst> Dst) cast() const {
- return Dst((typename Dst::Type)fVec[0],
- (typename Dst::Type)fVec[1],
- (typename Dst::Type)fVec[2],
- (typename Dst::Type)fVec[3]);
-}
-
-M(bool) allTrue() const { return fVec[0] && fVec[1] && fVec[2] && fVec[3]; }
-M(bool) anyTrue() const { return fVec[0] || fVec[1] || fVec[2] || fVec[3]; }
-
-M(Sk4x<T>) bitNot() const { return Sk4x(~fVec[0], ~fVec[1], ~fVec[2], ~fVec[3]); }
-
-#define BINOP(op) fVec[0] op other.fVec[0], \
- fVec[1] op other.fVec[1], \
- fVec[2] op other.fVec[2], \
- fVec[3] op other.fVec[3]
-M(Sk4x<T>) bitAnd(const Sk4x& other) const { return Sk4x(BINOP(&)); }
-M(Sk4x<T>) bitOr(const Sk4x& other) const { return Sk4x(BINOP(|)); }
-M(Sk4x<T>) add(const Sk4x<T>& other) const { return Sk4x(BINOP(+)); }
-M(Sk4x<T>) subtract(const Sk4x<T>& other) const { return Sk4x(BINOP(-)); }
-M(Sk4x<T>) multiply(const Sk4x<T>& other) const { return Sk4x(BINOP(*)); }
-M(Sk4x<T>) divide(const Sk4x<T>& other) const { return Sk4x(BINOP(/)); }
-#undef BINOP
-
-template<> inline Sk4f Sk4f::rsqrt() const {
- return Sk4f(sk_float_rsqrt(fVec[0]),
- sk_float_rsqrt(fVec[1]),
- sk_float_rsqrt(fVec[2]),
- sk_float_rsqrt(fVec[3]));
-}
-
-template<> inline Sk4f Sk4f::sqrt() const {
- return Sk4f(sqrtf(fVec[0]),
- sqrtf(fVec[1]),
- sqrtf(fVec[2]),
- sqrtf(fVec[3]));
-}
-
-#define BOOL_BINOP(op) fVec[0] op other.fVec[0] ? -1 : 0, \
- fVec[1] op other.fVec[1] ? -1 : 0, \
- fVec[2] op other.fVec[2] ? -1 : 0, \
- fVec[3] op other.fVec[3] ? -1 : 0
-M(Sk4i) equal(const Sk4x<T>& other) const { return Sk4i(BOOL_BINOP(==)); }
-M(Sk4i) notEqual(const Sk4x<T>& other) const { return Sk4i(BOOL_BINOP(!=)); }
-M(Sk4i) lessThan(const Sk4x<T>& other) const { return Sk4i(BOOL_BINOP( <)); }
-M(Sk4i) greaterThan(const Sk4x<T>& other) const { return Sk4i(BOOL_BINOP( >)); }
-M(Sk4i) lessThanEqual(const Sk4x<T>& other) const { return Sk4i(BOOL_BINOP(<=)); }
-M(Sk4i) greaterThanEqual(const Sk4x<T>& other) const { return Sk4i(BOOL_BINOP(>=)); }
-#undef BOOL_BINOP
-
-M(Sk4x<T>) Min(const Sk4x<T>& a, const Sk4x<T>& b) {
- return Sk4x(SkTMin(a.fVec[0], b.fVec[0]),
- SkTMin(a.fVec[1], b.fVec[1]),
- SkTMin(a.fVec[2], b.fVec[2]),
- SkTMin(a.fVec[3], b.fVec[3]));
-}
-
-M(Sk4x<T>) Max(const Sk4x<T>& a, const Sk4x<T>& b) {
- return Sk4x(SkTMax(a.fVec[0], b.fVec[0]),
- SkTMax(a.fVec[1], b.fVec[1]),
- SkTMax(a.fVec[2], b.fVec[2]),
- SkTMax(a.fVec[3], b.fVec[3]));
-}
-
-M(template <int m, int a, int s, int k> Sk4x<T>) Shuffle(const Sk4x<T>& x, const Sk4x<T>& y) {
- return Sk4x(m < 4 ? x.fVec[m] : y.fVec[m-4],
- a < 4 ? x.fVec[a] : y.fVec[a-4],
- s < 4 ? x.fVec[s] : y.fVec[s-4],
- k < 4 ? x.fVec[k] : y.fVec[k-4]);
-}
-
-M(Sk4x<T>) aacc() const { return Shuffle<0,0,2,2>(*this, *this); }
-M(Sk4x<T>) bbdd() const { return Shuffle<1,1,3,3>(*this, *this); }
-M(Sk4x<T>) badc() const { return Shuffle<1,0,3,2>(*this, *this); }
-
-#undef M
-
-#endif
diff --git a/src/opts/Sk4x_sse.h b/src/opts/Sk4x_sse.h
deleted file mode 100644
index ce452d08b6..0000000000
--- a/src/opts/Sk4x_sse.h
+++ /dev/null
@@ -1,177 +0,0 @@
-// It is important _not_ to put header guards here.
-// This file will be intentionally included three times.
-
-// Useful reading:
-// https://software.intel.com/sites/landingpage/IntrinsicsGuide/
-
-#include "SkTypes.h" // Keep this before any #ifdef for skbug.com/3362
-
-#if defined(SK4X_PREAMBLE)
- // Code in this file may assume SSE and SSE2.
- #include <emmintrin.h>
-
- // It must check for later instruction sets.
- #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
- #include <immintrin.h>
- #endif
-
- // A little bit of template metaprogramming to map
- // float to __m128 and int32_t to __m128i.
- template <typename T> struct SkScalarToSIMD;
- template <> struct SkScalarToSIMD<float> { typedef __m128 Type; };
- template <> struct SkScalarToSIMD<int32_t> { typedef __m128i Type; };
-
- // These are all free, zero instructions.
- // MSVC insists we use _mm_castA_B(a) instead of (B)a.
- static inline __m128 as_4f(__m128i v) { return _mm_castsi128_ps(v); }
- static inline __m128 as_4f(__m128 v) { return v ; }
- static inline __m128i as_4i(__m128i v) { return v ; }
- static inline __m128i as_4i(__m128 v) { return _mm_castps_si128(v); }
-
-#elif defined(SK4X_PRIVATE)
-    // It'd be slightly faster to call _mm_cmpeq_epi32() on an uninitialized register and itself,
- // but that has caused hard to debug issues when compilers recognize dealing with uninitialized
- // memory as undefined behavior that can be optimized away.
- static __m128i True() { return _mm_set1_epi8(~0); }
-
- // Leaving these implicit makes the rest of the code below a bit less noisy to read.
- Sk4x(__m128i);
- Sk4x(__m128);
-
- Sk4x andNot(const Sk4x&) const;
-
- typename SkScalarToSIMD<T>::Type fVec;
-
-#else//Method definitions.
-
-// Helps to get these in before anything else.
-template <> inline Sk4f::Sk4x(__m128i v) : fVec(as_4f(v)) {}
-template <> inline Sk4f::Sk4x(__m128 v) : fVec( v ) {}
-template <> inline Sk4i::Sk4x(__m128i v) : fVec( v ) {}
-template <> inline Sk4i::Sk4x(__m128 v) : fVec(as_4i(v)) {}
-
-// Next, methods whose implementation is the same for Sk4f and Sk4i.
-template <typename T> Sk4x<T>::Sk4x() {}
-template <typename T> Sk4x<T>::Sk4x(const Sk4x& other) { *this = other; }
-template <typename T> Sk4x<T>& Sk4x<T>::operator=(const Sk4x<T>& other) {
- fVec = other.fVec;
- return *this;
-}
-
-// We pun in these _mm_shuffle_* methods a little to use the fastest / most available methods.
-// They're all bit-preserving operations so it shouldn't matter.
-
-template <typename T>
-Sk4x<T> Sk4x<T>::aacc() const { return _mm_shuffle_epi32(as_4i(fVec), _MM_SHUFFLE(2,2,0,0)); }
-template <typename T>
-Sk4x<T> Sk4x<T>::bbdd() const { return _mm_shuffle_epi32(as_4i(fVec), _MM_SHUFFLE(3,3,1,1)); }
-template <typename T>
-Sk4x<T> Sk4x<T>::badc() const { return _mm_shuffle_epi32(as_4i(fVec), _MM_SHUFFLE(2,3,0,1)); }
-
-// Now we'll write all Sk4f specific methods. This M() macro will remove some noise.
-#define M(...) template <> inline __VA_ARGS__ Sk4f::
-
-M() Sk4x(float v) : fVec(_mm_set1_ps(v)) {}
-M() Sk4x(float a, float b, float c, float d) : fVec(_mm_set_ps(d,c,b,a)) {}
-
-M(Sk4f) Load (const float fs[4]) { return _mm_loadu_ps(fs); }
-M(Sk4f) LoadAligned(const float fs[4]) { return _mm_load_ps (fs); }
-
-M(void) store (float fs[4]) const { _mm_storeu_ps(fs, fVec); }
-M(void) storeAligned(float fs[4]) const { _mm_store_ps (fs, fVec); }
-
-template <> M(Sk4i) reinterpret<Sk4i>() const { return as_4i(fVec); }
-
-// cvttps truncates, same as (int) when positive.
-template <> M(Sk4i) cast<Sk4i>() const { return _mm_cvttps_epi32(fVec); }
-
-// We're going to try a little experiment here and skip allTrue(), anyTrue(), and bit-manipulators
-// for Sk4f. Code that calls them probably does so accidentally.
-// Ask mtklein to fill these in if you really need them.
-
-M(Sk4f) add (const Sk4f& o) const { return _mm_add_ps(fVec, o.fVec); }
-M(Sk4f) subtract(const Sk4f& o) const { return _mm_sub_ps(fVec, o.fVec); }
-M(Sk4f) multiply(const Sk4f& o) const { return _mm_mul_ps(fVec, o.fVec); }
-M(Sk4f) divide (const Sk4f& o) const { return _mm_div_ps(fVec, o.fVec); }
-
-M(Sk4f) rsqrt() const { return _mm_rsqrt_ps(fVec); }
-M(Sk4f) sqrt() const { return _mm_sqrt_ps( fVec); }
-
-M(Sk4i) equal (const Sk4f& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
-M(Sk4i) notEqual (const Sk4f& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
-M(Sk4i) lessThan (const Sk4f& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
-M(Sk4i) greaterThan (const Sk4f& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
-M(Sk4i) lessThanEqual (const Sk4f& o) const { return _mm_cmple_ps (fVec, o.fVec); }
-M(Sk4i) greaterThanEqual(const Sk4f& o) const { return _mm_cmpge_ps (fVec, o.fVec); }
-
-M(Sk4f) Min(const Sk4f& a, const Sk4f& b) { return _mm_min_ps(a.fVec, b.fVec); }
-M(Sk4f) Max(const Sk4f& a, const Sk4f& b) { return _mm_max_ps(a.fVec, b.fVec); }
-
-// Now we'll write all the Sk4i specific methods. Same deal for M().
-#undef M
-#define M(...) template <> inline __VA_ARGS__ Sk4i::
-
-M() Sk4x(int32_t v) : fVec(_mm_set1_epi32(v)) {}
-M() Sk4x(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_set_epi32(d,c,b,a)) {}
-
-M(Sk4i) Load (const int32_t is[4]) { return _mm_loadu_si128((const __m128i*)is); }
-M(Sk4i) LoadAligned(const int32_t is[4]) { return _mm_load_si128 ((const __m128i*)is); }
-
-M(void) store (int32_t is[4]) const { _mm_storeu_si128((__m128i*)is, fVec); }
-M(void) storeAligned(int32_t is[4]) const { _mm_store_si128 ((__m128i*)is, fVec); }
-
-template <>
-M(Sk4f) reinterpret<Sk4f>() const { return as_4f(fVec); }
-
-template <>
-M(Sk4f) cast<Sk4f>() const { return _mm_cvtepi32_ps(fVec); }
-
-M(bool) allTrue() const { return 0xf == _mm_movemask_ps(as_4f(fVec)); }
-M(bool) anyTrue() const { return 0x0 != _mm_movemask_ps(as_4f(fVec)); }
-
-M(Sk4i) bitNot() const { return _mm_xor_si128(fVec, True()); }
-M(Sk4i) bitAnd(const Sk4i& o) const { return _mm_and_si128(fVec, o.fVec); }
-M(Sk4i) bitOr (const Sk4i& o) const { return _mm_or_si128 (fVec, o.fVec); }
-
-M(Sk4i) equal (const Sk4i& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
-M(Sk4i) lessThan (const Sk4i& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); }
-M(Sk4i) greaterThan (const Sk4i& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); }
-M(Sk4i) notEqual (const Sk4i& o) const { return this-> equal(o).bitNot(); }
-M(Sk4i) lessThanEqual (const Sk4i& o) const { return this->greaterThan(o).bitNot(); }
-M(Sk4i) greaterThanEqual(const Sk4i& o) const { return this-> lessThan(o).bitNot(); }
-
-M(Sk4i) add (const Sk4i& o) const { return _mm_add_epi32(fVec, o.fVec); }
-M(Sk4i) subtract(const Sk4i& o) const { return _mm_sub_epi32(fVec, o.fVec); }
-
-// SSE doesn't have integer division. Let's see how far we can get without Sk4i::divide().
-
-// Sk4i's multiply(), Min(), and Max() all improve significantly with SSE4.1.
-#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
- M(Sk4i) multiply(const Sk4i& o) const { return _mm_mullo_epi32(fVec, o.fVec); }
- M(Sk4i) Min(const Sk4i& a, const Sk4i& b) { return _mm_min_epi32(a.fVec, b.fVec); }
- M(Sk4i) Max(const Sk4i& a, const Sk4i& b) { return _mm_max_epi32(a.fVec, b.fVec); }
-#else
- M(Sk4i) multiply(const Sk4i& o) const {
- // First 2 32->64 bit multiplies.
- __m128i mul02 = _mm_mul_epu32(fVec, o.fVec),
- mul13 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
- // Now recombine the high bits of the two products.
- return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul02, _MM_SHUFFLE(0,0,2,0)),
- _mm_shuffle_epi32(mul13, _MM_SHUFFLE(0,0,2,0)));
- }
-
- M(Sk4i) andNot(const Sk4i& o) const { return _mm_andnot_si128(o.fVec, fVec); }
-
- M(Sk4i) Min(const Sk4i& a, const Sk4i& b) {
- Sk4i less = a.lessThan(b);
- return a.bitAnd(less).bitOr(b.andNot(less));
- }
- M(Sk4i) Max(const Sk4i& a, const Sk4i& b) {
- Sk4i less = a.lessThan(b);
- return b.bitAnd(less).bitOr(a.andNot(less));
- }
-#endif
-
-#undef M
-
-#endif//Method definitions.
diff --git a/src/opts/SkNx_neon.h b/src/opts/SkNx_neon.h
new file mode 100644
index 0000000000..01ea67c5d7
--- /dev/null
+++ b/src/opts/SkNx_neon.h
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNx_neon_DEFINED
+#define SkNx_neon_DEFINED
+
+#include <arm_neon.h>
+
+template <>
+class SkNi<2, int32_t> {
+public:
+ SkNi(int32x2_t vec) : fVec(vec) {}
+
+ SkNi() {}
+ bool allTrue() const { return fVec[0] && fVec[1]; }
+ bool anyTrue() const { return fVec[0] || fVec[1]; }
+private:
+ int32x2_t fVec;
+};
+
+template <>
+class SkNi<4, int32_t> {
+public:
+ SkNi(int32x4_t vec) : fVec(vec) {}
+
+ SkNi() {}
+ bool allTrue() const { return fVec[0] && fVec[1] && fVec[2] && fVec[3]; }
+ bool anyTrue() const { return fVec[0] || fVec[1] || fVec[2] || fVec[3]; }
+private:
+ int32x4_t fVec;
+};
+
+template <>
+class SkNf<2, float> {
+ typedef SkNi<2, int32_t> Ni;
+public:
+ SkNf(float32x2_t vec) : fVec(vec) {}
+
+ SkNf() {}
+ explicit SkNf(float val) : fVec(vdup_n_f32(val)) {}
+ static SkNf Load(const float vals[2]) { return vld1_f32(vals); }
+ SkNf(float a, float b) { fVec = (float32x2_t) { a, b }; }
+
+ void store(float vals[2]) const { vst1_f32(vals, fVec); }
+
+ SkNf approxInvert() const {
+ float32x2_t est0 = vrecpe_f32(fVec),
+ est1 = vmul_f32(vrecps_f32(est0, fVec), est0);
+ return est1;
+ }
+ SkNf invert() const {
+ float32x2_t est1 = this->approxInvert().fVec,
+ est2 = vmul_f32(vrecps_f32(est1, fVec), est1);
+ return est2;
+ }
+
+ SkNf operator + (const SkNf& o) const { return vadd_f32(fVec, o.fVec); }
+ SkNf operator - (const SkNf& o) const { return vsub_f32(fVec, o.fVec); }
+ SkNf operator * (const SkNf& o) const { return vmul_f32(fVec, o.fVec); }
+ SkNf operator / (const SkNf& o) const {
+ #if defined(SK_CPU_ARM64)
+ return vdiv_f32(fVec, o.fVec);
+ #else
+ return vmul_f32(fVec, o.invert().fVec);
+ #endif
+ }
+
+ Ni operator == (const SkNf& o) const { return vreinterpret_s32_u32(vceq_f32(fVec, o.fVec)); }
+ Ni operator < (const SkNf& o) const { return vreinterpret_s32_u32(vclt_f32(fVec, o.fVec)); }
+ Ni operator > (const SkNf& o) const { return vreinterpret_s32_u32(vcgt_f32(fVec, o.fVec)); }
+ Ni operator <= (const SkNf& o) const { return vreinterpret_s32_u32(vcle_f32(fVec, o.fVec)); }
+ Ni operator >= (const SkNf& o) const { return vreinterpret_s32_u32(vcge_f32(fVec, o.fVec)); }
+ Ni operator != (const SkNf& o) const {
+ return vreinterpret_s32_u32(vmvn_u32(vceq_f32(fVec, o.fVec)));
+ }
+
+ static SkNf Min(const SkNf& l, const SkNf& r) { return vmin_f32(l.fVec, r.fVec); }
+ static SkNf Max(const SkNf& l, const SkNf& r) { return vmax_f32(l.fVec, r.fVec); }
+
+ SkNf rsqrt() const {
+ float32x2_t est0 = vrsqrte_f32(fVec),
+ est1 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est0, est0)), est0);
+ return est1;
+ }
+
+ SkNf sqrt() const {
+ #if defined(SK_CPU_ARM64)
+ return vsqrt_f32(fVec);
+ #else
+ float32x2_t est1 = this->rsqrt().fVec,
+ // An extra step of Newton's method to refine the estimate of 1/sqrt(this).
+ est2 = vmul_f32(vrsqrts_f32(fVec, vmul_f32(est1, est1)), est1);
+ return vmul_f32(fVec, est2);
+ #endif
+ }
+
+ float operator[] (int k) const {
+ SkASSERT(0 <= k && k < 2);
+ return fVec[k];
+ }
+
+private:
+ float32x2_t fVec;
+};
+
+#if defined(SK_CPU_ARM64)
+template <>
+class SkNi<2, int64_t> {
+public:
+ SkNi(int64x2_t vec) : fVec(vec) {}
+
+ SkNi() {}
+ bool allTrue() const { return fVec[0] && fVec[1]; }
+ bool anyTrue() const { return fVec[0] || fVec[1]; }
+private:
+ int64x2_t fVec;
+};
+
+template <>
+class SkNf<2, double> {
+ typedef SkNi<2, int64_t> Ni;
+public:
+ SkNf(float64x2_t vec) : fVec(vec) {}
+
+ SkNf() {}
+ explicit SkNf(double val) : fVec(vdupq_n_f64(val)) {}
+ static SkNf Load(const double vals[2]) { return vld1q_f64(vals); }
+ SkNf(double a, double b) { fVec = (float64x2_t) { a, b }; }
+
+ void store(double vals[2]) const { vst1q_f64(vals, fVec); }
+
+ SkNf operator + (const SkNf& o) const { return vaddq_f64(fVec, o.fVec); }
+ SkNf operator - (const SkNf& o) const { return vsubq_f64(fVec, o.fVec); }
+ SkNf operator * (const SkNf& o) const { return vmulq_f64(fVec, o.fVec); }
+ SkNf operator / (const SkNf& o) const { return vdivq_f64(fVec, o.fVec); }
+
+ Ni operator == (const SkNf& o) const { return vreinterpretq_s64_u64(vceqq_f64(fVec, o.fVec)); }
+ Ni operator < (const SkNf& o) const { return vreinterpretq_s64_u64(vcltq_f64(fVec, o.fVec)); }
+ Ni operator > (const SkNf& o) const { return vreinterpretq_s64_u64(vcgtq_f64(fVec, o.fVec)); }
+ Ni operator <= (const SkNf& o) const { return vreinterpretq_s64_u64(vcleq_f64(fVec, o.fVec)); }
+ Ni operator >= (const SkNf& o) const { return vreinterpretq_s64_u64(vcgeq_f64(fVec, o.fVec)); }
+ Ni operator != (const SkNf& o) const {
+ return vreinterpretq_s64_u32(vmvnq_u32(vreinterpretq_u32_u64(vceqq_f64(fVec, o.fVec))));
+ }
+
+ static SkNf Min(const SkNf& l, const SkNf& r) { return vminq_f64(l.fVec, r.fVec); }
+ static SkNf Max(const SkNf& l, const SkNf& r) { return vmaxq_f64(l.fVec, r.fVec); }
+
+ SkNf sqrt() const { return vsqrtq_f64(fVec); }
+ SkNf rsqrt() const {
+ float64x2_t est0 = vrsqrteq_f64(fVec),
+ est1 = vmulq_f64(vrsqrtsq_f64(fVec, vmulq_f64(est0, est0)), est0);
+ return est1;
+ }
+
+ SkNf approxInvert() const {
+ float64x2_t est0 = vrecpeq_f64(fVec),
+ est1 = vmulq_f64(vrecpsq_f64(est0, fVec), est0);
+ return est1;
+ }
+
+ SkNf invert() const {
+ float64x2_t est1 = this->approxInvert().fVec,
+ est2 = vmulq_f64(vrecpsq_f64(est1, fVec), est1),
+ est3 = vmulq_f64(vrecpsq_f64(est2, fVec), est2);
+ return est3;
+ }
+
+ double operator[] (int k) const {
+ SkASSERT(0 <= k && k < 2);
+ return fVec[k];
+ }
+
+private:
+ float64x2_t fVec;
+};
+#endif//defined(SK_CPU_ARM64)
+
+template <>
+class SkNf<4, float> {
+ typedef SkNi<4, int32_t> Ni;
+public:
+ SkNf(float32x4_t vec) : fVec(vec) {}
+ float32x4_t vec() const { return fVec; }
+
+ SkNf() {}
+ explicit SkNf(float val) : fVec(vdupq_n_f32(val)) {}
+ static SkNf Load(const float vals[4]) { return vld1q_f32(vals); }
+ SkNf(float a, float b, float c, float d) { fVec = (float32x4_t) { a, b, c, d }; }
+
+ void store(float vals[4]) const { vst1q_f32(vals, fVec); }
+
+ SkNf approxInvert() const {
+ float32x4_t est0 = vrecpeq_f32(fVec),
+ est1 = vmulq_f32(vrecpsq_f32(est0, fVec), est0);
+ return est1;
+ }
+ SkNf invert() const {
+ float32x4_t est1 = this->approxInvert().fVec,
+ est2 = vmulq_f32(vrecpsq_f32(est1, fVec), est1);
+ return est2;
+ }
+
+ SkNf operator + (const SkNf& o) const { return vaddq_f32(fVec, o.fVec); }
+ SkNf operator - (const SkNf& o) const { return vsubq_f32(fVec, o.fVec); }
+ SkNf operator * (const SkNf& o) const { return vmulq_f32(fVec, o.fVec); }
+ SkNf operator / (const SkNf& o) const {
+ #if defined(SK_CPU_ARM64)
+ return vdivq_f32(fVec, o.fVec);
+ #else
+ return vmulq_f32(fVec, o.invert().fVec);
+ #endif
+ }
+
+ Ni operator == (const SkNf& o) const { return vreinterpretq_s32_u32(vceqq_f32(fVec, o.fVec)); }
+ Ni operator < (const SkNf& o) const { return vreinterpretq_s32_u32(vcltq_f32(fVec, o.fVec)); }
+ Ni operator > (const SkNf& o) const { return vreinterpretq_s32_u32(vcgtq_f32(fVec, o.fVec)); }
+ Ni operator <= (const SkNf& o) const { return vreinterpretq_s32_u32(vcleq_f32(fVec, o.fVec)); }
+ Ni operator >= (const SkNf& o) const { return vreinterpretq_s32_u32(vcgeq_f32(fVec, o.fVec)); }
+ Ni operator != (const SkNf& o) const {
+ return vreinterpretq_s32_u32(vmvnq_u32(vceqq_f32(fVec, o.fVec)));
+ }
+
+ static SkNf Min(const SkNf& l, const SkNf& r) { return vminq_f32(l.fVec, r.fVec); }
+ static SkNf Max(const SkNf& l, const SkNf& r) { return vmaxq_f32(l.fVec, r.fVec); }
+
+ SkNf rsqrt() const {
+ float32x4_t est0 = vrsqrteq_f32(fVec),
+ est1 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est0, est0)), est0);
+ return est1;
+ }
+
+ SkNf sqrt() const {
+ #if defined(SK_CPU_ARM64)
+ return vsqrtq_f32(fVec);
+ #else
+ float32x4_t est1 = this->rsqrt().fVec,
+ // An extra step of Newton's method to refine the estimate of 1/sqrt(this).
+ est2 = vmulq_f32(vrsqrtsq_f32(fVec, vmulq_f32(est1, est1)), est1);
+ return vmulq_f32(fVec, est2);
+ #endif
+ }
+
+ float operator[] (int k) const {
+ SkASSERT(0 <= k && k < 4);
+ return fVec[k];
+ }
+
+private:
+ float32x4_t fVec;
+};
+
+#endif//SkNx_neon_DEFINED
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
new file mode 100644
index 0000000000..87754ad155
--- /dev/null
+++ b/src/opts/SkNx_sse.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef SkNx_sse_DEFINED
+#define SkNx_sse_DEFINED
+
+// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
+#include <immintrin.h>
+
+template <>
+class SkNi<2, int32_t> {
+public:
+ SkNi(const __m128i& vec) : fVec(vec) {}
+
+ SkNi() {}
+ bool allTrue() const { return 0xff == (_mm_movemask_epi8(fVec) & 0xff); }
+ bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(fVec) & 0xff); }
+
+private:
+ __m128i fVec;
+};
+
+template <>
+class SkNi<4, int32_t> {
+public:
+ SkNi(const __m128i& vec) : fVec(vec) {}
+
+ SkNi() {}
+ bool allTrue() const { return 0xffff == _mm_movemask_epi8(fVec); }
+ bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(fVec); }
+
+private:
+ __m128i fVec;
+};
+
+template <>
+class SkNi<2, int64_t> {
+public:
+ SkNi(const __m128i& vec) : fVec(vec) {}
+
+ SkNi() {}
+ bool allTrue() const { return 0xffff == _mm_movemask_epi8(fVec); }
+ bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(fVec); }
+
+private:
+ __m128i fVec;
+};
+
+
+template <>
+class SkNf<2, float> {
+ typedef SkNi<2, int32_t> Ni;
+public:
+ SkNf(const __m128& vec) : fVec(vec) {}
+
+ SkNf() {}
+ explicit SkNf(float val) : fVec(_mm_set1_ps(val)) {}
+ static SkNf Load(const float vals[2]) {
+ return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)vals));
+ }
+ SkNf(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}
+
+ void store(float vals[2]) const { _mm_storel_pi((__m64*)vals, fVec); }
+
+ SkNf operator + (const SkNf& o) const { return _mm_add_ps(fVec, o.fVec); }
+ SkNf operator - (const SkNf& o) const { return _mm_sub_ps(fVec, o.fVec); }
+ SkNf operator * (const SkNf& o) const { return _mm_mul_ps(fVec, o.fVec); }
+ SkNf operator / (const SkNf& o) const { return _mm_div_ps(fVec, o.fVec); }
+
+ Ni operator == (const SkNf& o) const { return _mm_castps_si128(_mm_cmpeq_ps (fVec, o.fVec)); }
+ Ni operator != (const SkNf& o) const { return _mm_castps_si128(_mm_cmpneq_ps(fVec, o.fVec)); }
+ Ni operator < (const SkNf& o) const { return _mm_castps_si128(_mm_cmplt_ps (fVec, o.fVec)); }
+ Ni operator > (const SkNf& o) const { return _mm_castps_si128(_mm_cmpgt_ps (fVec, o.fVec)); }
+ Ni operator <= (const SkNf& o) const { return _mm_castps_si128(_mm_cmple_ps (fVec, o.fVec)); }
+ Ni operator >= (const SkNf& o) const { return _mm_castps_si128(_mm_cmpge_ps (fVec, o.fVec)); }
+
+ static SkNf Min(const SkNf& l, const SkNf& r) { return _mm_min_ps(l.fVec, r.fVec); }
+ static SkNf Max(const SkNf& l, const SkNf& r) { return _mm_max_ps(l.fVec, r.fVec); }
+
+ SkNf sqrt() const { return _mm_sqrt_ps (fVec); }
+ SkNf rsqrt() const { return _mm_rsqrt_ps(fVec); }
+
+ SkNf invert() const { return SkNf(1) / *this; }
+ SkNf approxInvert() const { return _mm_rcp_ps(fVec); }
+
+ float operator[] (int k) const {
+ SkASSERT(0 <= k && k < 2);
+ union { __m128 v; float fs[4]; } pun = {fVec};
+ return pun.fs[k];
+ }
+
+private:
+ __m128 fVec;
+};
+
+template <>
+class SkNf<2, double> {
+ typedef SkNi<2, int64_t> Ni;
+public:
+ SkNf(const __m128d& vec) : fVec(vec) {}
+
+ SkNf() {}
+ explicit SkNf(double val) : fVec( _mm_set1_pd(val) ) {}
+ static SkNf Load(const double vals[2]) { return _mm_loadu_pd(vals); }
+ SkNf(double a, double b) : fVec(_mm_setr_pd(a,b)) {}
+
+ void store(double vals[2]) const { _mm_storeu_pd(vals, fVec); }
+
+ SkNf operator + (const SkNf& o) const { return _mm_add_pd(fVec, o.fVec); }
+ SkNf operator - (const SkNf& o) const { return _mm_sub_pd(fVec, o.fVec); }
+ SkNf operator * (const SkNf& o) const { return _mm_mul_pd(fVec, o.fVec); }
+ SkNf operator / (const SkNf& o) const { return _mm_div_pd(fVec, o.fVec); }
+
+ Ni operator == (const SkNf& o) const { return _mm_castpd_si128(_mm_cmpeq_pd (fVec, o.fVec)); }
+ Ni operator != (const SkNf& o) const { return _mm_castpd_si128(_mm_cmpneq_pd(fVec, o.fVec)); }
+ Ni operator < (const SkNf& o) const { return _mm_castpd_si128(_mm_cmplt_pd (fVec, o.fVec)); }
+ Ni operator > (const SkNf& o) const { return _mm_castpd_si128(_mm_cmpgt_pd (fVec, o.fVec)); }
+ Ni operator <= (const SkNf& o) const { return _mm_castpd_si128(_mm_cmple_pd (fVec, o.fVec)); }
+ Ni operator >= (const SkNf& o) const { return _mm_castpd_si128(_mm_cmpge_pd (fVec, o.fVec)); }
+
+ static SkNf Min(const SkNf& l, const SkNf& r) { return _mm_min_pd(l.fVec, r.fVec); }
+ static SkNf Max(const SkNf& l, const SkNf& r) { return _mm_max_pd(l.fVec, r.fVec); }
+
+ SkNf sqrt() const { return _mm_sqrt_pd(fVec); }
+ SkNf rsqrt() const { return _mm_cvtps_pd(_mm_rsqrt_ps(_mm_cvtpd_ps(fVec))); }
+
+ SkNf invert() const { return SkNf(1) / *this; }
+ SkNf approxInvert() const { return _mm_cvtps_pd(_mm_rcp_ps(_mm_cvtpd_ps(fVec))); }
+
+ double operator[] (int k) const {
+ SkASSERT(0 <= k && k < 2);
+ union { __m128d v; double ds[2]; } pun = {fVec};
+ return pun.ds[k];
+ }
+
+private:
+ __m128d fVec;
+};
+
+template <>
+class SkNf<4, float> {
+ typedef SkNi<4, int32_t> Ni;
+public:
+ SkNf(const __m128& vec) : fVec(vec) {}
+ __m128 vec() const { return fVec; }
+
+ SkNf() {}
+ explicit SkNf(float val) : fVec( _mm_set1_ps(val) ) {}
+ static SkNf Load(const float vals[4]) { return _mm_loadu_ps(vals); }
+ SkNf(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}
+
+ void store(float vals[4]) const { _mm_storeu_ps(vals, fVec); }
+
+ SkNf operator + (const SkNf& o) const { return _mm_add_ps(fVec, o.fVec); }
+ SkNf operator - (const SkNf& o) const { return _mm_sub_ps(fVec, o.fVec); }
+ SkNf operator * (const SkNf& o) const { return _mm_mul_ps(fVec, o.fVec); }
+ SkNf operator / (const SkNf& o) const { return _mm_div_ps(fVec, o.fVec); }
+
+ Ni operator == (const SkNf& o) const { return _mm_castps_si128(_mm_cmpeq_ps (fVec, o.fVec)); }
+ Ni operator != (const SkNf& o) const { return _mm_castps_si128(_mm_cmpneq_ps(fVec, o.fVec)); }
+ Ni operator < (const SkNf& o) const { return _mm_castps_si128(_mm_cmplt_ps (fVec, o.fVec)); }
+ Ni operator > (const SkNf& o) const { return _mm_castps_si128(_mm_cmpgt_ps (fVec, o.fVec)); }
+ Ni operator <= (const SkNf& o) const { return _mm_castps_si128(_mm_cmple_ps (fVec, o.fVec)); }
+ Ni operator >= (const SkNf& o) const { return _mm_castps_si128(_mm_cmpge_ps (fVec, o.fVec)); }
+
+ static SkNf Min(const SkNf& l, const SkNf& r) { return _mm_min_ps(l.fVec, r.fVec); }
+ static SkNf Max(const SkNf& l, const SkNf& r) { return _mm_max_ps(l.fVec, r.fVec); }
+
+ SkNf sqrt() const { return _mm_sqrt_ps (fVec); }
+ SkNf rsqrt() const { return _mm_rsqrt_ps(fVec); }
+
+ SkNf invert() const { return SkNf(1) / *this; }
+ SkNf approxInvert() const { return _mm_rcp_ps(fVec); }
+
+ float operator[] (int k) const {
+ SkASSERT(0 <= k && k < 4);
+ union { __m128 v; float fs[4]; } pun = {fVec};
+ return pun.fs[k];
+ }
+
+private:
+ __m128 fVec;
+};
+
+
+#endif//SkNx_sse_DEFINED
diff --git a/src/opts/SkPMFloat_SSE2.h b/src/opts/SkPMFloat_SSE2.h
index fa920d75d6..231940d86e 100644
--- a/src/opts/SkPMFloat_SSE2.h
+++ b/src/opts/SkPMFloat_SSE2.h
@@ -5,11 +5,6 @@
* found in the LICENSE file.
*/
-inline SkPMFloat& SkPMFloat::operator=(const SkPMFloat& that) {
- fColors = that.fColors;
- return *this;
-}
-
// For SkPMFloat(SkPMColor), we widen our 8 bit components (fix8) to 8-bit components in 16 bits
// (fix8_16), then widen those to 8-bit-in-32-bits (fix8_32), and finally convert those to floats.
@@ -33,7 +28,7 @@ inline SkPMColor SkPMFloat::get() const {
inline SkPMColor SkPMFloat::clamped() const {
// We don't use _mm_cvtps_epi32, because we want precise control over how 0.5 rounds (up).
- __m128i fix8_32 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), fColors)),
+ __m128i fix8_32 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), fColors.vec())),
fix8_16 = _mm_packus_epi16(fix8_32, fix8_32),
fix8 = _mm_packus_epi16(fix8_16, fix8_16);
SkPMColor c = _mm_cvtsi128_si32(fix8);
@@ -43,7 +38,7 @@ inline SkPMColor SkPMFloat::clamped() const {
inline SkPMColor SkPMFloat::trunc() const {
// Basically, same as clamped(), but no rounding.
- __m128i fix8_32 = _mm_cvttps_epi32(fColors),
+ __m128i fix8_32 = _mm_cvttps_epi32(fColors.vec()),
fix8_16 = _mm_packus_epi16(fix8_32, fix8_32),
fix8 = _mm_packus_epi16(fix8_16, fix8_16);
SkPMColor c = _mm_cvtsi128_si32(fix8);
@@ -72,10 +67,10 @@ inline void SkPMFloat::ClampTo4PMColors(
SkPMColor colors[4]) {
// Same as _SSSE3.h's. We use 3 _mm_packus_epi16() where the naive loop uses 8.
// We don't use _mm_cvtps_epi32, because we want precise control over how 0.5 rounds (up).
- __m128i c0 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), a.fColors)),
- c1 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), b.fColors)),
- c2 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), c.fColors)),
- c3 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), d.fColors));
+ __m128i c0 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), a.fColors.vec())),
+ c1 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), b.fColors.vec())),
+ c2 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), c.fColors.vec())),
+ c3 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), d.fColors.vec()));
__m128i c3210 = _mm_packus_epi16(_mm_packus_epi16(c0, c1),
_mm_packus_epi16(c2, c3));
_mm_storeu_si128((__m128i*)colors, c3210);
diff --git a/src/opts/SkPMFloat_SSSE3.h b/src/opts/SkPMFloat_SSSE3.h
index 6ff6929d01..390c71ce3f 100644
--- a/src/opts/SkPMFloat_SSSE3.h
+++ b/src/opts/SkPMFloat_SSSE3.h
@@ -5,11 +5,6 @@
* found in the LICENSE file.
*/
-inline SkPMFloat& SkPMFloat::operator=(const SkPMFloat& that) {
- fColors = that.fColors;
- return *this;
-}
-
// For SkPMFloat(SkPMColor), we widen our 8 bit components (fix8) to 8-bit components in 32 bits
// (fix8_32), then convert those to floats.
@@ -29,7 +24,7 @@ inline SkPMFloat::SkPMFloat(SkPMColor c) {
inline SkPMColor SkPMFloat::trunc() const {
const int _ = 255; // _ means to zero that byte.
- __m128i fix8_32 = _mm_cvttps_epi32(fColors),
+ __m128i fix8_32 = _mm_cvttps_epi32(fColors.vec()),
fix8 = _mm_shuffle_epi8(fix8_32, _mm_set_epi8(_,_,_,_, _,_,_,_, _,_,_,_, 12,8,4,0));
SkPMColor c = _mm_cvtsi128_si32(fix8);
SkPMColorAssert(c);
@@ -38,12 +33,12 @@ inline SkPMColor SkPMFloat::trunc() const {
inline SkPMColor SkPMFloat::get() const {
SkASSERT(this->isValid());
- return SkPMFloat(Sk4f(0.5f) + *this).trunc();
+ return SkPMFloat(Sk4s(0.5f) + *this).trunc();
}
inline SkPMColor SkPMFloat::clamped() const {
// We don't use _mm_cvtps_epi32, because we want precise control over how 0.5 rounds (up).
- __m128i fix8_32 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), fColors)),
+ __m128i fix8_32 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), fColors.vec())),
fix8_16 = _mm_packus_epi16(fix8_32, fix8_32),
fix8 = _mm_packus_epi16(fix8_16, fix8_16);
SkPMColor c = _mm_cvtsi128_si32(fix8);
@@ -75,10 +70,10 @@ inline void SkPMFloat::ClampTo4PMColors(
SkPMColor colors[4]) {
// Same as _SSE2.h's. We use 3 _mm_packus_epi16() where the naive loop uses 8.
// We don't use _mm_cvtps_epi32, because we want precise control over how 0.5 rounds (up).
- __m128i c0 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), a.fColors)),
- c1 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), b.fColors)),
- c2 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), c.fColors)),
- c3 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), d.fColors));
+ __m128i c0 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), a.fColors.vec())),
+ c1 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), b.fColors.vec())),
+ c2 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), c.fColors.vec())),
+ c3 = _mm_cvttps_epi32(_mm_add_ps(_mm_set1_ps(0.5f), d.fColors.vec()));
__m128i c3210 = _mm_packus_epi16(_mm_packus_epi16(c0, c1),
_mm_packus_epi16(c2, c3));
_mm_storeu_si128((__m128i*)colors, c3210);
diff --git a/src/opts/SkPMFloat_neon.h b/src/opts/SkPMFloat_neon.h
index e5b16f5e6f..41c553ebb9 100644
--- a/src/opts/SkPMFloat_neon.h
+++ b/src/opts/SkPMFloat_neon.h
@@ -5,11 +5,6 @@
* found in the LICENSE file.
*/
-inline SkPMFloat& SkPMFloat::operator=(const SkPMFloat& that) {
- fColors = that.fColors;
- return *this;
-}
-
// For SkPMFloat(SkPMColor), we widen our 8 bit components (fix8) to 8-bit components in 16 bits
// (fix8_16), then widen those to 8-bit-in-32-bits (fix8_32), and finally convert those to floats.
@@ -27,7 +22,7 @@ inline SkPMFloat::SkPMFloat(SkPMColor c) {
}
inline SkPMColor SkPMFloat::trunc() const {
- uint32x4_t fix8_32 = vcvtq_u32_f32(fColors); // vcvtq_u32_f32 truncates
+ uint32x4_t fix8_32 = vcvtq_u32_f32(fColors.vec()); // vcvtq_u32_f32 truncates
uint16x4_t fix8_16 = vmovn_u32(fix8_32);
uint8x8_t fix8 = vmovn_u16(vcombine_u16(fix8_16, vdup_n_u16(0)));
SkPMColor c = vget_lane_u32((uint32x2_t)fix8, 0);
@@ -37,11 +32,11 @@ inline SkPMColor SkPMFloat::trunc() const {
inline SkPMColor SkPMFloat::get() const {
SkASSERT(this->isValid());
- return SkPMFloat(Sk4f(0.5f) + *this).trunc();
+ return SkPMFloat(Sk4s(0.5f) + *this).trunc();
}
inline SkPMColor SkPMFloat::clamped() const {
- float32x4_t add_half = vaddq_f32(fColors, vdupq_n_f32(0.5f));
+ float32x4_t add_half = vaddq_f32(fColors.vec(), vdupq_n_f32(0.5f));
uint32x4_t fix8_32 = vcvtq_u32_f32(add_half); // vcvtq_u32_f32 truncates, so round manually
uint16x4_t fix8_16 = vqmovn_u32(fix8_32);
uint8x8_t fix8 = vqmovn_u16(vcombine_u16(fix8_16, vdup_n_u16(0)));
diff --git a/src/opts/SkPMFloat_none.h b/src/opts/SkPMFloat_none.h
index 86516b1875..19557f11de 100644
--- a/src/opts/SkPMFloat_none.h
+++ b/src/opts/SkPMFloat_none.h
@@ -5,11 +5,6 @@
* found in the LICENSE file.
*/
-inline SkPMFloat& SkPMFloat::operator=(const SkPMFloat& that) {
- for (int i = 0; i < 4; i++) { fColor[i] = that.fColor[i]; }
- return *this;
-}
-
inline SkPMFloat::SkPMFloat(SkPMColor c) {
*this = SkPMFloat::FromARGB(SkGetPackedA32(c),
SkGetPackedR32(c),