aboutsummaryrefslogtreecommitdiffhomepage
path: root/src
diff options
context:
space:
mode:
authorGravatar mtklein <mtklein@chromium.org>2015-04-14 14:02:52 -0700
committerGravatar Commit bot <commit-bot@chromium.org>2015-04-14 14:02:52 -0700
commit115acee9386e685f9a5938fb2cf13fd5a475012a (patch)
treec15cc68b8505b559f65b2f361dc6372bee725ea0 /src
parenta669bc37c613a18a07a879eb791253e5246f455a (diff)
Sk4h and Sk8h for SSE
These will underlie the SkPMFloat-like class for uint16_t components. Sk4h will back a single-pixel version, and Sk8h any larger number than that. BUG=skia: Review URL: https://codereview.chromium.org/1088883005
Diffstat (limited to 'src')
-rw-r--r--src/core/SkNx.h69
-rw-r--r--src/opts/SkNx_sse.h54
2 files changed, 123 insertions, 0 deletions
diff --git a/src/core/SkNx.h b/src/core/SkNx.h
index c35d8fe5d3..8244e9026c 100644
--- a/src/core/SkNx.h
+++ b/src/core/SkNx.h
@@ -36,6 +36,45 @@ private:
};
template <int N, typename T>
+class SkNi {
+public:
+ SkNi() {}
+ explicit SkNi(T val) : fLo(val), fHi(val) {}
+ static SkNi Load(const T vals[N]) {
+ return SkNi(SkNi<N/2,T>::Load(vals), SkNi<N/2,T>::Load(vals+N/2));
+ }
+
+ SkNi(T a, T b) : fLo(a), fHi(b) { REQUIRE(N==2); }
+ SkNi(T a, T b, T c, T d) : fLo(a,b), fHi(c,d) { REQUIRE(N==4); }
+ SkNi(T a, T b, T c, T d, T e, T f, T g, T h) : fLo(a,b,c,d), fHi(e,f,g,h) { REQUIRE(N==8); }
+
+ void store(T vals[N]) const {
+ fLo.store(vals);
+ fHi.store(vals+N/2);
+ }
+
+ SkNi operator + (const SkNi& o) const { return SkNi(fLo + o.fLo, fHi + o.fHi); }
+ SkNi operator - (const SkNi& o) const { return SkNi(fLo - o.fLo, fHi - o.fHi); }
+ SkNi operator * (const SkNi& o) const { return SkNi(fLo * o.fLo, fHi * o.fHi); }
+
+ SkNi operator << (int bits) const { return SkNi(fLo << bits, fHi << bits); }
+ SkNi operator >> (int bits) const { return SkNi(fLo >> bits, fHi >> bits); }
+
+ // TODO: comparisons, min, max?
+
+ template <int k> T kth() const {
+ SkASSERT(0 <= k && k < N);
+ return k < N/2 ? fLo.template kth<k>() : fHi.template kth<k-N/2>();
+ }
+
+private:
+ REQUIRE(0 == (N & (N-1)));
+ SkNi(const SkNi<N/2, T>& lo, const SkNi<N/2, T>& hi) : fLo(lo), fHi(hi) {}
+
+ SkNi<N/2, T> fLo, fHi;
+};
+
+template <int N, typename T>
class SkNf {
typedef SkNb<N, sizeof(T)> Nb;
public:
@@ -106,6 +145,31 @@ private:
};
template <typename T>
+class SkNi<1,T> {
+public:
+ SkNi() {}
+ explicit SkNi(T val) : fVal(val) {}
+ static SkNi Load(const T vals[1]) { return SkNi(vals[0]); }
+
+ void store(T vals[1]) const { vals[0] = fVal; }
+
+ SkNi operator + (const SkNi& o) const { return SkNi(fVal + o.fVal); }
+ SkNi operator - (const SkNi& o) const { return SkNi(fVal - o.fVal); }
+ SkNi operator * (const SkNi& o) const { return SkNi(fVal * o.fVal); }
+
+ SkNi operator << (int bits) const { return SkNi(fVal << bits); }
+ SkNi operator >> (int bits) const { return SkNi(fVal >> bits); }
+
+ template <int k> T kth() const {
+ SkASSERT(0 == k);
+ return fVal;
+ }
+
+private:
+ T fVal;
+};
+
+template <typename T>
class SkNf<1,T> {
typedef SkNb<1, sizeof(T)> Nb;
public:
@@ -159,6 +223,8 @@ template <typename L, typename R> L& operator -= (L& l, const R& r) { return (l
template <typename L, typename R> L& operator *= (L& l, const R& r) { return (l = l * r); }
template <typename L, typename R> L& operator /= (L& l, const R& r) { return (l = l / r); }
+template <typename L> L& operator <<= (L& l, int bits) { return (l = l << bits); }
+template <typename L> L& operator >>= (L& l, int bits) { return (l = l >> bits); }
// Include platform specific specializations if available.
#ifndef SKNX_NO_SIMD
@@ -179,4 +245,7 @@ typedef SkNf<4, float> Sk4f;
typedef SkNf<4, double> Sk4d;
typedef SkNf<4, SkScalar> Sk4s;
+typedef SkNi<4, uint16_t> Sk4h;
+typedef SkNi<8, uint16_t> Sk8h;
+
#endif//SkNx_DEFINED
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
index a63cd840c0..46ddcb2d12 100644
--- a/src/opts/SkNx_sse.h
+++ b/src/opts/SkNx_sse.h
@@ -185,5 +185,59 @@ protected:
__m128 fVec;
};
+template <>
+class SkNi<4, uint16_t> {
+public:
+ SkNi(const __m128i& vec) : fVec(vec) {}
+
+ SkNi() {}
+ explicit SkNi(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
+ static SkNi Load(const uint16_t vals[4]) { return _mm_loadl_epi64((const __m128i*)vals); }
+ SkNi(uint16_t a, uint16_t b, uint16_t c, uint16_t d) : fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}
+
+ void store(uint16_t vals[4]) const { _mm_storel_epi64((__m128i*)vals, fVec); }
+
+ SkNi operator + (const SkNi& o) const { return _mm_add_epi16(fVec, o.fVec); }
+ SkNi operator - (const SkNi& o) const { return _mm_sub_epi16(fVec, o.fVec); }
+ SkNi operator * (const SkNi& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
+
+ SkNi operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
+ SkNi operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }
+
+ template <int k> uint16_t kth() const {
+ SkASSERT(0 <= k && k < 4);
+ return _mm_extract_epi16(fVec, k);
+ }
+protected:
+ __m128i fVec;
+};
+
+template <>
+class SkNi<8, uint16_t> {
+public:
+ SkNi(const __m128i& vec) : fVec(vec) {}
+
+ SkNi() {}
+ explicit SkNi(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
+ static SkNi Load(const uint16_t vals[8]) { return _mm_loadu_si128((const __m128i*)vals); }
+ SkNi(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
+ uint16_t e, uint16_t f, uint16_t g, uint16_t h) : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}
+
+ void store(uint16_t vals[8]) const { _mm_storeu_si128((__m128i*)vals, fVec); }
+
+ SkNi operator + (const SkNi& o) const { return _mm_add_epi16(fVec, o.fVec); }
+ SkNi operator - (const SkNi& o) const { return _mm_sub_epi16(fVec, o.fVec); }
+ SkNi operator * (const SkNi& o) const { return _mm_mullo_epi16(fVec, o.fVec); }
+
+ SkNi operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
+ SkNi operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }
+
+ template <int k> uint16_t kth() const {
+ SkASSERT(0 <= k && k < 8);
+ return _mm_extract_epi16(fVec, k);
+ }
+protected:
+ __m128i fVec;
+};
#endif//SkNx_sse_DEFINED