author     Herb Derby <herb@google.com>                    2017-11-02 13:18:38 -0400
committer  Skia Commit-Bot <skia-commit-bot@chromium.org>  2017-11-02 19:34:11 +0000
commit     d1b3c7846d7219c902919576ae3d468e9d55f475 (patch)
tree       0deacfa2d3289d53be80863e42ebdefed0789a7a /src/opts
parent     9e43934a7270c413088c77f16fee727b40395471 (diff)
Support for direct Gaussian blur evaluation
Change-Id: I1b00ba2720648b75fce47d3f4d0f56fb8f2cd171
Reviewed-on: https://skia-review.googlesource.com/67041
Reviewed-by: Mike Klein <mtklein@chromium.org>
Commit-Queue: Herb Derby <herb@google.com>
Diffstat (limited to 'src/opts')
-rw-r--r--  src/opts/SkNx_neon.h  48
-rw-r--r--  src/opts/SkNx_sse.h   62
2 files changed, 110 insertions, 0 deletions
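For context (a usage sketch, not code from this patch): a directly evaluated Gaussian blur can keep its weights in 0.16 fixed point and apply them with the new mulHi, which keeps the high 16 bits of each 16x16-bit product. The helper below is hypothetical and assumes the Sk8b/Sk8h aliases from SkNx.h are on the include path.

    #include "SkNx.h"
    #include <cstdint>

    // Apply a 0.16 fixed-point weight w16 to eight 8-bit values.
    static void weighPixels(const uint8_t src[8], uint16_t w16, uint16_t dst[8]) {
        Sk8h wide = SkNx_cast<uint16_t>(Sk8b::Load(src));  // widen 8 bytes to 16 bits
        Sk8h prod = (wide << 8).mulHi(Sk8h(w16));          // ((v<<8)*w) >> 16 == (v*w) >> 8
        prod.store(dst);
    }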
diff --git a/src/opts/SkNx_neon.h b/src/opts/SkNx_neon.h
index 32be78f66b..16a32e11a2 100644
--- a/src/opts/SkNx_neon.h
+++ b/src/opts/SkNx_neon.h
@@ -324,6 +324,13 @@ public:
return pun.us[k&7];
}
+ AI SkNx mulHi(const SkNx& m) const {
+ uint32x4_t hi = vmull_u16(vget_high_u16(fVec), vget_high_u16(m.fVec));
+ uint32x4_t lo = vmull_u16( vget_low_u16(fVec), vget_low_u16(m.fVec));
+
+ return { vcombine_u16(vshrn_n_u32(lo,16), vshrn_n_u32(hi,16)) };
+ }
+
AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
return vbslq_u16(fVec, t.fVec, e.fVec);
}
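NEON has no single unsigned 16-bit multiply-returning-high-half instruction, so the lanes above widen each half with vmull_u16 and narrow back with a 16-bit right shift (vshrn_n_u32). Per lane this matches the scalar sketch below.

    #include <cstdint>

    // Scalar equivalent of one mulHi lane: the high 16 bits of the
    // 16x16 -> 32-bit unsigned product.
    static uint16_t mulHiLane(uint16_t a, uint16_t b) {
        return (uint16_t)(((uint32_t)a * (uint32_t)b) >> 16);
    }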
@@ -360,6 +367,30 @@ public:
};
template <>
+class SkNx<8, uint8_t> {
+public:
+ AI SkNx(const uint8x8_t& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint8_t val) : fVec(vdup_n_u8(val)) {}
+ AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
+ uint8_t e, uint8_t f, uint8_t g, uint8_t h) {
+ fVec = (uint8x8_t) { a,b,c,d, e,f,g,h };
+ }
+
+ AI static SkNx Load(const void* ptr) { return vld1_u8((const uint8_t*)ptr); }
+ AI void store(void* ptr) const { vst1_u8((uint8_t*)ptr, fVec); }
+
+ AI uint8_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 8);
+ union { uint8x8_t v; uint8_t us[8]; } pun = {fVec};
+ return pun.us[k&7];
+ }
+
+ uint8x8_t fVec;
+};
+
+template <>
class SkNx<16, uint8_t> {
public:
AI SkNx(const uint8x16_t& vec) : fVec(vec) {}
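The new 8-lane byte vector maps onto a 64-bit NEON D register, so Load and store move exactly eight bytes. A round-trip sketch (hypothetical caller):

    #include "SkNx.h"
    #include <cstdint>

    static void copy8(const uint8_t src[8], uint8_t dst[8]) {
        Sk8b v = Sk8b::Load(src);  // vld1_u8: exactly 8 bytes in
        v.store(dst);              // vst1_u8: exactly 8 bytes out
    }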
@@ -562,14 +593,31 @@ template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
(uint8x16_t)vcvtq_u32_f32(d.fVec)).val[0]).val[0];
}
+template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, int32_t>(const Sk8i& src) {
+ Sk4i a, b;
+ SkNx_split(src, &a, &b);
+ uint16x4_t a16 = vqmovun_s32(a.fVec);
+ uint16x4_t b16 = vqmovun_s32(b.fVec);
+
+ return vqmovn_u16(vcombine_u16(a16, b16));
+}
+
template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
return vget_low_u16(vmovl_u8(src.fVec));
}
+template<> AI /*static*/ Sk8h SkNx_cast<uint16_t, uint8_t>(const Sk8b& src) {
+ return vmovl_u8(src.fVec);
+}
+
template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
return vmovn_u16(vcombine_u16(src.fVec, src.fVec));
}
+template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, uint16_t>(const Sk8h& src) {
+ return vqmovn_u16(src.fVec);
+}
+
template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
uint16x4_t _16 = vqmovun_s32(src.fVec);
return vqmovn_u16(vcombine_u16(_16, _16));
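These new casts pair up: widening Sk8b to Sk8h (vmovl_u8) is exact, while the narrowing casts saturate (vqmovn_u16 and vqmovun_s32 clamp rather than wrap). A sketch of the usual widen/operate/narrow pattern (hypothetical caller):

    #include "SkNx.h"
    #include <cstdint>

    static void addSaturating(const uint8_t a[8], const uint8_t b[8], uint8_t dst[8]) {
        Sk8h sum = SkNx_cast<uint16_t>(Sk8b::Load(a)) +
                   SkNx_cast<uint16_t>(Sk8b::Load(b));  // exact 16-bit sums
        SkNx_cast<uint8_t>(sum).store(dst);             // clamped to 255, not wrapped
    }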
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
index d4d4781e5a..ae340c260f 100644
--- a/src/opts/SkNx_sse.h
+++ b/src/opts/SkNx_sse.h
@@ -455,6 +455,10 @@ public:
_mm_sub_epi8(b.fVec, top_8x)));
}
+ AI SkNx mulHi(const SkNx& m) const {
+ return _mm_mulhi_epu16(fVec, m.fVec);
+ }
+
AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
_mm_andnot_si128(fVec, e.fVec));
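Unlike NEON, SSE2 exposes the unsigned high-half multiply directly, so mulHi here is a single _mm_mulhi_epu16. A standalone check of the lane semantics (a sketch, not part of the patch):

    #include <emmintrin.h>
    #include <cstdint>

    // Lane 0 of _mm_mulhi_epu16 equals the scalar (a*b) >> 16.
    static uint16_t mulHiLane0(uint16_t a, uint16_t b) {
        __m128i r = _mm_mulhi_epu16(_mm_set1_epi16((short)a), _mm_set1_epi16((short)b));
        return (uint16_t)_mm_extract_epi16(r, 0);
    }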
@@ -492,6 +496,46 @@ public:
};
template <>
+class SkNx<8, uint8_t> {
+public:
+ AI SkNx(const __m128i& vec) : fVec(vec) {}
+
+ AI SkNx() {}
+ AI SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
+ // Load just the 8 bytes this vector owns; a full 16-byte load could read past the buffer.
+ AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
+ AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
+ uint8_t e, uint8_t f, uint8_t g, uint8_t h)
+ : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, 0,0,0,0, 0,0,0,0)) {}
+
+ AI void store(void* ptr) const {_mm_storel_epi64((__m128i*)ptr, fVec);}
+
+ AI SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }
+
+ AI SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
+ AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }
+
+ AI static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
+ AI SkNx operator < (const SkNx& o) const {
+ // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
+ auto flip = _mm_set1_epi8(char(0x80));
+ return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
+ }
+
+ AI uint8_t operator[](int k) const {
+ SkASSERT(0 <= k && k < 8);
+ union { __m128i v; uint8_t us[16]; } pun = {fVec};
+ return pun.us[k&7];
+ }
+
+ AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
+ return _mm_or_si128(_mm_and_si128 (fVec, t.fVec),
+ _mm_andnot_si128(fVec, e.fVec));
+ }
+
+ __m128i fVec;
+};
+
+template <>
class SkNx<16, uint8_t> {
public:
AI SkNx(const __m128i& vec) : fVec(vec) {}
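With operator< and thenElse, the new 8-lane type supports branchless per-byte selects. For example, this hypothetical helper is equivalent to Sk8b::Min:

    #include "SkNx.h"

    static Sk8b min8(const Sk8b& a, const Sk8b& b) {
        Sk8b lt = a < b;           // lanes are 0xff where a[i] < b[i], else 0x00
        return lt.thenElse(a, b);  // take a where the mask is set, b elsewhere
    }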
@@ -536,6 +580,7 @@ public:
template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
return _mm_cvtepi32_ps(src.fVec);
}
+
template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
return SkNx_cast<float>(Sk4i::Load(&src));
}
@@ -597,6 +642,14 @@ template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
return _mm_cvtepi32_ps(_32);
}
+template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, int32_t>(const Sk8i& src) {
+ Sk4i lo, hi;
+ SkNx_split(src, &lo, &hi);
+
+ auto t = _mm_packs_epi32(lo.fVec, hi.fVec);
+ return _mm_packus_epi16(t, t);
+}
+
template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
Sk8f ab, cd;
SkNx_split(src, &ab, &cd);
@@ -615,14 +668,23 @@ template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
}
+template<> AI /*static*/ Sk8h SkNx_cast<uint16_t, uint8_t>(const Sk8b& src) {
+ return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
+}
+
template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
return _mm_packus_epi16(src.fVec, src.fVec);
}
+template<> AI /*static*/ Sk8b SkNx_cast<uint8_t, uint16_t>(const Sk8h& src) {
+ return _mm_packus_epi16(src.fVec, src.fVec);
+}
+
template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
}
+
template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
return _mm_packus_epi16(_mm_packus_epi16(src.fVec, src.fVec), src.fVec);
}
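The new Sk8i to Sk8b cast is a natural output stage for a blur pass: _mm_packs_epi32 narrows with signed saturation, then _mm_packus_epi16 clamps to [0, 255]. A usage sketch (hypothetical caller):

    #include "SkNx.h"
    #include <cstdint>

    // Clamp eight 32-bit sums down to bytes in one shot.
    static void storeClamped(const Sk8i& sums, uint8_t dst[8]) {
        SkNx_cast<uint8_t>(sums).store(dst);  // each lane saturates to [0, 255]
    }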