-rw-r--r--  src/core/Sk4px.h       6
-rw-r--r--  src/opts/Sk4px_NEON.h  6
-rw-r--r--  src/opts/Sk4px_SSE2.h  9
-rw-r--r--  src/opts/Sk4px_none.h  6
4 files changed, 22 insertions, 5 deletions
diff --git a/src/core/Sk4px.h b/src/core/Sk4px.h
index a7f5c9f4c6..3755488a4a 100644
--- a/src/core/Sk4px.h
+++ b/src/core/Sk4px.h
@@ -66,11 +66,7 @@ public:
Sk4px addNarrowHi(const Sk16h&) const;
// Rounds, i.e. (x+127) / 255.
- Sk4px div255() const {
- // Calculated as ((x+128) + ((x+128)>>8)) >> 8.
- auto v = *this + Sk16h(128);
- return v.addNarrowHi(v >> 8);
- }
+ Sk4px div255() const;
// These just keep the types as Wide so the user doesn't have to keep casting.
Wide operator * (const Wide& o) const { return INHERITED::operator*(o); }
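
The generic div255() removed above leaned on addNarrowHi(), which (per the NEON
vaddhn_u16 lanes below) adds two 16-bit vectors and keeps the high byte of each
sum, so v.addNarrowHi(v >> 8) evaluates ((x+128) + ((x+128)>>8)) >> 8. A minimal
standalone C++ sketch, not part of this patch, that exhaustively checks that
formula against the rounded division (x+127)/255 over the 8-bit product range:

    // Checks ((x+128) + ((x+128)>>8)) >> 8 == (x+127)/255 for every x that can
    // arise as a product of two 8-bit values, the range div255() is used on.
    #include <cassert>
    #include <cstdint>

    int main() {
        for (uint32_t x = 0; x <= 255*255; x++) {
            uint32_t v = x + 128;
            assert(((v + (v >> 8)) >> 8) == (x + 127) / 255);
        }
        return 0;
    }
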
diff --git a/src/opts/Sk4px_NEON.h b/src/opts/Sk4px_NEON.h
index 89841d927e..c27bb13764 100644
--- a/src/opts/Sk4px_NEON.h
+++ b/src/opts/Sk4px_NEON.h
@@ -57,6 +57,12 @@ inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
vaddhn_u16(this->fHi.fVec, o.fHi.fVec)));
}
+inline Sk4px Sk4px::Wide::div255() const {
+ // Calculated as ((x+128) + ((x+128)>>8)) >> 8.
+ auto v = *this + Sk16h(128);
+ return v.addNarrowHi(v>>8);
+}
+
inline Sk4px Sk4px::alphas() const {
auto as = vshrq_n_u32((uint32x4_t)fVec, SK_A32_SHIFT); // ___3 ___2 ___1 ___0
return Sk16b((uint8x16_t)vmulq_n_u32(as, 0x01010101)); // 3333 2222 1111 0000
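
For reference, a standalone scalar sketch, not part of the patch, of the splat
trick used by alphas() above: shifting an ARGB word right by SK_A32_SHIFT leaves
the alpha byte in the low byte of the lane, and multiplying by 0x01010101
replicates it into all four byte positions, which is what vshrq_n_u32 followed
by vmulq_n_u32 do per 32-bit lane. The shift value 24 and the example pixel are
assumptions for illustration.

    // Scalar model of one 32-bit lane of alphas(): isolate the alpha byte,
    // then replicate it across the lane with a multiply by 0x01010101.
    #include <cassert>
    #include <cstdint>

    int main() {
        const uint32_t kAlphaShift = 24;        // assumes SK_A32_SHIFT == 24
        uint32_t pixel = 0x80112233u;           // hypothetical pixel, alpha = 0x80
        uint32_t a     = pixel >> kAlphaShift;  // 0x00000080
        uint32_t splat = a * 0x01010101u;       // 0x80808080
        assert(splat == 0x80808080u);
        return 0;
    }
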
diff --git a/src/opts/Sk4px_SSE2.h b/src/opts/Sk4px_SSE2.h
index 9c3eb1210c..96f21db399 100644
--- a/src/opts/Sk4px_SSE2.h
+++ b/src/opts/Sk4px_SSE2.h
@@ -45,6 +45,15 @@ inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
return Sk4px(_mm_packus_epi16(r.fLo.fVec, r.fHi.fVec));
}
+inline Sk4px Sk4px::Wide::div255() const {
+ // (x + 127) / 255 == ((x+128) * 257)>>16,
+ // and _mm_mulhi_epu16 makes the (_ * 257)>>16 part very convenient.
+ const __m128i _128 = _mm_set1_epi16(128),
+ _257 = _mm_set1_epi16(257);
+ return Sk4px(_mm_packus_epi16(_mm_mulhi_epu16(_mm_add_epi16(fLo.fVec, _128), _257),
+ _mm_mulhi_epu16(_mm_add_epi16(fHi.fVec, _128), _257)));
+}
+
// Load4Alphas and Load2Alphas use possibly-unaligned loads (SkAlpha[] -> uint16_t or uint32_t).
// These are safe on x86, often with no speed penalty.
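
A standalone scalar sketch, not part of the patch, of the identity the SSE2
div255() above relies on: _mm_mulhi_epu16 returns the high 16 bits of each
unsigned 16x16-bit product, so per lane it computes ((x+128) * 257) >> 16, which
the check below compares against (x+127)/255 over the 8-bit product range.

    // Checks ((x+128) * 257) >> 16 == (x+127)/255 for every x in [0, 255*255];
    // x+128 still fits in 16 bits there, matching _mm_mulhi_epu16's operands.
    #include <cassert>
    #include <cstdint>

    int main() {
        for (uint32_t x = 0; x <= 255*255; x++) {
            assert((((x + 128) * 257) >> 16) == (x + 127) / 255);
        }
        return 0;
    }
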
diff --git a/src/opts/Sk4px_none.h b/src/opts/Sk4px_none.h
index 540edb821d..efbd780c9f 100644
--- a/src/opts/Sk4px_none.h
+++ b/src/opts/Sk4px_none.h
@@ -62,6 +62,12 @@ inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
r.kth<12>(), r.kth<13>(), r.kth<14>(), r.kth<15>());
}
+inline Sk4px Sk4px::Wide::div255() const {
+ // Calculated as ((x+128) + ((x+128)>>8)) >> 8.
+ auto v = *this + Sk16h(128);
+ return v.addNarrowHi(v>>8);
+}
+
inline Sk4px Sk4px::alphas() const {
static_assert(SK_A32_SHIFT == 24, "This method assumes little-endian.");
return Sk16b(this->kth< 3>(), this->kth< 3>(), this->kth< 3>(), this->kth< 3>(),