path: root/src/opts
author    mtklein <mtklein@chromium.org>    2015-11-17 14:19:52 -0800
committer Commit bot <commit-bot@chromium.org>    2015-11-17 14:19:52 -0800
commit    cbf4fba43933302a846872e4c5ce8f1adb8b325e (patch)
tree      96dad6cc0a2241544a0cf52cccdc7a0fbe89f9b1 /src/opts
parent    56847a65648af4d06da9c26c55242949a1bf31ab (diff)
div255(x) as ((x+128)*257)>>16 with SSE
_mm_mulhi_epu16 makes the (...*257)>>16 part simple. This seems to speed up every
transfermode that uses div255(), in the 7-25% range. It even appears to obviate the
need for approxMulDiv255() on SSE. I'm not sure about NEON yet, so I'll keep
approxMulDiv255() for now.

No pixels should change:
https://gold.skia.org/search2?issue=1452903004&unt=true&query=source_type%3Dgm&master=false

BUG=skia:
CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot

Review URL: https://codereview.chromium.org/1452903004
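
As a sanity check (not part of this change), the small standalone program below
exhaustively verifies that the SSE form ((x+128)*257)>>16, the NEON/portable form
((x+128) + ((x+128)>>8)) >> 8, and the rounded division (x+127)/255 all agree for
every product of two 8-bit values:

    #include <cassert>
    #include <cstdint>

    int main() {
        for (uint32_t x = 0; x <= 255u * 255u; x++) {   // every product of two 8-bit values
            uint32_t y    = x + 128;
            uint32_t sse  = (y * 257) >> 16;            // what _mm_mulhi_epu16 computes
            uint32_t neon = (y + (y >> 8)) >> 8;        // what addNarrowHi(v >> 8) computes
            uint32_t ref  = (x + 127) / 255;            // round-to-nearest division by 255
            assert(sse == ref && neon == ref);          // y*257 == (y<<8) + y, so both forms match
        }
        return 0;
    }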
Diffstat (limited to 'src/opts')
-rw-r--r--  src/opts/Sk4px_NEON.h  6
-rw-r--r--  src/opts/Sk4px_SSE2.h  9
-rw-r--r--  src/opts/Sk4px_none.h  6
3 files changed, 21 insertions, 0 deletions
diff --git a/src/opts/Sk4px_NEON.h b/src/opts/Sk4px_NEON.h
index 89841d927e..c27bb13764 100644
--- a/src/opts/Sk4px_NEON.h
+++ b/src/opts/Sk4px_NEON.h
@@ -57,6 +57,12 @@ inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
vaddhn_u16(this->fHi.fVec, o.fHi.fVec)));
}
+inline Sk4px Sk4px::Wide::div255() const {
+ // Calculated as ((x+128) + ((x+128)>>8)) >> 8.
+ auto v = *this + Sk16h(128);
+ return v.addNarrowHi(v>>8);
+}
+
inline Sk4px Sk4px::alphas() const {
auto as = vshrq_n_u32((uint32x4_t)fVec, SK_A32_SHIFT); // ___3 ___2 ___1 ___0
return Sk16b((uint8x16_t)vmulq_n_u32(as, 0x01010101)); // 3333 2222 1111 0000
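
For reference, here is a scalar model of what the NEON path above computes per
lane (illustrative only; div255_scalar is a hypothetical name, not a Skia
function): v = x + 128, then vaddhn_u16 adds v and v>>8 and keeps the high byte
of each 16-bit sum.

    #include <stdint.h>

    static inline uint8_t div255_scalar(uint16_t x) {
        uint16_t v = x + 128;                    // x is a product in [0, 255*255]
        return (uint8_t)((v + (v >> 8)) >> 8);   // ((x+128) + ((x+128)>>8)) >> 8
    }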
diff --git a/src/opts/Sk4px_SSE2.h b/src/opts/Sk4px_SSE2.h
index 9c3eb1210c..96f21db399 100644
--- a/src/opts/Sk4px_SSE2.h
+++ b/src/opts/Sk4px_SSE2.h
@@ -45,6 +45,15 @@ inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
return Sk4px(_mm_packus_epi16(r.fLo.fVec, r.fHi.fVec));
}
+inline Sk4px Sk4px::Wide::div255() const {
+ // (x + 127) / 255 == ((x+128) * 257)>>16,
+ // and _mm_mulhi_epu16 makes the (_ * 257)>>16 part very convenient.
+ const __m128i _128 = _mm_set1_epi16(128),
+ _257 = _mm_set1_epi16(257);
+ return Sk4px(_mm_packus_epi16(_mm_mulhi_epu16(_mm_add_epi16(fLo.fVec, _128), _257),
+ _mm_mulhi_epu16(_mm_add_epi16(fHi.fVec, _128), _257)));
+}
+
// Load4Alphas and Load2Alphas use possibly-unaligned loads (SkAlpha[] -> uint16_t or uint32_t).
// These are safe on x86, often with no speed penalty.
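
To experiment with the SSE2 trick outside of Sk4px, here is a minimal standalone
sketch (div255_epu16 is a hypothetical helper, not part of Skia) that applies the
same ((x+128)*257)>>16 idea to eight 16-bit lanes and prints the rounded quotients:

    #include <emmintrin.h>   // SSE2
    #include <stdint.h>
    #include <stdio.h>

    // Rounded division by 255 for eight 16-bit lanes, each in [0, 255*255].
    static inline __m128i div255_epu16(__m128i x) {
        const __m128i k128 = _mm_set1_epi16(128),
                      k257 = _mm_set1_epi16(257);
        return _mm_mulhi_epu16(_mm_add_epi16(x, k128), k257);
    }

    int main() {
        uint16_t in[8] = {0, 127, 128, 255, 255*127, 255*128, 255*254, 255*255},
                 out[8];
        _mm_storeu_si128((__m128i*)out, div255_epu16(_mm_loadu_si128((const __m128i*)in)));
        for (int i = 0; i < 8; i++) {
            printf("%d / 255 ~= %d\n", (int)in[i], (int)out[i]);   // matches (in[i] + 127) / 255
        }
        return 0;
    }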
diff --git a/src/opts/Sk4px_none.h b/src/opts/Sk4px_none.h
index 540edb821d..efbd780c9f 100644
--- a/src/opts/Sk4px_none.h
+++ b/src/opts/Sk4px_none.h
@@ -62,6 +62,12 @@ inline Sk4px Sk4px::Wide::addNarrowHi(const Sk16h& other) const {
r.kth<12>(), r.kth<13>(), r.kth<14>(), r.kth<15>());
}
+inline Sk4px Sk4px::Wide::div255() const {
+ // Calculated as ((x+128) + ((x+128)>>8)) >> 8.
+ auto v = *this + Sk16h(128);
+ return v.addNarrowHi(v>>8);
+}
+
inline Sk4px Sk4px::alphas() const {
static_assert(SK_A32_SHIFT == 24, "This method assumes little-endian.");
return Sk16b(this->kth< 3>(), this->kth< 3>(), this->kth< 3>(), this->kth< 3>(),