author     mtklein <mtklein@chromium.org>        2016-02-11 06:30:03 -0800
committer  Commit bot <commit-bot@chromium.org>  2016-02-11 06:30:03 -0800
commit     fff055cc5f9ca5015678f4f13a4f842084bd62d5
tree       f7e00567455fbd81ab5c1b401e4e80ed52a2095e
parent     cbefc5e4ca7fd7aaa5d2a3aa85b30f16148c3d2f
SkHalfToFloat_01 / SkFloatToHalf_01
These are basically inlined, 4-at-a-time versions of our existing functions,
but cut down to avoid any work that's only necessary outside [0,1].

Both f16 and f32 denorms should work fine modulo the usual ARMv7 NEON
denorm==zero caveat.

In exchange for a little speed, f32->f16 does not round properly. Instead it
truncates, so it's never off by more than 1 bit.

Support for finite values >1 or <0 is straightforward to add back. >1 might
already work as-is.

Getting close to _u16 performance:

     micros    bench
     261.13    xferu64_bw_1_opaque_u16
    1833.51    xferu64_bw_1_alpha_u16
    2762.32 ?  xferu64_aa_1_opaque_u16
    3334.29    xferu64_aa_1_alpha_u16

     249.78    xferu64_bw_1_opaque_f16
    3383.18    xferu64_bw_1_alpha_f16
    4214.72    xferu64_aa_1_opaque_f16
    4701.19    xferu64_aa_1_alpha_f16

BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1685133005
Committed: https://skia.googlesource.com/skia/+/9ea11a4235b3e3521cc8bf914a27c2d0dc062db9
CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot

Review URL: https://codereview.chromium.org/1685133005
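
For reference, here is a minimal usage sketch of the two new helpers (not part of this CL; the function name, test values, and assertions are purely illustrative). It packs four [0,1] floats into a uint64_t of halfs and back, and checks the "never off by more than 1 bit" truncation claim against the serial SkFloatToHalf:

// Minimal sketch, not part of this CL: round-trip four [0,1] floats through
// the packed-half helpers added in src/core/SkHalf.h below.
#include "SkHalf.h"

static void roundtrip_example() {
    Sk4f rgba(0.25f, 0.5f, 0.75f, 1.0f);        // four values in [0,1]
    uint64_t packed = SkFloatToHalf_01(rgba);   // lane i lands in bits [16*i, 16*i+16)
    Sk4f back = SkHalfToFloat_01(packed);

    for (int i = 0; i < 4; i++) {
        uint16_t fast  = (uint16_t)(packed >> (i*16)),
                 exact = SkFloatToHalf(rgba[i]);   // serial round-to-nearest version
        // f32->f16 truncates, so the fast path may be exactly one code point low.
        SkASSERT(fast == exact || fast == exact - 1);
        SkASSERT(back[i] >= 0.0f && back[i] <= 1.0f);
    }
}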
-rw-r--r--  src/core/SkHalf.h          | 63
-rw-r--r--  src/core/SkXfermodeU64.cpp | 25
-rw-r--r--  tests/Float16Test.cpp      | 35
3 files changed, 100 insertions, 23 deletions
diff --git a/src/core/SkHalf.h b/src/core/SkHalf.h
index 7e41c6ff0c..3937343b6c 100644
--- a/src/core/SkHalf.h
+++ b/src/core/SkHalf.h
@@ -8,6 +8,7 @@
#ifndef SkHalf_DEFINED
#define SkHalf_DEFINED
+#include "SkNx.h"
#include "SkTypes.h"
// 16-bit floating point value
@@ -23,4 +24,66 @@ typedef uint16_t SkHalf;
float SkHalfToFloat(SkHalf h);
SkHalf SkFloatToHalf(float f);
+// Convert between half and single precision floating point, but pull any dirty
+// trick we can to make it faster as long as it's correct enough for values in [0,1].
+static inline Sk4f SkHalfToFloat_01(uint64_t);
+static inline uint64_t SkFloatToHalf_01(const Sk4f&);
+
+// ~~~~~~~~~~~ impl ~~~~~~~~~~~~~~ //
+
+// Like the serial versions in SkHalf.cpp, these are based on
+// https://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/
+
+// TODO: NEON versions
+static inline Sk4f SkHalfToFloat_01(uint64_t hs) {
+#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ // Load our 16-bit floats into the bottom 16 bits of each 32-bit lane, with zeroes on top.
+ __m128i h = _mm_unpacklo_epi16(_mm_loadl_epi64((const __m128i*)&hs), _mm_setzero_si128());
+
+ // Fork into two paths, depending on whether the 16-bit float is denormalized.
+ __m128 is_denorm = _mm_castsi128_ps(_mm_cmplt_epi32(h, _mm_set1_epi32(0x0400)));
+
+ // TODO: figure out, explain
+ const __m128 half = _mm_set1_ps(0.5f);
+ __m128 denorm = _mm_sub_ps(_mm_or_ps(_mm_castsi128_ps(h), half), half);
+
+ // If we're normalized, just shift ourselves so the exponent/mantissa dividing line
+ // is correct, then re-bias the exponent from 15 to 127.
+ __m128 norm = _mm_castsi128_ps(_mm_add_epi32(_mm_slli_epi32(h, 13),
+ _mm_set1_epi32((127-15) << 23)));
+
+ return _mm_or_ps(_mm_and_ps (is_denorm, denorm),
+ _mm_andnot_ps(is_denorm, norm));
+#else
+ float fs[4];
+ for (int i = 0; i < 4; i++) {
+ fs[i] = SkHalfToFloat(hs >> (i*16));
+ }
+ return Sk4f::Load(fs);
+#endif
+}
+
+static inline uint64_t SkFloatToHalf_01(const Sk4f& fs) {
+#if !defined(SKNX_NO_SIMD) && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE2
+ // Scale our floats down by a tiny power of 2 to pull up our mantissa bits,
+ // then shift back down to 16-bit float layout. This doesn't round, so can be 1 bit small.
+ // TODO: understand better. Why this scale factor?
+ const __m128 scale = _mm_castsi128_ps(_mm_set1_epi32(15 << 23));
+ __m128i h = _mm_srli_epi32(_mm_castps_si128(_mm_mul_ps(fs.fVec, scale)), 13);
+
+ uint64_t r;
+ _mm_storel_epi64((__m128i*)&r, _mm_packs_epi32(h,h));
+ return r;
+#else
+ SkHalf hs[4];
+ for (int i = 0; i < 4; i++) {
+ hs[i] = SkFloatToHalf(fs[i]);
+ }
+ return (uint64_t)hs[3] << 48
+ | (uint64_t)hs[2] << 32
+ | (uint64_t)hs[1] << 16
+ | (uint64_t)hs[0] << 0;
+#endif
+}
+
#endif
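
The two TODOs above ("figure out, explain" and "Why this scale factor?") fall out of the bit tricks in the fgiesen post cited in the header. Here is an explanatory scalar sketch of those two steps, written as an aside rather than as code from this CL (helper names are made up):

#include <cstdint>
#include <cstring>

// Half->float, denormal path: a denormal half with mantissa m has value m * 2^-24.
// OR-ing the 16-bit pattern (which is just m, since the exponent bits are zero) into
// the mantissa of 0.5f (bits 0x3F000000) builds the float 0.5*(1 + m*2^-23)
// = 0.5 + m*2^-24, so subtracting 0.5 leaves exactly m * 2^-24.
static inline float half_denorm_to_float(uint16_t h) {   // assumes h < 0x0400
    uint32_t bits = 0x3F000000 | h;
    float f;
    memcpy(&f, &bits, 4);
    return f - 0.5f;
}

// Float->half: the scale constant's bit pattern is 15<<23, i.e. the float 2^(15-127)
// = 2^-112.  Multiplying by it rebiases the exponent from float's 127 down to half's 15;
// shifting the product's bits right by 13 then truncates the low 13 mantissa bits and
// lines the remaining 10 up with the half's mantissa field.
static inline uint16_t float_01_to_half(float f) {        // assumes f in [0,1]
    uint32_t scale_bits = 15 << 23;
    float scale;
    memcpy(&scale, &scale_bits, 4);
    float scaled = f * scale;
    uint32_t bits;
    memcpy(&bits, &scaled, 4);
    return (uint16_t)(bits >> 13);
}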
diff --git a/src/core/SkXfermodeU64.cpp b/src/core/SkXfermodeU64.cpp
index 6435e6a2e2..6de5f1b09f 100644
--- a/src/core/SkXfermodeU64.cpp
+++ b/src/core/SkXfermodeU64.cpp
@@ -46,36 +46,15 @@ static uint64_t store_to_u16(const Sk4f& x4) {
return value;
}
-static Sk4f load_from_f16(uint64_t value) {
- const uint16_t* u16 = reinterpret_cast<const uint16_t*>(&value);
- float f4[4];
- for (int i = 0; i < 4; ++i) {
- f4[i] = SkHalfToFloat(u16[i]);
- }
- return Sk4f::Load(f4);
-}
-
-static uint64_t store_to_f16(const Sk4f& x4) {
- uint64_t value;
- uint16_t* u16 = reinterpret_cast<uint16_t*>(&value);
-
- float f4[4];
- x4.store(f4);
- for (int i = 0; i < 4; ++i) {
- u16[i] = SkFloatToHalf(f4[i]);
- }
- return value;
-}
-
// Returns dst in its "natural" bias (either unit-float or 16bit int)
//
template <DstType D> Sk4f load_from_dst(uint64_t dst) {
- return (D == kU16_Dst) ? load_from_u16(dst) : load_from_f16(dst);
+ return (D == kU16_Dst) ? load_from_u16(dst) : SkHalfToFloat_01(dst);
}
// Assumes x4 is already in the "natural" bias (either unit-float or 16bit int)
template <DstType D> uint64_t store_to_dst(const Sk4f& x4) {
- return (D == kU16_Dst) ? store_to_u16(x4) : store_to_f16(x4);
+ return (D == kU16_Dst) ? store_to_u16(x4) : SkFloatToHalf_01(x4);
}
///////////////////////////////////////////////////////////////////////////////////////////////////
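
As an illustration of why the packed uint64_t interface drops into these dst helpers so cleanly, here is a hypothetical sketch, not the xfermode code in this CL; it assumes premultiplied [0,1] RGBA with alpha in lane 3:

// Hypothetical sketch: src-over on a single F16 dst pixel using the new helpers.
static uint64_t srcover_f16_pixel(const Sk4f& src, uint64_t dst_f16) {
    Sk4f d = SkHalfToFloat_01(dst_f16);        // unpack 4 halfs -> 4 floats
    Sk4f r = src + d * Sk4f(1.0f - src[3]);    // src + dst * (1 - src_alpha)
    return SkFloatToHalf_01(r);                // pack back to 4 halfs
}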
diff --git a/tests/Float16Test.cpp b/tests/Float16Test.cpp
index f437268c79..f96d904567 100644
--- a/tests/Float16Test.cpp
+++ b/tests/Float16Test.cpp
@@ -10,6 +10,7 @@
#include "SkHalf.h"
#include "SkOpts.h"
#include "SkPixmap.h"
+#include "SkRandom.h"
static bool eq_within_half_float(float a, float b) {
const float kTolerance = 1.0f / (1 << (8 + 10));
@@ -64,3 +65,37 @@ DEF_TEST(float_to_half, reporter) {
SkOpts::half_to_float(fscratch, hs, 7);
REPORTER_ASSERT(reporter, 0 == memcmp(fscratch, fs, sizeof(fs)));
}
+
+DEF_TEST(HalfToFloat_01, r) {
+ for (uint16_t h = 0; h < 0x8000; h++) {
+ float f = SkHalfToFloat(h);
+ if (f >= 0 && f <= 1) {
+ REPORTER_ASSERT(r, SkHalfToFloat_01(h)[0] == f);
+ REPORTER_ASSERT(r, SkFloatToHalf_01(SkHalfToFloat_01(h)) == h);
+ }
+ }
+}
+
+DEF_TEST(FloatToHalf_01, r) {
+#if 0
+ for (uint32_t bits = 0; bits < 0x80000000; bits++) {
+#else
+ SkRandom rand;
+ for (int i = 0; i < 1000000; i++) {
+ uint32_t bits = rand.nextU();
+#endif
+ float f;
+ memcpy(&f, &bits, 4);
+ if (f >= 0 && f <= 1) {
+ uint16_t h1 = (uint16_t)SkFloatToHalf_01(Sk4f(f,0,0,0)),
+ h2 = SkFloatToHalf(f);
+ bool ok = (h1 == h2 || h1 == h2-1);
+ REPORTER_ASSERT(r, ok);
+ if (!ok) {
+ SkDebugf("%08x (%d) -> %04x (%d), want %04x (%d)\n",
+ bits, bits>>23, h1, h1>>10, h2, h2>>10);
+ break;
+ }
+ }
+ }
+}