aboutsummaryrefslogtreecommitdiffhomepage
path: root/src/opts
diff options
context:
space:
mode:
authorGravatar mtklein <mtklein@chromium.org>2016-02-10 07:55:56 -0800
committerGravatar Commit bot <commit-bot@chromium.org>2016-02-10 07:55:56 -0800
commite5fe9a42d487c8648101c6f8454575d6de1acafa (patch)
treeab231feab7f5508fee03ad0e1e954a42d306107c /src/opts
parent36c070d82856bd79eaa13bd058af45e19f302b53 (diff)
Sk4f: floor() via int32_t roundtrip.
About 25% faster on both x86 and ARMv7. BUG=skia: GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1682953002 CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-SKNX_NO_SIMD-Trybot Review URL: https://codereview.chromium.org/1682953002
Diffstat (limited to 'src/opts')
-rw-r--r--src/opts/SkNx_neon.h13
-rw-r--r--src/opts/SkNx_sse.h13
2 files changed, 12 insertions, 14 deletions
diff --git a/src/opts/SkNx_neon.h b/src/opts/SkNx_neon.h
index cdc4615849..641e9d296b 100644
--- a/src/opts/SkNx_neon.h
+++ b/src/opts/SkNx_neon.h
@@ -11,14 +11,13 @@
#define SKNX_IS_FAST
// ARMv8 has vrndmq_f32 to floor 4 floats. Here we emulate it:
-// - round by adding (1<<23) with our sign, then subtracting it;
-// - if that rounded value is bigger than our input, subtract 1.
+// - roundtrip through integers via truncation
+// - subtract 1 if that's too big (possible for negative values).
+// This restricts the domain of our inputs to a maximum somewhere around 2^31. Seems plenty big.
static inline float32x4_t armv7_vrndmq_f32(float32x4_t v) {
- auto sign = vandq_u32((uint32x4_t)v, vdupq_n_u32(1<<31));
- auto bias = (float32x4_t)(vorrq_u32((uint32x4_t)vdupq_n_f32(1<<23), sign));
- auto rounded = vsubq_f32(vaddq_f32(v, bias), bias);
- auto too_big = vcgtq_f32(rounded, v);
- return vsubq_f32(rounded, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1)));
+ auto roundtrip = vcvtq_f32_s32(vcvtq_s32_f32(v));
+ auto too_big = vcgtq_f32(roundtrip, v);
+ return vsubq_f32(roundtrip, (float32x4_t)vandq_u32(too_big, (uint32x4_t)vdupq_n_f32(1)));
}
// Well, this is absurd. The shifts require compile-time constant arguments.
diff --git a/src/opts/SkNx_sse.h b/src/opts/SkNx_sse.h
index def7ba1719..7d00ddf9ba 100644
--- a/src/opts/SkNx_sse.h
+++ b/src/opts/SkNx_sse.h
@@ -14,14 +14,13 @@
#define SKNX_IS_FAST
// SSE 4.1 has _mm_floor_ps to floor 4 floats. We emulate it:
-// - round by adding (1<<23) with our sign, then subtracting it;
-// - if that rounded value is bigger than our input, subtract 1.
+// - roundtrip through integers via truncation
+// - subtract 1 if that's too big (possible for negative values).
+// This restricts the domain of our inputs to a maximum somewhere around 2^31. Seems plenty big.
static inline __m128 sse2_mm_floor_ps(__m128 v) {
- __m128 sign = _mm_and_ps(v, _mm_set1_ps(-0.0f));
- __m128 bias = _mm_or_ps(sign, _mm_set1_ps(1<<23));
- __m128 rounded = _mm_sub_ps(_mm_add_ps(v, bias), bias);
- __m128 too_big = _mm_cmpgt_ps(rounded, v);
- return _mm_sub_ps(rounded, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
+ __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));
+ __m128 too_big = _mm_cmpgt_ps(roundtrip, v);
+ return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
}
template <>