From 05754100fecf00e13b2a5799e31570a980e4dd72 Mon Sep 17 00:00:00 2001 From: Rasmus Munk Larsen Date: Wed, 16 Dec 2020 18:16:11 +0000 Subject: * Add iterative psqrt for AVX and SSE when FMA is available. This provides a ~10% speedup. * Write iterative sqrt explicitly in terms of pmadd. This gives up to 7% speedup for psqrt with AVX & SSE with FMA. * Remove iterative psqrt for NEON, because the initial rsqrt approximation is not accurate enough for convergence in 2 Newton-Raphson steps and with 3 steps, just calling the builtin sqrt insn is faster. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The following benchmarks were compiled with clang "-O2 -ffast-math -mfma" and with and without -mavx. AVX+FMA (float) name old cpu/op new cpu/op delta BM_eigen_sqrt_float/1 1.08ns ± 0% 1.09ns ± 1% ~ BM_eigen_sqrt_float/8 2.07ns ± 0% 2.08ns ± 1% ~ BM_eigen_sqrt_float/64 12.4ns ± 0% 12.4ns ± 1% ~ BM_eigen_sqrt_float/512 95.7ns ± 0% 95.5ns ± 0% ~ BM_eigen_sqrt_float/4k 776ns ± 0% 763ns ± 0% -1.67% BM_eigen_sqrt_float/32k 6.57µs ± 1% 6.13µs ± 0% -6.69% BM_eigen_sqrt_float/256k 83.7µs ± 3% 83.3µs ± 2% ~ BM_eigen_sqrt_float/1M 335µs ± 2% 332µs ± 2% ~ SSE+FMA (float) name old cpu/op new cpu/op delta BM_eigen_sqrt_float/1 1.08ns ± 0% 1.09ns ± 0% ~ BM_eigen_sqrt_float/8 2.07ns ± 0% 2.06ns ± 0% ~ BM_eigen_sqrt_float/64 12.4ns ± 0% 12.4ns ± 1% ~ BM_eigen_sqrt_float/512 95.7ns ± 0% 96.3ns ± 4% ~ BM_eigen_sqrt_float/4k 774ns ± 0% 763ns ± 0% -1.50% BM_eigen_sqrt_float/32k 6.58µs ± 2% 6.11µs ± 0% -7.06% BM_eigen_sqrt_float/256k 82.7µs ± 1% 82.6µs ± 1% ~ BM_eigen_sqrt_float/1M 330µs ± 1% 329µs ± 2% ~ SSE+FMA (double) BM_eigen_sqrt_double/1 1.63ns ± 0% 1.63ns ± 0% ~ BM_eigen_sqrt_double/8 6.51ns ± 0% 6.08ns ± 0% -6.68% BM_eigen_sqrt_double/64 52.1ns ± 0% 46.5ns ± 1% -10.65% BM_eigen_sqrt_double/512 417ns ± 0% 374ns ± 1% -10.29% BM_eigen_sqrt_double/4k 3.33µs ± 0% 2.97µs ± 1% -11.00% BM_eigen_sqrt_double/32k 26.7µs ± 0% 23.7µs ± 0% -11.07% 
BM_eigen_sqrt_double/256k 213µs ± 0% 206µs ± 1% -3.31% BM_eigen_sqrt_double/1M 862µs ± 0% 870µs ± 2% +0.96% AVX+FMA (double) name old cpu/op new cpu/op delta BM_eigen_sqrt_double/1 1.63ns ± 0% 1.63ns ± 0% ~ BM_eigen_sqrt_double/8 6.51ns ± 0% 6.06ns ± 0% -6.95% BM_eigen_sqrt_double/64 52.1ns ± 0% 46.5ns ± 1% -10.80% BM_eigen_sqrt_double/512 417ns ± 0% 373ns ± 1% -10.59% BM_eigen_sqrt_double/4k 3.33µs ± 0% 2.97µs ± 1% -10.79% BM_eigen_sqrt_double/32k 26.7µs ± 0% 23.8µs ± 0% -10.94% BM_eigen_sqrt_double/256k 214µs ± 0% 208µs ± 2% -2.76% BM_eigen_sqrt_double/1M 866µs ± 3% 923µs ± 7% ~ --- Eigen/src/Core/arch/NEON/PacketMath.h | 38 +++++------------------------------ 1 file changed, 5 insertions(+), 33 deletions(-) (limited to 'Eigen/src/Core/arch/NEON/PacketMath.h') diff --git a/Eigen/src/Core/arch/NEON/PacketMath.h b/Eigen/src/Core/arch/NEON/PacketMath.h index 5883eca38..14f3dbd0f 100644 --- a/Eigen/src/Core/arch/NEON/PacketMath.h +++ b/Eigen/src/Core/arch/NEON/PacketMath.h @@ -3273,26 +3273,26 @@ template<> EIGEN_STRONG_INLINE Packet4ui psqrt(const Packet4ui& a) { // effective latency. This is similar to Quake3's fast inverse square root. // For more details see: http://www.beyond3d.com/content/articles/8/ template<> EIGEN_STRONG_INLINE Packet4f psqrt(const Packet4f& _x){ - Packet4f half = vmulq_n_f32(_x, 0.5f); + Packet4f minus_half_x = vmulq_n_f32(_x, -0.5f); Packet4ui denormal_mask = vandq_u32(vcgeq_f32(_x, vdupq_n_f32(0.0f)), vcltq_f32(_x, pset1((std::numeric_limits::min)()))); // Compute approximate reciprocal sqrt. Packet4f x = vrsqrteq_f32(_x); // Do a single step of Newton's iteration. //the number 1.5f was set reference to Quake3's fast inverse square root - x = vmulq_f32(x, psub(pset1(1.5f), pmul(half, pmul(x, x)))); + x = pmul(x, pmadd(minus_half_x, pmul(x, x), pset1(1.5f))); // Flush results for denormals to zero. 
return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(pmul(_x, x)), denormal_mask)); } -template<> EIGEN_STRONG_INLINE Packet2f psqrt(const Packet2f& _x){ - Packet2f half = vmul_n_f32(_x, 0.5f); +template<> EIGEN_STRONG_INLINE Packet2f psqrt(const Packet2f& _x) { + Packet2f minus_half_x = vmul_n_f32(_x, -0.5f); Packet2ui denormal_mask = vand_u32(vcge_f32(_x, vdup_n_f32(0.0f)), vclt_f32(_x, pset1((std::numeric_limits::min)()))); // Compute approximate reciprocal sqrt. Packet2f x = vrsqrte_f32(_x); // Do a single step of Newton's iteration. - x = vmul_f32(x, psub(pset1(1.5f), pmul(half, pmul(x, x)))); + x = pmul(x, pmadd(minus_half_x, pmul(x, x), pset1(1.5f))); // Flush results for denormals to zero. return vreinterpret_f32_u32(vbic_u32(vreinterpret_u32_f32(pmul(_x, x)), denormal_mask)); } @@ -3877,35 +3877,7 @@ template<> EIGEN_STRONG_INLINE Packet2d pfrexp(const Packet2d& a, Pack template<> EIGEN_STRONG_INLINE Packet2d pset1frombits(uint64_t from) { return vreinterpretq_f64_u64(vdupq_n_u64(from)); } -#if EIGEN_FAST_MATH - -// Functions for sqrt support packet2d. -// The EIGEN_FAST_MATH version uses the vrsqrte_f64 approximation and one step -// of Newton's method, at a cost of 1-2 bits of precision as opposed to the -// exact solution. It does not handle +inf, or denormalized numbers correctly. -// The main advantage of this approach is not just speed, but also the fact that -// it can be inlined and pipelined with other computations, further reducing its -// effective latency. This is similar to Quake3's fast inverse square root. -// For more details see: http://www.beyond3d.com/content/articles/8/ -template<> EIGEN_STRONG_INLINE Packet2d psqrt(const Packet2d& _x){ - Packet2d half = vmulq_n_f64(_x, 0.5); - Packet2ul denormal_mask = vandq_u64(vcgeq_f64(_x, vdupq_n_f64(0.0)), - vcltq_f64(_x, pset1((std::numeric_limits::min)()))); - // Compute approximate reciprocal sqrt. - Packet2d x = vrsqrteq_f64(_x); - // Do a single step of Newton's iteration. 
- //the number 1.5f was set reference to Quake3's fast inverse square root - x = vmulq_f64(x, psub(pset1(1.5), pmul(half, pmul(x, x)))); - // Do two more Newton's iteration to get a result accurate to 1 ulp. - x = vmulq_f64(x, psub(pset1(1.5), pmul(half, pmul(x, x)))); - x = vmulq_f64(x, psub(pset1(1.5), pmul(half, pmul(x, x)))); - // Flush results for denormals to zero. - return vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(pmul(_x, x)), denormal_mask)); -} - -#else template<> EIGEN_STRONG_INLINE Packet2d psqrt(const Packet2d& _x){ return vsqrtq_f64(_x); } -#endif #endif // EIGEN_ARCH_ARM64 -- cgit v1.2.3