From 5dfae4524b95a82dfd57cb2073471d4179f49c6c Mon Sep 17 00:00:00 2001
From: Benoit Jacob
Date: Thu, 24 Feb 2011 10:31:57 -0500
Subject: fix bug #195: fast unaligned load for integer using _mm_load_sd failed when the value interpreted as a NaN

---
 Eigen/src/Core/arch/SSE/PacketMath.h | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

(limited to 'Eigen/src/Core/arch/SSE/PacketMath.h')

diff --git a/Eigen/src/Core/arch/SSE/PacketMath.h b/Eigen/src/Core/arch/SSE/PacketMath.h
index bbe784523..8401efe0b 100644
--- a/Eigen/src/Core/arch/SSE/PacketMath.h
+++ b/Eigen/src/Core/arch/SSE/PacketMath.h
@@ -237,7 +237,6 @@ template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int* from) { E
 #endif
 }
 template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); }
-template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from)); }
 #else
 // Fast unaligned loads. Note that here we cannot directly use intrinsics: this would
 // require pointer casting to incompatible pointer types and leads to invalid code
@@ -261,16 +260,13 @@ template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
   res = _mm_loadh_pd(res,from+1);
   return res;
 }
-template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
-{
-  EIGEN_DEBUG_UNALIGNED_LOAD
-  __m128d res;
-  res = _mm_load_sd((const double*)(from)) ;
-  res = _mm_loadh_pd(res, (const double*)(from+2)) ;
-  return _mm_castpd_si128(res);
-}
 #endif

+// bug 195: we used to have an optimized ploadu using _mm_load_sd/_mm_loadh_pd but that gave wrong results when some 64bit value,
+// interpreted as double, was a NaN
+template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from)); }
+
+
 template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
 {
   return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd((const double*)from)), 0, 0, 1, 1);
--
cgit v1.2.3
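
Editor's note: below is a minimal, self-contained C++ sketch (not part of the commit) contrasting the removed _mm_load_sd/_mm_loadh_pd path with the _mm_loadu_si128 replacement, fed integer data whose 64-bit pattern is a NaN when viewed as a double. The helper names old_ploadu4i/new_ploadu4i are made up for illustration; whether the old path actually corrupts bits depends on the compiler and codegen (for instance, a double spilled through x87 registers has signaling NaNs quieted), so this may or may not reproduce the bug on a given toolchain.

// Hypothetical repro sketch for bug 195 (not from the Eigen sources).
#include <emmintrin.h>  // SSE2 intrinsics
#include <cstdio>

// Removed approach: load 4 ints as two doubles. The bits travel through
// double-typed values, so the compiler/FPU may treat them as FP data.
// (The pointer casts also violate strict aliasing, as the comment kept in
// the patched file notes.)
static __m128i old_ploadu4i(const int* from)
{
  __m128d res;
  res = _mm_load_sd((const double*)(from));
  res = _mm_loadh_pd(res, (const double*)(from + 2));
  return _mm_castpd_si128(res);
}

// Fixed approach: a plain unaligned integer load, no FP interpretation.
static __m128i new_ploadu4i(const int* from)
{
  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
}

int main()
{
  // Little-endian: the int pair {0x00000001, 0x7FF00000} forms the 64-bit
  // pattern 0x7FF0000000000001 -- a signaling NaN when interpreted as a
  // double, but ordinary integer data that must survive bit-for-bit.
  int data[6] = { 0, 0x00000001, 0x7FF00000, 0x00000001, 0x7FF00000, 0 };
  const int* unaligned = data + 1;  // deliberately not 16-byte aligned

  int out_old[4], out_new[4];
  _mm_storeu_si128(reinterpret_cast<__m128i*>(out_old), old_ploadu4i(unaligned));
  _mm_storeu_si128(reinterpret_cast<__m128i*>(out_new), new_ploadu4i(unaligned));

  for (int i = 0; i < 4; ++i)
    std::printf("lane %d: old=0x%08X new=0x%08X%s\n", i,
                (unsigned)out_old[i], (unsigned)out_new[i],
                out_old[i] == out_new[i] ? "" : "  <-- MISMATCH");
  return 0;
}

The fix trades a potential micro-optimization for correctness: _mm_loadu_si128 keeps the data in the integer domain for the whole load, so no bit pattern can ever be reinterpreted as a floating-point value along the way.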