// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_SSE_H
#define EIGEN_PACKET_MATH_SSE_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#if !defined(EIGEN_VECTORIZE_AVX) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS)
// 32 bits =>  8 registers
// 64 bits => 16 registers
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif

#ifdef EIGEN_VECTORIZE_FMA
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD 1
#endif
#endif

#if ((defined EIGEN_VECTORIZE_AVX) && (EIGEN_COMP_GNUC_STRICT || EIGEN_COMP_MINGW) && (__GXX_ABI_VERSION < 1004)) || EIGEN_OS_QNX
// With GCC's default ABI version, __m128 and __m256 are the same type, and therefore we cannot
// have overloads for both types without a linking error.
// One solution is to increase the ABI version using -fabi-version=4 (or greater).
// Otherwise, we work around this inconvenience by wrapping 128-bit types into the following helper
// structure:
typedef eigen_packet_wrapper<__m128>  Packet4f;
typedef eigen_packet_wrapper<__m128d> Packet2d;
#else
typedef __m128  Packet4f;
typedef __m128d Packet2d;
#endif

typedef eigen_packet_wrapper<__m128i, 0> Packet4i;
typedef eigen_packet_wrapper<__m128i, 1> Packet16b;

template<> struct is_arithmetic<__m128>   { enum { value = true }; };
template<> struct is_arithmetic<__m128i>  { enum { value = true }; };
template<> struct is_arithmetic<__m128d>  { enum { value = true }; };
template<> struct is_arithmetic<Packet4i> { enum { value = true }; };

#define EIGEN_SSE_SHUFFLE_MASK(p,q,r,s) ((s)<<6|(r)<<4|(q)<<2|(p))

#define vec4f_swizzle1(v,p,q,r,s) \
  (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), EIGEN_SSE_SHUFFLE_MASK(p,q,r,s))))

#define vec4i_swizzle1(v,p,q,r,s) \
  (_mm_shuffle_epi32( v, EIGEN_SSE_SHUFFLE_MASK(p,q,r,s)))

#define vec2d_swizzle1(v,p,q) \
  (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), EIGEN_SSE_SHUFFLE_MASK(2*p,2*p+1,2*q,2*q+1))))

#define vec4f_swizzle2(a,b,p,q,r,s) \
  (_mm_shuffle_ps( (a), (b), EIGEN_SSE_SHUFFLE_MASK(p,q,r,s)))

#define vec4i_swizzle2(a,b,p,q,r,s) \
  (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), EIGEN_SSE_SHUFFLE_MASK(p,q,r,s)))))

#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \
  const Packet2d p2d_##NAME = pset1<Packet2d>(X)

#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = pset1frombits<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)
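// Illustrative note (ours, not part of the original source): EIGEN_SSE_SHUFFLE_MASK
// packs four 2-bit lane indices into the 8-bit immediate expected by the SSE
// shuffle intrinsics, and the swizzle macros select source lanes by position.
// A minimal sketch, with made-up values:
//
//   // EIGEN_SSE_SHUFFLE_MASK(0,1,2,3) == 0xE4, the identity permutation
//   // Packet4f v = _mm_setr_ps(10.f, 11.f, 12.f, 13.f);
//   // vec4f_swizzle1(v,3,2,1,0);   // -> {13, 12, 11, 10}: result lane i
//   //                              //    takes the source lane named by arg i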
// Use the packet_traits defined in AVX/PacketMath.h instead if we're going
// to leverage AVX instructions.
#ifndef EIGEN_VECTORIZE_AVX
template <>
struct packet_traits<float> : default_packet_traits {
  typedef Packet4f type;
  typedef Packet4f half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 0,

    HasDiv = 1,
    HasSin = EIGEN_FAST_MATH,
    HasCos = EIGEN_FAST_MATH,
    HasLog = 1,
    HasLog1p = 1,
    HasExpm1 = 1,
    HasNdtri = 1,
    HasExp = 1,
    HasBessel = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasTanh = EIGEN_FAST_MATH,
    HasErf = EIGEN_FAST_MATH,
    HasBlend = 1,
    HasInsert = 1,
    HasFloor = 1
#ifdef EIGEN_VECTORIZE_SSE4_1
    ,
    HasRint = 1,
    HasRound = 1,
    HasCeil = 1
#endif
  };
};
template <>
struct packet_traits<double> : default_packet_traits {
  typedef Packet2d type;
  typedef Packet2d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=2,
    HasHalfPacket = 0,

    HasDiv = 1,
    HasExp = 1,
    HasSqrt = 1,
    HasRsqrt = 1,
    HasBlend = 1,
    HasInsert = 1
#ifdef EIGEN_VECTORIZE_SSE4_1
    ,
    HasRound = 1,
    HasRint = 1,
    HasFloor = 1,
    HasCeil = 1
#endif
  };
};
#endif
template<> struct packet_traits<int> : default_packet_traits {
  typedef Packet4i type;
  typedef Packet4i half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4,

    HasShift = 1,
    HasBlend = 1
  };
};

template<> struct packet_traits<bool> : default_packet_traits {
  typedef Packet16b type;
  typedef Packet16b half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    HasHalfPacket = 0,
    size=16,

    HasAdd    = 1,
    HasSub    = 0,
    HasShift  = 0,
    HasMul    = 1,
    HasNegate = 0,
    HasAbs    = 0,
    HasAbs2   = 0,
    HasMin    = 0,
    HasMax    = 0,
    HasConj   = 0
  };
};

template<> struct unpacket_traits<Packet4f> {
  typedef float     type;
  typedef Packet4f  half;
  typedef Packet4i  integer_packet;
  enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
};
template<> struct unpacket_traits<Packet2d> {
  typedef double    type;
  typedef Packet2d  half;
  enum {size=2, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
};
template<> struct unpacket_traits<Packet4i> {
  typedef int       type;
  typedef Packet4i  half;
  enum {size=4, alignment=Aligned16, vectorizable=false, masked_load_available=false, masked_store_available=false};
};
template<> struct unpacket_traits<Packet16b> {
  typedef bool       type;
  typedef Packet16b  half;
  enum {size=16, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
};

#ifndef EIGEN_VECTORIZE_AVX
template<> struct scalar_div_cost<float,false>  { enum { value = 7 }; };
template<> struct scalar_div_cost<double,false> { enum { value = 8 }; };
#endif
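// Illustrative sketch (ours): generic Eigen kernels are written against these
// traits rather than raw intrinsics; on this target the types below resolve to
// the SSE packets defined above. Hypothetical usage:
//
//   // typedef packet_traits<float>::type Packet;     // Packet4f here
//   // const Index n = unpacket_traits<Packet>::size; // 4 lanes
//   // for(Index i=0; i+n<=size; i+=n)
//   //   pstoreu(r+i, padd(ploadu<Packet>(x+i), ploadu<Packet>(y+i)));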
#if EIGEN_COMP_MSVC==1500
// Workaround MSVC 9 internal compiler error.
// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32-bit + SSE mode.
// TODO: check whether a better fix exists, like adding a pset0() function. (It crashed on pset1(0).)
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps(from,from,from,from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set_epi32(from,from,from,from); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps1(from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set1_epi32(from); }
#endif
template<> EIGEN_STRONG_INLINE Packet16b pset1<Packet16b>(const bool& from) { return _mm_set1_epi8(static_cast<char>(from)); }

template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from) { return _mm_castsi128_ps(pset1<Packet4i>(from)); }

template<> EIGEN_STRONG_INLINE Packet4f pzero(const Packet4f& /*a*/) { return _mm_setzero_ps(); }
template<> EIGEN_STRONG_INLINE Packet2d pzero(const Packet2d& /*a*/) { return _mm_setzero_pd(); }
template<> EIGEN_STRONG_INLINE Packet4i pzero(const Packet4i& /*a*/) { return _mm_setzero_si128(); }

// GCC generates a shufps instruction for _mm_set1_ps/_mm_load1_ps instead of the more efficient pshufd instruction.
// However, using intrinsics for pset1 makes gcc generate crappy code in some cases (see bug 203).
// Using inline assembly is also not an option because then gcc fails to reorder the instructions properly.
// Therefore, we introduced the pload1 functions to be used in product kernels for which bug 203 does not apply.
// Also note that with AVX, we want it to generate a vbroadcastss.
#if EIGEN_COMP_GNUC_STRICT && (!defined __AVX__)
template<> EIGEN_STRONG_INLINE Packet4f pload1<Packet4f>(const float *from) {
  return vec4f_swizzle1(_mm_load_ss(from),0,0,0,0);
}
#endif

template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)  { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int& a)    { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }

template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet16b padd<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_or_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
  return _mm_xor_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
  return _mm_xor_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
{
  return psub(Packet4i(_mm_setr_epi32(0,0,0,0)), a);
}
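// Illustrative note (ours) on pnegate above: XOR-ing the IEEE sign bit negates
// without arithmetic and preserves NaN payloads; e.g. (made-up value)
//
//   // pnegate(pset1<Packet4f>(1.5f));  // -> {-1.5f x4}: 0x3FC00000 ^ 0x80000000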
template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_mullo_epi32(a,b);
#else
  // this version is slightly faster than 4 scalar products
  return vec4i_swizzle1(
            vec4i_swizzle2(
              _mm_mul_epu32(a,b),
              _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
                            vec4i_swizzle1(b,1,0,3,2)),
              0,2,0,2),
            0,2,1,3);
#endif
}

template<> EIGEN_STRONG_INLINE Packet16b pmul<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_and_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }

// for some weird reasons, it has to be overloaded for packets of integers
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }
#ifdef EIGEN_VECTORIZE_FMA
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return _mm_fmadd_ps(a,b,c); }
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return _mm_fmadd_pd(a,b,c); }
#endif

#ifdef EIGEN_VECTORIZE_SSE4_1
template<> EIGEN_DEVICE_FUNC inline Packet4f pselect(const Packet4f& mask, const Packet4f& a, const Packet4f& b) { return _mm_blendv_ps(b,a,mask); }

template<> EIGEN_DEVICE_FUNC inline Packet2d pselect(const Packet2d& mask, const Packet2d& a, const Packet2d& b) { return _mm_blendv_pd(b,a,mask); }
#endif
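// Illustrative sketch (ours): pselect takes lanes from `a` where the mask is
// all-ones and from `b` elsewhere; note the swapped operand order relative to
// _mm_blendv_ps, whose argument order is (else, then, mask). A hypothetical
// branch-free absolute value:
//
//   // Packet4f m = pcmp_lt(x, pzero(x));      // all-ones where x < 0
//   // Packet4f r = pselect(m, pnegate(x), x); // -x where negative, else x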
template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // There appears to be a bug in GCC, by which the optimizer may
  // flip the argument order in calls to _mm_min_ps, so we have to
  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  #ifdef EIGEN_VECTORIZE_AVX
  Packet4f res;
  asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  #else
  Packet4f res = b;
  asm("minps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
  #endif
  return res;
#else
  // Arguments are reversed to match NaN propagation behavior of std::min.
  return _mm_min_ps(b, a);
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // There appears to be a bug in GCC, by which the optimizer may
  // flip the argument order in calls to _mm_min_pd, so we have to
  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  #ifdef EIGEN_VECTORIZE_AVX
  Packet2d res;
  asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  #else
  Packet2d res = b;
  asm("minpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
  #endif
  return res;
#else
  // Arguments are reversed to match NaN propagation behavior of std::min.
  return _mm_min_pd(b, a);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_min_epi32(a,b);
#else
  // after some benchmarking, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmplt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // There appears to be a bug in GCC, by which the optimizer may
  // flip the argument order in calls to _mm_max_ps, so we have to
  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  #ifdef EIGEN_VECTORIZE_AVX
  Packet4f res;
  asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  #else
  Packet4f res = b;
  asm("maxps %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
  #endif
  return res;
#else
  // Arguments are reversed to match NaN propagation behavior of std::max.
  return _mm_max_ps(b, a);
#endif
}
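// Illustrative note (ours) on the pmin/pmax argument reversal: minps/maxps
// return their *second* operand when the comparison is unordered, so calling
// _mm_min_ps(b, a) makes pmin(a, b) return `a` whenever `b` is NaN, the same
// operand std::min(a, b) would return. E.g. (hypothetical):
//
//   // pmin<Packet4f>(x, pset1<Packet4f>(NAN));  // lanes of x, as with std::min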
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) {
#if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
  // There appears to be a bug in GCC, by which the optimizer may
  // flip the argument order in calls to _mm_max_pd, so we have to
  // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
  // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
  #ifdef EIGEN_VECTORIZE_AVX
  Packet2d res;
  asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
  #else
  Packet2d res = b;
  asm("maxpd %[a], %[res]" : [res] "+x" (res) : [a] "x" (a));
  #endif
  return res;
#else
  // Arguments are reversed to match NaN propagation behavior of std::max.
  return _mm_max_pd(b, a);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_max_epi32(a,b);
#else
  // after some benchmarking, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmpgt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f& a, const Packet4f& b) { return _mm_cmple_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt(const Packet4f& a, const Packet4f& b) { return _mm_cmplt_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan(const Packet4f& a, const Packet4f& b) { return _mm_cmpnge_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq(const Packet4f& a, const Packet4f& b) { return _mm_cmpeq_ps(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pcmp_le(const Packet2d& a, const Packet2d& b) { return _mm_cmple_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt(const Packet2d& a, const Packet2d& b) { return _mm_cmplt_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt_or_nan(const Packet2d& a, const Packet2d& b) { return _mm_cmpnge_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b) { return _mm_cmpeq_pd(a,b); }

template<> EIGEN_STRONG_INLINE Packet4i pcmp_lt(const Packet4i& a, const Packet4i& b) { return _mm_cmplt_epi32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq(const Packet4i& a, const Packet4i& b) { return _mm_cmpeq_epi32(a,b); }
template<> EIGEN_STRONG_INLINE Packet16b pcmp_eq(const Packet16b& a, const Packet16b& b) { return _mm_cmpeq_epi8(a,b); }

template<> EIGEN_STRONG_INLINE Packet4i ptrue<Packet4i>(const Packet4i& a) { return _mm_cmpeq_epi32(a, a); }
template<> EIGEN_STRONG_INLINE Packet16b ptrue<Packet16b>(const Packet16b& a) { return _mm_cmpeq_epi8(a, a); }
template<> EIGEN_STRONG_INLINE Packet4f ptrue<Packet4f>(const Packet4f& a) {
  Packet4i b = _mm_castps_si128(a);
  return _mm_castsi128_ps(_mm_cmpeq_epi32(b, b));
}
template<> EIGEN_STRONG_INLINE Packet2d ptrue<Packet2d>(const Packet2d& a) {
  Packet4i b = _mm_castpd_si128(a);
  return _mm_castsi128_pd(_mm_cmpeq_epi32(b, b));
}

template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }
template<> EIGEN_STRONG_INLINE Packet16b pand<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_and_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }
template<> EIGEN_STRONG_INLINE Packet16b por<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_or_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }
template<> EIGEN_STRONG_INLINE Packet16b pxor<Packet16b>(const Packet16b& a, const Packet16b& b) { return _mm_xor_si128(a,b); }
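// Illustrative sketch (ours): the pcmp_* helpers return full-width lane masks
// (all-ones for true, all-zeros for false) meant to be combined with the
// bitwise ops above. A hypothetical branch-free "clamp negatives to zero":
//
//   // Packet4f mask = pcmp_le(pzero(x), x); // all-ones where 0 <= x
//   // Packet4f r    = pand(x, mask);        // x where x >= 0, else 0.0f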
template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(b,a); }
template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(b,a); }
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(b,a); }

template<int N> EIGEN_STRONG_INLINE Packet4i parithmetic_shift_right(Packet4i a) { return _mm_srai_epi32(a,N); }
template<int N> EIGEN_STRONG_INLINE Packet4i plogical_shift_right(Packet4i a)    { return _mm_srli_epi32(a,N); }
template<int N> EIGEN_STRONG_INLINE Packet4i plogical_shift_left(Packet4i a)     { return _mm_slli_epi32(a,N); }

#ifdef EIGEN_VECTORIZE_SSE4_1
template<> EIGEN_STRONG_INLINE Packet4f pround<Packet4f>(const Packet4f& a)
{
  // Unfortunately _mm_round_ps doesn't have a rounding mode to implement numext::round.
  const Packet4f mask = pset1frombits<Packet4f>(0x80000000u);
  const Packet4f prev0dot5 = pset1frombits<Packet4f>(0x3EFFFFFFu);
  return _mm_round_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
}

template<> EIGEN_STRONG_INLINE Packet2d pround<Packet2d>(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_set_epi64x(0x8000000000000000ull, 0x8000000000000000ull));
  const Packet2d prev0dot5 = _mm_castsi128_pd(_mm_set_epi64x(0x3FDFFFFFFFFFFFFFull, 0x3FDFFFFFFFFFFFFFull));
  return _mm_round_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
}

template<> EIGEN_STRONG_INLINE Packet4f print<Packet4f>(const Packet4f& a) { return _mm_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
template<> EIGEN_STRONG_INLINE Packet2d print<Packet2d>(const Packet2d& a) { return _mm_round_pd(a, _MM_FROUND_CUR_DIRECTION); }

template<> EIGEN_STRONG_INLINE Packet4f pceil<Packet4f>(const Packet4f& a) { return _mm_ceil_ps(a); }
template<> EIGEN_STRONG_INLINE Packet2d pceil<Packet2d>(const Packet2d& a) { return _mm_ceil_pd(a); }

template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a) { return _mm_floor_ps(a); }
template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a) { return _mm_floor_pd(a); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
{
  const Packet4f cst_1 = pset1<Packet4f>(1.0f);
  Packet4i emm0 = _mm_cvttps_epi32(a);
  Packet4f tmp  = _mm_cvtepi32_ps(emm0);
  /* if greater, subtract 1 */
  Packet4f mask = _mm_cmpgt_ps(tmp, a);
  mask = pand(mask, cst_1);
  return psub(tmp, mask);
}

// WARNING: this pfloor implementation makes sense for small inputs only;
// it is currently only used by pexp and not exposed through HasFloor.
template<> EIGEN_STRONG_INLINE Packet2d pfloor<Packet2d>(const Packet2d& a)
{
  const Packet2d cst_1 = pset1<Packet2d>(1.0);
  Packet4i emm0 = _mm_cvttpd_epi32(a);
  Packet2d tmp  = _mm_cvtepi32_pd(emm0);
  /* if greater, subtract 1 */
  Packet2d mask = _mm_cmpgt_pd(tmp, a);
  mask = pand(mask, cst_1);
  return psub(tmp, mask);
}
#endif
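// Illustrative note (ours) on pround above: SSE4.1 rounds half-way cases to
// even, whereas numext::round rounds them away from zero. Or-ing the sign of
// `a` onto prev0dot5 (the largest float strictly below 0.5) and adding the
// result to `a` before truncating toward zero reproduces half-away-from-zero:
//
//   //  2.5f + 0.49999997f ->  3.0f after float rounding -> truncates to  3.0f
//   // -2.5f - 0.49999997f -> -3.0f after float rounding -> truncates to -3.0f
//   //  2.4f + 0.49999997f ->  about 2.9f                -> truncates to  2.0f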
template<> EIGEN_STRONG_INLINE Packet4f  pload<Packet4f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d  pload<Packet2d>(const double*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i  pload<Packet4i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }
template<> EIGEN_STRONG_INLINE Packet16b pload<Packet16b>(const bool*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }

#if EIGEN_COMP_MSVC
  template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from) {
    EIGEN_DEBUG_UNALIGNED_LOAD
    #if (EIGEN_COMP_MSVC==1600)
    // NOTE Some versions of MSVC10 generate bad code when using _mm_loadu_ps
    // (i.e., they do not generate an unaligned load!!)
    __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
    res = _mm_loadh_pi(res, (const __m64*)(from+2));
    return res;
    #else
    return _mm_loadu_ps(from);
    #endif
  }
#else
// NOTE: with the code below, MSVC's compiler crashes!
template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return _mm_loadu_ps(from);
}
#endif

template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return _mm_loadu_pd(from);
}
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
}
template<> EIGEN_STRONG_INLINE Packet16b ploadu<Packet16b>(const bool* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
}

template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{
  return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
}
template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from)
{ return pset1<Packet2d>(from[0]); }
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int* from)
{
  Packet4i tmp;
  tmp = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(from));
  return vec4i_swizzle1(tmp, 0, 0, 1, 1);
}

template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f&  from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d&  from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i&  from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }
template<> EIGEN_STRONG_INLINE void pstore<bool>(bool*     to, const Packet16b& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d&  from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*   to, const Packet4f&  from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*       to, const Packet4i&  from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }
template<> EIGEN_STRONG_INLINE void pstoreu<bool>(bool*     to, const Packet16b& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }

template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
 return _mm_set_ps(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
 return _mm_set_pd(from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int, Packet4i>(const int* from, Index stride)
{
 return _mm_set_epi32(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
  to[stride*0] = _mm_cvtss_f32(from);
  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 1));
  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 2));
  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(from, from, 3));
}
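// Illustrative sketch (ours): pgather/pscatter implement strided access, e.g.
// reading and writing columns of a hypothetical row-major 4x4 matrix:
//
//   // float m[16];                                         // row-major 4x4
//   // Packet4f col0 = pgather<float, Packet4f>(m + 0, 4);  // m[0],m[4],m[8],m[12]
//   // pscatter<float, Packet4f>(m + 1, col0, 4);           // write column 1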
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
  to[stride*0] = _mm_cvtsd_f64(from);
  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(from, from, 1));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<int, Packet4i>(int* to, const Packet4i& from, Index stride)
{
  to[stride*0] = _mm_cvtsi128_si32(from);
  to[stride*1] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 1));
  to[stride*2] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 2));
  to[stride*3] = _mm_cvtsi128_si32(_mm_shuffle_epi32(from, 3));
}

// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
{
  Packet4f pa = _mm_set_ss(a);
  pstore(to, Packet4f(vec4f_swizzle1(pa,0,0,0,0)));
}
// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
{
  Packet2d pa = _mm_set_sd(a);
  pstore(to, Packet2d(vec2d_swizzle1(pa,0,0)));
}

#if EIGEN_COMP_PGI && EIGEN_COMP_PGI < 1900
typedef const void * SsePrefetchPtrType;
#else
typedef const char * SsePrefetchPtrType;
#endif

#ifndef EIGEN_VECTORIZE_AVX
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
#endif

#if EIGEN_COMP_MSVC_STRICT && EIGEN_OS_WIN64
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
// Direct access to the struct members fixed bug #62.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#elif EIGEN_COMP_MSVC_STRICT
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#else
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
template<> EIGEN_STRONG_INLINE bool   pfirst<Packet16b>(const Packet16b& a) { int x = _mm_cvtsi128_si32(a); return static_cast<bool>(x & 1); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) { return _mm_shuffle_ps(a,a,0x1B); }
template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a) { return _mm_shuffle_pd(a,a,0x1); }
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) { return _mm_shuffle_epi32(a,0x1B); }

template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm_and_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
{
#ifdef EIGEN_VECTORIZE_SSSE3
  return _mm_abs_epi32(a);
#else
  Packet4i aux = _mm_srai_epi32(a,31);
  return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
#endif
}
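// Illustrative note (ours) on pabs above: clearing the IEEE sign bit with a
// bitwise AND is branch-free and exact. For Packet2d the mask interleaves
// 32-bit halves because _mm_setr_epi32 fills each 64-bit lane low word first:
//
//   // setr(0xFFFFFFFF, 0x7FFFFFFF, ...) -> 0x7FFFFFFFFFFFFFFF per double lane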
template<> EIGEN_STRONG_INLINE Packet4f pfrexp<Packet4f>(const Packet4f& a, Packet4f& exponent) {
  return pfrexp_float(a,exponent);
}

template<> EIGEN_STRONG_INLINE Packet4f pldexp<Packet4f>(const Packet4f& a, const Packet4f& exponent) {
  return pldexp_float(a,exponent);
}

template<> EIGEN_STRONG_INLINE Packet2d pldexp<Packet2d>(const Packet2d& a, const Packet2d& exponent) {
  const Packet4i cst_1023_0 = _mm_setr_epi32(1023, 1023, 0, 0);
  Packet4i emm0 = _mm_cvttpd_epi32(exponent);
  emm0 = padd(emm0, cst_1023_0);
  emm0 = _mm_slli_epi32(emm0, 20);
  emm0 = _mm_shuffle_epi32(emm0, _MM_SHUFFLE(1,2,0,3));
  return pmul(a, Packet2d(_mm_castsi128_pd(emm0)));
}
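// Illustrative note (ours) on pldexp<Packet2d> above: it synthesizes the packet
// {2^e0, 2^e1} bitwise. Adding the IEEE-754 double bias (1023) to each truncated
// exponent and shifting it into the exponent field (bits 52..62; the 20-bit
// shift plus the _MM_SHUFFLE move into the high 32-bit words gives the
// effective shift of 52) produces an exact power of two, so a single multiply
// finishes a * 2^e. E.g. for e = 3, (3+1023) << 20 placed in the high word is
// 0x4020000000000000, the bit pattern of 8.0.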
// with AVX, the default implementations based on pload1 are faster
#ifndef __AVX__
template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet4f>(const float *a,
                      Packet4f& a0, Packet4f& a1, Packet4f& a2, Packet4f& a3)
{
  a3 = pload<Packet4f>(a);
  a0 = vec4f_swizzle1(a3, 0,0,0,0);
  a1 = vec4f_swizzle1(a3, 1,1,1,1);
  a2 = vec4f_swizzle1(a3, 2,2,2,2);
  a3 = vec4f_swizzle1(a3, 3,3,3,3);
}
template<> EIGEN_STRONG_INLINE void
pbroadcast4<Packet2d>(const double *a,
                      Packet2d& a0, Packet2d& a1, Packet2d& a2, Packet2d& a3)
{
#ifdef EIGEN_VECTORIZE_SSE3
  a0 = _mm_loaddup_pd(a+0);
  a1 = _mm_loaddup_pd(a+1);
  a2 = _mm_loaddup_pd(a+2);
  a3 = _mm_loaddup_pd(a+3);
#else
  a1 = pload<Packet2d>(a);
  a0 = vec2d_swizzle1(a1, 0,0);
  a1 = vec2d_swizzle1(a1, 1,1);
  a3 = pload<Packet2d>(a+2);
  a2 = vec2d_swizzle1(a3, 0,0);
  a3 = vec2d_swizzle1(a3, 1,1);
#endif
}
#endif

EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
{
  vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
  vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
  vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
  vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
}

template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  // Disable SSE3 _mm_hadd_ps, which is extremely slow on all existing Intel architectures
  // (from Nehalem to Haswell)
// #ifdef EIGEN_VECTORIZE_SSE3
//   Packet4f tmp = _mm_add_ps(a, vec4f_swizzle1(a,2,3,2,3));
//   return pfirst<Packet4f>(_mm_hadd_ps(tmp, tmp));
// #else
  Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
// #endif
}

template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
{
  // Disable SSE3 _mm_hadd_pd, which is extremely slow on all existing Intel architectures
  // (from Nehalem to Haswell)
// #ifdef EIGEN_VECTORIZE_SSE3
//   return pfirst<Packet2d>(_mm_hadd_pd(a, a));
// #else
  return pfirst<Packet2d>(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
// #endif
}

#ifdef EIGEN_VECTORIZE_SSSE3
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  Packet4i tmp0 = _mm_hadd_epi32(a,a);
  return pfirst<Packet4i>(_mm_hadd_epi32(tmp0,tmp0));
}
#else
template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
  return pfirst(tmp) + pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1));
}
#endif

template<> EIGEN_STRONG_INLINE bool predux<Packet16b>(const Packet16b& a) {
  Packet4i tmp = _mm_or_si128(a, _mm_unpackhi_epi64(a,a));
  return (pfirst(tmp) != 0) || (pfirst<Packet4i>(_mm_shuffle_epi32(tmp, 1)) != 0);
}

// Other reduction functions:

// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., reusing pmul is very slow !)
  // TODO try to call _mm_mul_epu32 directly
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
}

// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  Packet4i tmp = _mm_min_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
  return pfirst<Packet4i>(_mm_min_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
#else
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::min after the pstore !!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
  return aux0<aux2 ? aux0 : aux2;
#endif // EIGEN_VECTORIZE_SSE4_1
}

// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
  return pfirst<Packet4f>(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
{
  return pfirst<Packet2d>(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  Packet4i tmp = _mm_max_epi32(a, _mm_shuffle_epi32(a, _MM_SHUFFLE(0,0,3,2)));
  return pfirst<Packet4i>(_mm_max_epi32(tmp,_mm_shuffle_epi32(tmp, 1)));
#else
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::max after the pstore !!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
  return aux0>aux2 ? aux0 : aux2;
#endif // EIGEN_VECTORIZE_SSE4_1
}

// not needed yet
// template<> EIGEN_STRONG_INLINE bool predux_all(const Packet4f& x)
// {
//   return _mm_movemask_ps(x) == 0xF;
// }

template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x)
{
  return _mm_movemask_ps(x) != 0x0;
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4f,4>& kernel) {
  _MM_TRANSPOSE4_PS(kernel.packet[0], kernel.packet[1], kernel.packet[2], kernel.packet[3]);
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet2d,2>& kernel) {
  __m128d tmp = _mm_unpackhi_pd(kernel.packet[0], kernel.packet[1]);
  kernel.packet[0] = _mm_unpacklo_pd(kernel.packet[0], kernel.packet[1]);
  kernel.packet[1] = tmp;
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4i,4>& kernel) {
  __m128i T0 = _mm_unpacklo_epi32(kernel.packet[0], kernel.packet[1]);
  __m128i T1 = _mm_unpacklo_epi32(kernel.packet[2], kernel.packet[3]);
  __m128i T2 = _mm_unpackhi_epi32(kernel.packet[0], kernel.packet[1]);
  __m128i T3 = _mm_unpackhi_epi32(kernel.packet[2], kernel.packet[3]);
  kernel.packet[0] = _mm_unpacklo_epi64(T0, T1);
  kernel.packet[1] = _mm_unpackhi_epi64(T0, T1);
  kernel.packet[2] = _mm_unpacklo_epi64(T2, T3);
  kernel.packet[3] = _mm_unpackhi_epi64(T2, T3);
}

EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet16b,4>& kernel) {
  __m128i T0 = _mm_unpacklo_epi8(kernel.packet[0], kernel.packet[1]);
  __m128i T1 = _mm_unpackhi_epi8(kernel.packet[0], kernel.packet[1]);
  __m128i T2 = _mm_unpacklo_epi8(kernel.packet[2], kernel.packet[3]);
  __m128i T3 = _mm_unpackhi_epi8(kernel.packet[2], kernel.packet[3]);
  kernel.packet[0] = _mm_unpacklo_epi16(T0, T2);
  kernel.packet[1] = _mm_unpackhi_epi16(T0, T2);
  kernel.packet[2] = _mm_unpacklo_epi16(T1, T3);
  kernel.packet[3] = _mm_unpackhi_epi16(T1, T3);
}
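// Illustrative sketch (ours): ptranspose transposes a PacketBlock in place,
// e.g. (hypothetical values)
//
//   // PacketBlock<Packet4f,4> k;   // rows: {0,1,2,3}, {4,5,6,7}, ...
//   // ptranspose(k);               // now:  {0,4,8,12}, {1,5,9,13}, ...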
template<> EIGEN_STRONG_INLINE Packet4i pblend(const Selector<4>& ifPacket, const Packet4i& thenPacket, const Packet4i& elsePacket) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i select = _mm_set_epi32(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m128i false_mask = _mm_cmpeq_epi32(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_epi8(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_si128(_mm_andnot_si128(false_mask, thenPacket), _mm_and_si128(false_mask, elsePacket));
#endif
}
template<> EIGEN_STRONG_INLINE Packet4f pblend(const Selector<4>& ifPacket, const Packet4f& thenPacket, const Packet4f& elsePacket) {
  const __m128 zero = _mm_setzero_ps();
  const __m128 select = _mm_set_ps(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m128 false_mask = _mm_cmpeq_ps(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_ps(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_ps(_mm_andnot_ps(false_mask, thenPacket), _mm_and_ps(false_mask, elsePacket));
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d pblend(const Selector<2>& ifPacket, const Packet2d& thenPacket, const Packet2d& elsePacket) {
  const __m128d zero = _mm_setzero_pd();
  const __m128d select = _mm_set_pd(ifPacket.select[1], ifPacket.select[0]);
  __m128d false_mask = _mm_cmpeq_pd(select, zero);
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blendv_pd(thenPacket, elsePacket, false_mask);
#else
  return _mm_or_pd(_mm_andnot_pd(false_mask, thenPacket), _mm_and_pd(false_mask, elsePacket));
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pinsertfirst(const Packet4f& a, float b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blend_ps(a,pset1<Packet4f>(b),1);
#else
  return _mm_move_ss(a, _mm_load_ss(&b));
#endif
}

template<> EIGEN_STRONG_INLINE Packet2d pinsertfirst(const Packet2d& a, double b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blend_pd(a,pset1<Packet2d>(b),1);
#else
  return _mm_move_sd(a, _mm_load_sd(&b));
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pinsertlast(const Packet4f& a, float b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blend_ps(a,pset1<Packet4f>(b),(1<<3));
#else
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x0,0x0,0x0,0xFFFFFFFF));
  return _mm_or_ps(_mm_andnot_ps(mask, a), _mm_and_ps(mask, pset1<Packet4f>(b)));
#endif
}

template<> EIGEN_STRONG_INLINE Packet2d pinsertlast(const Packet2d& a, double b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_blend_pd(a,pset1<Packet2d>(b),(1<<1));
#else
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x0,0xFFFFFFFF,0xFFFFFFFF));
  return _mm_or_pd(_mm_andnot_pd(mask, a), _mm_and_pd(mask, pset1<Packet2d>(b)));
#endif
}

// Scalar path for pmadd with FMA to ensure consistency with vectorized path.
#ifdef EIGEN_VECTORIZE_FMA
template<> EIGEN_STRONG_INLINE float pmadd(const float& a, const float& b, const float& c) {
  return ::fmaf(a,b,c);
}
template<> EIGEN_STRONG_INLINE double pmadd(const double& a, const double& b, const double& c) {
  return ::fma(a,b,c);
}
#endif
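// Illustrative note (ours): with FMA enabled, the vectorized pmadd earlier in
// this file contracts a*b+c into a single rounding, so the scalar
// specializations above route through fma()/fmaf() to keep scalar fallback
// paths bit-identical to the vector path:
//
//   // pmadd(a, b, c) == ::fmaf(a, b, c)   // one rounding, not two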
// Packet math for Eigen::half
// Disable the following code since it's broken on too many platforms / compilers.
//#elif defined(EIGEN_VECTORIZE_SSE) && (!EIGEN_ARCH_x86_64) && (!EIGEN_COMP_MSVC)
#if 0

typedef struct {
  __m64 x;
} Packet4h;

template<> struct is_arithmetic<Packet4h> { enum { value = true }; };

template <>
struct packet_traits<Eigen::half> : default_packet_traits {
  typedef Packet4h type;
  // There is no half-size packet for Packet4h.
  typedef Packet4h half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 0,

    HasAdd       = 1,
    HasSub       = 1,
    HasMul       = 1,
    HasDiv       = 1,
    HasNegate    = 0,
    HasAbs       = 0,
    HasAbs2      = 0,
    HasMin       = 0,
    HasMax       = 0,
    HasConj      = 0,
    HasSetLinear = 0,
    HasSqrt      = 0,
    HasRsqrt     = 0,
    HasExp       = 0,
    HasLog       = 0,
    HasBlend     = 0
  };
};

template<> struct unpacket_traits<Packet4h> {
  typedef Eigen::half type;
  enum {size=4, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false};
  typedef Packet4h half;
};

template<> EIGEN_STRONG_INLINE Packet4h pset1<Packet4h>(const Eigen::half& from) {
  Packet4h result;
  result.x = _mm_set1_pi16(from.x);
  return result;
}

template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet4h>(const Packet4h& from) {
  return half_impl::raw_uint16_to_half(static_cast<unsigned short>(_mm_cvtsi64_si32(from.x)));
}

template<> EIGEN_STRONG_INLINE Packet4h pconj(const Packet4h& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4h padd<Packet4h>(const Packet4h& a, const Packet4h& b) {
  __int64_t a64 = _mm_cvtm64_si64(a.x);
  __int64_t b64 = _mm_cvtm64_si64(b.x);

  Eigen::half h[4];

  Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
  h[0] = ha + hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
  h[1] = ha + hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
  h[2] = ha + hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
  h[3] = ha + hb;
  Packet4h result;
  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
  return result;
}

template<> EIGEN_STRONG_INLINE Packet4h psub<Packet4h>(const Packet4h& a, const Packet4h& b) {
  __int64_t a64 = _mm_cvtm64_si64(a.x);
  __int64_t b64 = _mm_cvtm64_si64(b.x);

  Eigen::half h[4];

  Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
  h[0] = ha - hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
  h[1] = ha - hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
  h[2] = ha - hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
  h[3] = ha - hb;
  Packet4h result;
  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
  return result;
}

template<> EIGEN_STRONG_INLINE Packet4h pmul<Packet4h>(const Packet4h& a, const Packet4h& b) {
  __int64_t a64 = _mm_cvtm64_si64(a.x);
  __int64_t b64 = _mm_cvtm64_si64(b.x);

  Eigen::half h[4];

  Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
  h[0] = ha * hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
  h[1] = ha * hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
  h[2] = ha * hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
  h[3] = ha * hb;
  Packet4h result;
  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
  return result;
}
template<> EIGEN_STRONG_INLINE Packet4h pdiv<Packet4h>(const Packet4h& a, const Packet4h& b) {
  __int64_t a64 = _mm_cvtm64_si64(a.x);
  __int64_t b64 = _mm_cvtm64_si64(b.x);

  Eigen::half h[4];

  Eigen::half ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64));
  Eigen::half hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64));
  h[0] = ha / hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 16));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 16));
  h[1] = ha / hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 32));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 32));
  h[2] = ha / hb;
  ha = half_impl::raw_uint16_to_half(static_cast<unsigned short>(a64 >> 48));
  hb = half_impl::raw_uint16_to_half(static_cast<unsigned short>(b64 >> 48));
  h[3] = ha / hb;
  Packet4h result;
  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
  return result;
}

template<> EIGEN_STRONG_INLINE Packet4h pload<Packet4h>(const Eigen::half* from) {
  Packet4h result;
  result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
  return result;
}

template<> EIGEN_STRONG_INLINE Packet4h ploadu<Packet4h>(const Eigen::half* from) {
  Packet4h result;
  result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
  return result;
}

template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet4h& from) {
  __int64_t r = _mm_cvtm64_si64(from.x);
  *(reinterpret_cast<__int64_t*>(to)) = r;
}

template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet4h& from) {
  __int64_t r = _mm_cvtm64_si64(from.x);
  *(reinterpret_cast<__int64_t*>(to)) = r;
}

template<> EIGEN_STRONG_INLINE Packet4h ploadquad<Packet4h>(const Eigen::half* from) {
  return pset1<Packet4h>(*from);
}

template<> EIGEN_STRONG_INLINE Packet4h pgather<Eigen::half, Packet4h>(const Eigen::half* from, Index stride)
{
  Packet4h result;
  result.x = _mm_set_pi16(from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
  return result;
}

template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet4h>(Eigen::half* to, const Packet4h& from, Index stride)
{
  __int64_t a = _mm_cvtm64_si64(from.x);
  to[stride*0].x = static_cast<unsigned short>(a);
  to[stride*1].x = static_cast<unsigned short>(a >> 16);
  to[stride*2].x = static_cast<unsigned short>(a >> 32);
  to[stride*3].x = static_cast<unsigned short>(a >> 48);
}

EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet4h,4>& kernel) {
  __m64 T0 = _mm_unpacklo_pi16(kernel.packet[0].x, kernel.packet[1].x);
  __m64 T1 = _mm_unpacklo_pi16(kernel.packet[2].x, kernel.packet[3].x);
  __m64 T2 = _mm_unpackhi_pi16(kernel.packet[0].x, kernel.packet[1].x);
  __m64 T3 = _mm_unpackhi_pi16(kernel.packet[2].x, kernel.packet[3].x);

  kernel.packet[0].x = _mm_unpacklo_pi32(T0, T1);
  kernel.packet[1].x = _mm_unpackhi_pi32(T0, T1);
  kernel.packet[2].x = _mm_unpacklo_pi32(T2, T3);
  kernel.packet[3].x = _mm_unpackhi_pi32(T2, T3);
}

#endif

} // end namespace internal

} // end namespace Eigen

#if EIGEN_COMP_PGI && EIGEN_COMP_PGI < 1900
// PGI++ does not define the following intrinsics in C++ mode.
static inline __m128  _mm_castpd_ps   (__m128d x) { return reinterpret_cast<__m128&>(x);  }
static inline __m128i _mm_castpd_si128(__m128d x) { return reinterpret_cast<__m128i&>(x); }
static inline __m128d _mm_castps_pd   (__m128  x) { return reinterpret_cast<__m128d&>(x); }
static inline __m128i _mm_castps_si128(__m128  x) { return reinterpret_cast<__m128i&>(x); }
static inline __m128  _mm_castsi128_ps(__m128i x) { return reinterpret_cast<__m128&>(x);  }
static inline __m128d _mm_castsi128_pd(__m128i x) { return reinterpret_cast<__m128d&>(x); }
#endif

#endif // EIGEN_PACKET_MATH_SSE_H