// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud
// Copyright (C) 2010 Konstantinos Margaritis
// Heavily based on Gael's SSE version.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_NEON_H
#define EIGEN_PACKET_MATH_NEON_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif

#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#if EIGEN_ARCH_ARM64
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
#else
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
#endif
#endif

#if EIGEN_COMP_MSVC

// In MSVC's arm_neon.h header file, all NEON vector types
// are aliases to the same underlying type __n128.
// We thus have to wrap them to make them different C++ types.
// (See also bug 1428)

template<typename T, int unique_id>
struct eigen_packet_wrapper
{
  operator T&() { return m_val; }
  operator const T&() const { return m_val; }
  eigen_packet_wrapper() {}
  eigen_packet_wrapper(const T &v) : m_val(v) {}
  eigen_packet_wrapper& operator=(const T &v) { m_val = v; return *this; }

  T m_val;
};
typedef eigen_packet_wrapper<float32x2_t,0> Packet2f;
typedef eigen_packet_wrapper<float32x4_t,1> Packet4f;
typedef eigen_packet_wrapper<int32x2_t  ,2> Packet2i;
typedef eigen_packet_wrapper<int32x4_t  ,3> Packet4i;
typedef eigen_packet_wrapper<uint32x2_t ,4> Packet2ui;
typedef eigen_packet_wrapper<uint32x4_t ,5> Packet4ui;

#else

typedef float32x2_t Packet2f;
typedef float32x4_t Packet4f;
typedef int32x2_t   Packet2i;
typedef int32x4_t   Packet4i;
typedef uint32x2_t  Packet2ui;
typedef uint32x4_t  Packet4ui;

#endif // EIGEN_COMP_MSVC

#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = vreinterpretq_f32_u32(pset1<int32_t>(X))

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)

#if EIGEN_ARCH_ARM64
// __builtin_prefetch tends to do nothing on ARM64 compilers because the
// prefetch instructions there are too detailed for __builtin_prefetch to map
// meaningfully to them.
#define EIGEN_ARM_PREFETCH(ADDR)  __asm__ __volatile__("prfm pldl1keep, [%[addr]]\n" ::[addr] "r"(ADDR) : );
#elif EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
#define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR);
#elif defined __pld
#define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR)
#elif EIGEN_ARCH_ARM32
#define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ("pld [%[addr]]\n" :: [addr] "r" (ADDR) : );
#else
// by default no explicit prefetching
#define EIGEN_ARM_PREFETCH(ADDR)
#endif

template <>
struct packet_traits<float> : default_packet_traits
{
  typedef Packet4f type;
  typedef Packet2f half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 1,

    HasAdd       = 1,
    HasSub       = 1,
    HasMul       = 1,
    HasNegate    = 1,
    HasAbs       = 1,
    HasArg       = 0,
    HasAbs2      = 1,
    HasMin       = 1,
    HasMax       = 1,
    HasConj      = 1,
    HasSetLinear = 0,
    HasBlend     = 0,
    HasReduxp    = 1,

    HasDiv   = 1,
    HasFloor = 1,

    HasSin  = EIGEN_FAST_MATH,
    HasCos  = EIGEN_FAST_MATH,
    HasLog  = 1,
    HasExp  = 1,
    HasSqrt = 0,
    HasTanh = EIGEN_FAST_MATH,
    HasErf  = EIGEN_FAST_MATH
  };
};

template <>
struct packet_traits<int32_t> : default_packet_traits
{
  typedef Packet4i type;
  typedef Packet2i half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 4,
    HasHalfPacket = 1,

    HasAdd       = 1,
    HasSub       = 1,
    HasMul       = 1,
    HasNegate    = 1,
    HasAbs       = 1,
    HasArg       = 0,
    HasAbs2      = 1,
    HasMin       = 1,
    HasMax       = 1,
    HasConj      = 1,
    HasSetLinear = 0,
    HasBlend     = 0,
    HasReduxp    = 1
  };
};

#if EIGEN_GNUC_AT_MOST(4, 4) && !EIGEN_COMP_LLVM
// workaround gcc 4.2, 4.3 and 4.4 compilation issue
EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x)     { return ::vld1q_f32((const float32_t*)x); }
EIGEN_STRONG_INLINE float32x2_t vld1_f32(const float* x)      { return ::vld1_f32 ((const float32_t*)x); }
EIGEN_STRONG_INLINE float32x2_t vld1_dup_f32(const float* x)  { return ::vld1_dup_f32((const float32_t*)x); }
EIGEN_STRONG_INLINE void        vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to, from); }
EIGEN_STRONG_INLINE void        vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to, from); }
#endif

template<> struct unpacket_traits<Packet2f>
{
  typedef float type;
  typedef Packet2f half;
  typedef Packet2i integer_packet;
  enum { size = 2, alignment = Aligned16, vectorizable = true, masked_load_available = false, masked_store_available = false };
};
template<> struct unpacket_traits<Packet4f>
{
  typedef float type;
  typedef Packet2f half;
  typedef Packet4i integer_packet;
  enum { size = 4, alignment = Aligned16, vectorizable = true, masked_load_available = false, masked_store_available = false };
};
template<> struct unpacket_traits<Packet2i>
{
  typedef int32_t type;
  typedef Packet2i half;
  enum { size = 2, alignment = Aligned16, vectorizable = true, masked_load_available = false, masked_store_available = false };
};
template<> struct unpacket_traits<Packet4i>
{
  typedef int32_t type;
  typedef Packet2i half;
  enum { size = 4, alignment = Aligned16, vectorizable = true, masked_load_available = false, masked_store_available = false };
};

template<> EIGEN_STRONG_INLINE Packet2f pset1<Packet2f>(const float& from)   { return vdup_n_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float& from)   { return vdupq_n_f32(from); }
template<> EIGEN_STRONG_INLINE Packet2i pset1<Packet2i>(const int32_t& from) { return vdup_n_s32(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int32_t& from) { return vdupq_n_s32(from); }

template<> EIGEN_STRONG_INLINE Packet2f pset1frombits<Packet2f>(unsigned int from) { return vreinterpret_f32_u32(vdup_n_u32(from)); }
template<> EIGEN_STRONG_INLINE Packet4f pset1frombits<Packet4f>(unsigned int from) { return vreinterpretq_f32_u32(vdupq_n_u32(from)); }
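// Illustrative sketch (not part of Eigen): pset1frombits broadcasts a raw
// 32-bit pattern into every lane and reinterprets it as float, which is the
// building block for bit-mask constants such as an IEEE-754 sign mask. The
// names below are hypothetical; a portable scalar analogue of one lane is:
#if 0
#include <cstdint>
#include <cstring>

inline float float_from_bits(uint32_t bits)
{
  float f;
  std::memcpy(&f, &bits, sizeof(f));  // reinterpret the bit pattern as float, like vreinterpret_f32_u32
  return f;
}

// e.g. 0x80000000u is the sign bit of a binary32; clearing it yields fabs.
const float sign_mask = float_from_bits(0x80000000u);
#endif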
template<> EIGEN_STRONG_INLINE Packet2f plset<Packet2f>(const float& a)
{
  const float c[] = {0.0f,1.0f};
  return vadd_f32(pset1<Packet2f>(a), vld1_f32(c));
}
template<> EIGEN_STRONG_INLINE Packet4f plset<Packet4f>(const float& a)
{
  const float c[] = {0.0f,1.0f,2.0f,3.0f};
  return vaddq_f32(pset1<Packet4f>(a), vld1q_f32(c));
}
template<> EIGEN_STRONG_INLINE Packet2i plset<Packet2i>(const int32_t& a)
{
  const int32_t c[] = {0,1};
  return vadd_s32(pset1<Packet2i>(a), vld1_s32(c));
}
template<> EIGEN_STRONG_INLINE Packet4i plset<Packet4i>(const int32_t& a)
{
  const int32_t c[] = {0,1,2,3};
  return vaddq_s32(pset1<Packet4i>(a), vld1q_s32(c));
}

template<> EIGEN_STRONG_INLINE Packet2f padd<Packet2f>(const Packet2f& a, const Packet2f& b) { return vadd_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vaddq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet2i padd<Packet2i>(const Packet2i& a, const Packet2i& b) { return vadd_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vaddq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet2f psub<Packet2f>(const Packet2f& a, const Packet2f& b) { return vsub_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vsubq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet2i psub<Packet2i>(const Packet2i& a, const Packet2i& b) { return vsub_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vsubq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet2f pnegate(const Packet2f& a) { return vneg_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return vnegq_f32(a); }
template<> EIGEN_STRONG_INLINE Packet2i pnegate(const Packet2i& a) { return vneg_s32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return vnegq_s32(a); }

template<> EIGEN_STRONG_INLINE Packet2f pconj(const Packet2f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet2i pconj(const Packet2i& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet2f pmul<Packet2f>(const Packet2f& a, const Packet2f& b) { return vmul_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmulq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet2i pmul<Packet2i>(const Packet2i& a, const Packet2i& b) { return vmul_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmulq_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet2f pdiv<Packet2f>(const Packet2f& a, const Packet2f& b)
{
#if EIGEN_ARCH_ARM64
  return vdiv_f32(a,b);
#else
  Packet2f inv, restep, div;

  // NEON does not offer a divide instruction, so we have to use a reciprocal approximation.
  // However, in contrast to other SIMD engines (AltiVec/SSE), NEON offers both
  // a reciprocal estimate AND a reciprocal step, which saves a few instructions.
  // vrecpe_f32() returns an estimate of 1/b, which we fine-tune with
  // Newton-Raphson and vrecps_f32().
  inv = vrecpe_f32(b);

  // This returns a correction factor, by which we have to multiply inv to get a better
  // approximation of 1/b.
  restep = vrecps_f32(b, inv);
  inv = vmul_f32(restep, inv);

  // Finally, multiply a by 1/b to get the wanted result of the division.
  div = vmul_f32(a, inv);

  return div;
#endif
}
template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
{
#if EIGEN_ARCH_ARM64
  return vdivq_f32(a,b);
#else
  Packet4f inv, restep, div;

  // Same reciprocal-estimate plus Newton-Raphson step as in the Packet2f version above:
  // vrecpeq_f32() returns an estimate of 1/b, which we fine-tune with vrecpsq_f32().
  inv = vrecpeq_f32(b);

  // This returns a correction factor, by which we have to multiply inv to get a better
  // approximation of 1/b.
  restep = vrecpsq_f32(b, inv);
  inv = vmulq_f32(restep, inv);

  // Finally, multiply a by 1/b to get the wanted result of the division.
  div = vmulq_f32(a, inv);

  return div;
#endif
}

template<> EIGEN_STRONG_INLINE Packet2i pdiv<Packet2i>(const Packet2i& /*a*/, const Packet2i& /*b*/)
{
  eigen_assert(false && "packet integer division is not supported by NEON");
  return pset1<Packet2i>(0);
}
template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
{
  eigen_assert(false && "packet integer division is not supported by NEON");
  return pset1<Packet4i>(0);
}
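// Illustrative sketch (not part of Eigen): scalar rendering of the reciprocal
// path used above on 32-bit ARM. vrecpe_f32 provides a crude initial guess
// x0 ~ 1/b, and vrecps_f32(b, x) returns the Newton-Raphson correction factor
// (2 - b*x), so x' = x * (2 - b*x) roughly doubles the number of correct bits
// per step. The helper name and the single-step count are assumptions made
// only for this sketch.
#if 0
inline float approx_div(float a, float b)
{
  float x = 1.0f / b;             // stand-in for the ~8-bit accurate hardware estimate vrecpe_f32(b)
  float restep = 2.0f - b * x;    // what vrecps_f32(b, x) computes
  x = x * restep;                 // one Newton-Raphson refinement, as in the kernel above
  return a * x;                   // a * (1/b) approximates a / b
}
#endif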
// Clang/ARM wrongly advertises __ARM_FEATURE_FMA even when it's not available,
// then implements a slow software scalar fallback calling fmaf()!
// Filed LLVM bug:
//   https://llvm.org/bugs/show_bug.cgi?id=27216
#if (defined __ARM_FEATURE_FMA) && !(EIGEN_COMP_CLANG && EIGEN_ARCH_ARM)
// See bug 936.
// FMA is available on VFPv4 i.e. when compiling with -mfpu=neon-vfpv4.
// FMA is a true fused multiply-add i.e. only 1 rounding at the end, no intermediate rounding.
// MLA is not fused i.e. does 2 roundings.
// In addition to giving better accuracy, FMA also gives better performance here on a Krait (Nexus 4):
// MLA: 10 GFlop/s ; FMA: 12 GFlop/s.
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vfmaq_f32(c,a,b); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c)
{
#if EIGEN_COMP_CLANG && EIGEN_ARCH_ARM
  // Clang/ARM will replace VMLA by VMUL+VADD at least for some values of -mcpu,
  // at least -mcpu=cortex-a8 and -mcpu=cortex-a7. Since the former is the default on
  // -march=armv7-a, that is a very common case.
  // See e.g. this thread:
  //   http://lists.llvm.org/pipermail/llvm-dev/2013-December/068806.html
  // Filed LLVM bug:
  //   https://llvm.org/bugs/show_bug.cgi?id=27219
  Packet4f r = c;
  asm volatile(
    "vmla.f32 %q[r], %q[a], %q[b]"
    : [r] "+w" (r)
    : [a] "w" (a),
      [b] "w" (b)
    : );
  return r;
#else
  return vmlaq_f32(c,a,b);
#endif
}
#endif

// No FMA instruction for int, so use MLA unconditionally.
template<> EIGEN_STRONG_INLINE Packet2i pmadd(const Packet2i& a, const Packet2i& b, const Packet2i& c) { return vmla_s32(c,a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return vmlaq_s32(c,a,b); }

template<> EIGEN_STRONG_INLINE Packet2f pmin<Packet2f>(const Packet2f& a, const Packet2f& b) { return vmin_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vminq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet2i pmin<Packet2i>(const Packet2i& a, const Packet2i& b) { return vmin_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vminq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet2f pmax<Packet2f>(const Packet2f& a, const Packet2f& b) { return vmax_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vmaxq_f32(a,b); }
template<> EIGEN_STRONG_INLINE Packet2i pmax<Packet2i>(const Packet2i& a, const Packet2i& b) { return vmax_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vmaxq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet2f pcmp_le(const Packet2f& a, const Packet2f& b) { return vreinterpret_f32_u32(vcle_f32(a,b)); }
template<> EIGEN_STRONG_INLINE Packet4f pcmp_le(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(vcleq_f32(a,b)); }
template<> EIGEN_STRONG_INLINE Packet2i pcmp_le(const Packet2i& a, const Packet2i& b) { return vreinterpret_s32_u32(vcle_s32(a,b)); }
template<> EIGEN_STRONG_INLINE Packet4i pcmp_le(const Packet4i& a, const Packet4i& b) { return vreinterpretq_s32_u32(vcleq_s32(a,b)); }

template<> EIGEN_STRONG_INLINE Packet2f pcmp_lt(const Packet2f& a, const Packet2f& b) { return vreinterpret_f32_u32(vclt_f32(a,b)); }
template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(vcltq_f32(a,b)); }
template<> EIGEN_STRONG_INLINE Packet2i pcmp_lt(const Packet2i& a, const Packet2i& b) { return vreinterpret_s32_u32(vclt_s32(a,b)); }
template<> EIGEN_STRONG_INLINE Packet4i pcmp_lt(const Packet4i& a, const Packet4i& b) { return vreinterpretq_s32_u32(vcltq_s32(a,b)); }

template<> EIGEN_STRONG_INLINE Packet2f pcmp_eq(const Packet2f& a, const Packet2f& b) { return vreinterpret_f32_u32(vceq_f32(a,b)); }
template<> EIGEN_STRONG_INLINE Packet4f pcmp_eq(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(vceqq_f32(a,b)); }
template<> EIGEN_STRONG_INLINE Packet2i pcmp_eq(const Packet2i& a, const Packet2i& b) { return vreinterpret_s32_u32(vceq_s32(a,b)); }
template<> EIGEN_STRONG_INLINE Packet4i pcmp_eq(const Packet4i& a, const Packet4i& b) { return vreinterpretq_s32_u32(vceqq_s32(a,b)); }

template<> EIGEN_STRONG_INLINE Packet2f pcmp_lt_or_nan(const Packet2f& a, const Packet2f& b) { return vreinterpret_f32_u32(vmvn_u32(vcge_f32(a,b))); }
template<> EIGEN_STRONG_INLINE Packet4f pcmp_lt_or_nan(const Packet4f& a, const Packet4f& b) { return vreinterpretq_f32_u32(vmvnq_u32(vcgeq_f32(a,b))); }

template<> EIGEN_STRONG_INLINE Packet2f pfloor<Packet2f>(const Packet2f& a)
{
  const Packet2f cst_1 = pset1<Packet2f>(1.0f);
  /* perform a floorf */
  Packet2f tmp = vcvt_f32_s32(vcvt_s32_f32(a));

  /* if greater, subtract 1 */
  Packet2ui mask = vcgt_f32(tmp, a);
  mask = vand_u32(mask, vreinterpret_u32_f32(cst_1));
  return vsub_f32(tmp, vreinterpret_f32_u32(mask));
}

template<> EIGEN_STRONG_INLINE Packet4f pfloor<Packet4f>(const Packet4f& a)
{
  const Packet4f cst_1 = pset1<Packet4f>(1.0f);
  /* perform a floorf */
  Packet4f tmp = vcvtq_f32_s32(vcvtq_s32_f32(a));

  /* if greater, subtract 1 */
  Packet4ui mask = vcgtq_f32(tmp, a);
  mask = vandq_u32(mask, vreinterpretq_u32_f32(cst_1));
  return vsubq_f32(tmp, vreinterpretq_f32_u32(mask));
}
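// Illustrative sketch (not part of Eigen): pfloor above computes floor as
// "truncate toward zero, then subtract 1 wherever the truncation went up",
// which only happens for negative non-integral inputs. A portable scalar
// rendering of the same idea (ignoring values outside the int range and NaN):
#if 0
inline float floor_via_truncate(float a)
{
  float t = static_cast<float>(static_cast<int>(a)); // round toward zero, like vcvt_s32_f32 + vcvt_f32_s32
  if (t > a)       // truncation overshot, i.e. a was negative and non-integral
    t -= 1.0f;     // the vectorized code subtracts 1.0f through a bit mask instead of a branch
  return t;
}
// floor_via_truncate( 2.7f) ==  2.0f
// floor_via_truncate(-2.7f) == -3.0f
#endif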
// Logical operations are not defined for float, so we have to reinterpret-cast to/from
// the unsigned integer types using NEON intrinsics.
template<> EIGEN_STRONG_INLINE Packet2f pand<Packet2f>(const Packet2f& a, const Packet2f& b)
{ return vreinterpret_f32_u32(vand_u32(vreinterpret_u32_f32(a), vreinterpret_u32_f32(b))); }
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b)
{ return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a), vreinterpretq_u32_f32(b))); }
template<> EIGEN_STRONG_INLINE Packet2i pand<Packet2i>(const Packet2i& a, const Packet2i& b) { return vand_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vandq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet2f por<Packet2f>(const Packet2f& a, const Packet2f& b)
{ return vreinterpret_f32_u32(vorr_u32(vreinterpret_u32_f32(a), vreinterpret_u32_f32(b))); }
template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b)
{ return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a), vreinterpretq_u32_f32(b))); }
template<> EIGEN_STRONG_INLINE Packet2i por<Packet2i>(const Packet2i& a, const Packet2i& b) { return vorr_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vorrq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet2f pxor<Packet2f>(const Packet2f& a, const Packet2f& b)
{ return vreinterpret_f32_u32(veor_u32(vreinterpret_u32_f32(a), vreinterpret_u32_f32(b))); }
template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b)
{ return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a), vreinterpretq_u32_f32(b))); }
template<> EIGEN_STRONG_INLINE Packet2i pxor<Packet2i>(const Packet2i& a, const Packet2i& b) { return veor_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return veorq_s32(a,b); }

template<> EIGEN_STRONG_INLINE Packet2f pandnot<Packet2f>(const Packet2f& a, const Packet2f& b)
{ return vreinterpret_f32_u32(vbic_u32(vreinterpret_u32_f32(a), vreinterpret_u32_f32(b))); }
template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b)
{ return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a), vreinterpretq_u32_f32(b))); }
template<> EIGEN_STRONG_INLINE Packet2i pandnot<Packet2i>(const Packet2i& a, const Packet2i& b) { return vbic_s32(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vbicq_s32(a,b); }

template<int N> EIGEN_STRONG_INLINE Packet2i pshiftright(Packet2i a) { return vshr_n_s32(a,N); }
template<int N> EIGEN_STRONG_INLINE Packet4i pshiftright(Packet4i a) { return vshrq_n_s32(a,N); }

template<int N> EIGEN_STRONG_INLINE Packet2i pshiftleft(Packet2i a) { return vshl_n_s32(a,N); }
template<int N> EIGEN_STRONG_INLINE Packet4i pshiftleft(Packet4i a) { return vshlq_n_s32(a,N); }
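// Illustrative sketch (not part of Eigen): the reinterpret-to-integer trick
// above is what makes branchless lane selection possible on floats. A
// comparison produces an all-ones / all-zeros mask per lane, and
// (a & ~mask) | (b & mask) picks b where the mask is set; pfloor uses a
// variant of this pattern. The helper name is hypothetical; one lane in
// portable scalar code:
#if 0
#include <cstdint>
#include <cstring>

inline float select_by_mask(float a, float b, bool take_b)
{
  uint32_t ua, ub, ur;
  std::memcpy(&ua, &a, 4);
  std::memcpy(&ub, &b, 4);
  const uint32_t mask = take_b ? 0xFFFFFFFFu : 0u;  // what vclt/vcge/... produce per lane
  ur = (ua & ~mask) | (ub & mask);                  // pandnot + pand + por on the bit patterns
  float r;
  std::memcpy(&r, &ur, 4);
  return r;
}
#endif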
template<> EIGEN_STRONG_INLINE Packet2f pload<Packet2f>(const float* from)   { EIGEN_DEBUG_ALIGNED_LOAD return vld1_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from)   { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f32(from); }
template<> EIGEN_STRONG_INLINE Packet2i pload<Packet2i>(const int32_t* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1_s32(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int32_t* from) { EIGEN_DEBUG_ALIGNED_LOAD return vld1q_s32(from); }

template<> EIGEN_STRONG_INLINE Packet2f ploadu<Packet2f>(const float* from)   { EIGEN_DEBUG_UNALIGNED_LOAD return vld1_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)   { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f32(from); }
template<> EIGEN_STRONG_INLINE Packet2i ploadu<Packet2i>(const int32_t* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1_s32(from); }
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int32_t* from) { EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_s32(from); }

template<> EIGEN_STRONG_INLINE Packet2f ploaddup<Packet2f>(const float* from) { return vld1_dup_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float* from)
{ return vcombine_f32(vld1_dup_f32(from), vld1_dup_f32(from+1)); }
template<> EIGEN_STRONG_INLINE Packet2i ploaddup<Packet2i>(const int32_t* from) { return vld1_dup_s32(from); }
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int32_t* from)
{ return vcombine_s32(vld1_dup_s32(from), vld1_dup_s32(from+1)); }

template<> EIGEN_STRONG_INLINE Packet4f ploadquad<Packet4f>(const float* from)   { return vld1q_dup_f32(from); }
template<> EIGEN_STRONG_INLINE Packet4i ploadquad<Packet4i>(const int32_t* from) { return vld1q_dup_s32(from); }

template<> EIGEN_STRONG_INLINE void pstore<float>  (float* to, const Packet2f& from)   { EIGEN_DEBUG_ALIGNED_STORE vst1_f32(to,from); }
template<> EIGEN_STRONG_INLINE void pstore<float>  (float* to, const Packet4f& from)   { EIGEN_DEBUG_ALIGNED_STORE vst1q_f32(to,from); }
template<> EIGEN_STRONG_INLINE void pstore<int32_t>(int32_t* to, const Packet2i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1_s32(to,from); }
template<> EIGEN_STRONG_INLINE void pstore<int32_t>(int32_t* to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vst1q_s32(to,from); }

template<> EIGEN_STRONG_INLINE void pstoreu<float>  (float* to, const Packet2f& from)   { EIGEN_DEBUG_UNALIGNED_STORE vst1_f32(to,from); }
template<> EIGEN_STRONG_INLINE void pstoreu<float>  (float* to, const Packet4f& from)   { EIGEN_DEBUG_UNALIGNED_STORE vst1q_f32(to,from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int32_t>(int32_t* to, const Packet2i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1_s32(to,from); }
template<> EIGEN_STRONG_INLINE void pstoreu<int32_t>(int32_t* to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE vst1q_s32(to,from); }

template<> EIGEN_DEVICE_FUNC inline Packet2f pgather<float, Packet2f>(const float* from, Index stride)
{
  Packet2f res = vld1_dup_f32(from);
  res = vld1_lane_f32(from + 1*stride, res, 1);
  return res;
}
template<> EIGEN_DEVICE_FUNC inline Packet4f pgather<float, Packet4f>(const float* from, Index stride)
{
  Packet4f res = vld1q_dup_f32(from);
  res = vld1q_lane_f32(from + 1*stride, res, 1);
  res = vld1q_lane_f32(from + 2*stride, res, 2);
  res = vld1q_lane_f32(from + 3*stride, res, 3);
  return res;
}
template<> EIGEN_DEVICE_FUNC inline Packet2i pgather<int32_t, Packet2i>(const int32_t* from, Index stride)
{
  Packet2i res = vld1_dup_s32(from);
  res = vld1_lane_s32(from + 1*stride, res, 1);
  return res;
}
template<> EIGEN_DEVICE_FUNC inline Packet4i pgather<int32_t, Packet4i>(const int32_t* from, Index stride)
{
  Packet4i res = vld1q_dup_s32(from);
  res = vld1q_lane_s32(from + 1*stride, res, 1);
  res = vld1q_lane_s32(from + 2*stride, res, 2);
  res = vld1q_lane_s32(from + 3*stride, res, 3);
  return res;
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet2f>(float* to, const Packet2f& from, Index stride)
{
  vst1_lane_f32(to + stride*0, from, 0);
  vst1_lane_f32(to + stride*1, from, 1);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet4f>(float* to, const Packet4f& from, Index stride)
{
  vst1q_lane_f32(to + stride*0, from, 0);
  vst1q_lane_f32(to + stride*1, from, 1);
  vst1q_lane_f32(to + stride*2, from, 2);
  vst1q_lane_f32(to + stride*3, from, 3);
}
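// Illustrative sketch (not part of Eigen): pgather/pscatter read and write one
// lane at a time with a constant element stride, which is how Eigen vectorizes
// e.g. a column of a row-major matrix. The helper names are hypothetical; a
// portable scalar rendering of the 4-lane float variants above:
#if 0
inline void gather4(const float* from, long stride, float out[4])
{
  for (int i = 0; i < 4; ++i) out[i] = from[i * stride];  // one vld1q_lane_f32 per lane
}
inline void scatter4(float* to, const float in[4], long stride)
{
  for (int i = 0; i < 4; ++i) to[i * stride] = in[i];     // one vst1q_lane_f32 per lane
}
#endif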
template<> EIGEN_DEVICE_FUNC inline void pscatter<int32_t, Packet2i>(int32_t* to, const Packet2i& from, Index stride)
{
  vst1_lane_s32(to + stride*0, from, 0);
  vst1_lane_s32(to + stride*1, from, 1);
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<int32_t, Packet4i>(int32_t* to, const Packet4i& from, Index stride)
{
  vst1q_lane_s32(to + stride*0, from, 0);
  vst1q_lane_s32(to + stride*1, from, 1);
  vst1q_lane_s32(to + stride*2, from, 2);
  vst1q_lane_s32(to + stride*3, from, 3);
}

template<> EIGEN_STRONG_INLINE void prefetch<float>  (const float* addr)   { EIGEN_ARM_PREFETCH(addr); }
template<> EIGEN_STRONG_INLINE void prefetch<int32_t>(const int32_t* addr) { EIGEN_ARM_PREFETCH(addr); }

template<> EIGEN_STRONG_INLINE float   pfirst<Packet2f>(const Packet2f& a) { return vget_lane_f32(a,0); }
template<> EIGEN_STRONG_INLINE float   pfirst<Packet4f>(const Packet4f& a) { return vgetq_lane_f32(a,0); }
template<> EIGEN_STRONG_INLINE int32_t pfirst<Packet2i>(const Packet2i& a) { return vget_lane_s32(a,0); }
template<> EIGEN_STRONG_INLINE int32_t pfirst<Packet4i>(const Packet4i& a) { return vgetq_lane_s32(a,0); }

template<> EIGEN_STRONG_INLINE Packet2f preverse(const Packet2f& a) { return vrev64_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
{
  const float32x4_t a_r64 = vrev64q_f32(a);
  return vcombine_f32(vget_high_f32(a_r64), vget_low_f32(a_r64));
}
template<> EIGEN_STRONG_INLINE Packet2i preverse(const Packet2i& a) { return vrev64_s32(a); }
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
{
  const int32x4_t a_r64 = vrev64q_s32(a);
  return vcombine_s32(vget_high_s32(a_r64), vget_low_s32(a_r64));
}
template<> EIGEN_STRONG_INLINE Packet4ui preverse(const Packet4ui& a)
{
  const uint32x4_t a_r64 = vrev64q_u32(a);
  return vcombine_u32(vget_high_u32(a_r64), vget_low_u32(a_r64));
}

template<> EIGEN_STRONG_INLINE Packet2f pabs(const Packet2f& a) { return vabs_f32(a); }
template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vabsq_f32(a); }
template<> EIGEN_STRONG_INLINE Packet2i pabs(const Packet2i& a) { return vabs_s32(a); }
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vabsq_s32(a); }

template<> EIGEN_STRONG_INLINE Packet2f pfrexp<Packet2f>(const Packet2f& a, Packet2f& exponent)
{ return pfrexp_float(a, exponent); }
template<> EIGEN_STRONG_INLINE Packet4f pfrexp<Packet4f>(const Packet4f& a, Packet4f& exponent)
{ return pfrexp_float(a, exponent); }

template<> EIGEN_STRONG_INLINE Packet2f pldexp<Packet2f>(const Packet2f& a, const Packet2f& exponent)
{ return pldexp_float(a, exponent); }
template<> EIGEN_STRONG_INLINE Packet4f pldexp<Packet4f>(const Packet4f& a, const Packet4f& exponent)
{ return pldexp_float(a, exponent); }

template<> EIGEN_STRONG_INLINE float predux<Packet2f>(const Packet2f& a) { return vget_lane_f32(vpadd_f32(a,a), 0); }
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  const float32x2_t sum = vadd_f32(vget_low_f32(a), vget_high_f32(a));
  return vget_lane_f32(vpadd_f32(sum, sum), 0);
}
template<> EIGEN_STRONG_INLINE int32_t predux<Packet2i>(const Packet2i& a) { return vget_lane_s32(vpadd_s32(a,a), 0); }
template<> EIGEN_STRONG_INLINE int32_t predux<Packet4i>(const Packet4i& a)
{
  const int32x2_t sum = vadd_s32(vget_low_s32(a), vget_high_s32(a));
  return vget_lane_s32(vpadd_s32(sum, sum), 0);
}
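// Illustrative sketch (not part of Eigen): predux above reduces a 4-lane
// packet with a small tree instead of a serial loop: the high half is added to
// the low half (one 2-lane add), then one pairwise add folds the remaining two
// lanes. The helper name is hypothetical; portable scalar rendering:
#if 0
inline float predux4_scalar(const float a[4])
{
  float lo0 = a[0] + a[2];  // vadd_f32(vget_low_f32(a), vget_high_f32(a)), lane 0
  float lo1 = a[1] + a[3];  // same add, lane 1
  return lo0 + lo1;         // vpadd_f32(sum, sum), lane 0
}
#endif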
template<> EIGEN_STRONG_INLINE Packet2f preduxp<Packet2f>(const Packet2f* vecs)
{
  const float32x2x2_t vtrn = vzip_f32(vecs[0], vecs[1]);
  return vadd_f32(vtrn.val[0], vtrn.val[1]);
}
template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  const float32x4x2_t vtrn1 = vzipq_f32(vecs[0], vecs[2]);
  const float32x4x2_t vtrn2 = vzipq_f32(vecs[1], vecs[3]);
  const float32x4x2_t res1  = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
  const float32x4x2_t res2  = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);
  return vaddq_f32(vaddq_f32(res1.val[0], res1.val[1]),
                   vaddq_f32(res2.val[0], res2.val[1]));
}
template<> EIGEN_STRONG_INLINE Packet2i preduxp<Packet2i>(const Packet2i* vecs)
{
  const int32x2x2_t vtrn = vzip_s32(vecs[0], vecs[1]);
  return vadd_s32(vtrn.val[0], vtrn.val[1]);
}
template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  const int32x4x2_t vtrn1 = vzipq_s32(vecs[0], vecs[2]);
  const int32x4x2_t vtrn2 = vzipq_s32(vecs[1], vecs[3]);
  const int32x4x2_t res1  = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
  const int32x4x2_t res2  = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);
  return vaddq_s32(vaddq_s32(res1.val[0], res1.val[1]),
                   vaddq_s32(res2.val[0], res2.val[1]));
}

// Other reduction functions:

// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet2f>(const Packet2f& a)
{ return vget_lane_f32(a, 0) * vget_lane_f32(a, 1); }
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{ return predux_mul<Packet2f>(vmul_f32(vget_low_f32(a), vget_high_f32(a))); }
template<> EIGEN_STRONG_INLINE int32_t predux_mul<Packet2i>(const Packet2i& a)
{ return vget_lane_s32(a, 0) * vget_lane_s32(a, 1); }
template<> EIGEN_STRONG_INLINE int32_t predux_mul<Packet4i>(const Packet4i& a)
{ return predux_mul<Packet2i>(vmul_s32(vget_low_s32(a), vget_high_s32(a))); }

// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet2f>(const Packet2f& a)
{ return vget_lane_f32(vpmin_f32(a,a), 0); }
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  const float32x2_t min = vmin_f32(vget_low_f32(a), vget_high_f32(a));
  return vget_lane_f32(vpmin_f32(min, min), 0);
}
template<> EIGEN_STRONG_INLINE int32_t predux_min<Packet2i>(const Packet2i& a)
{ return vget_lane_s32(vpmin_s32(a,a), 0); }
template<> EIGEN_STRONG_INLINE int32_t predux_min<Packet4i>(const Packet4i& a)
{
  const int32x2_t min = vmin_s32(vget_low_s32(a), vget_high_s32(a));
  return vget_lane_s32(vpmin_s32(min, min), 0);
}

// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet2f>(const Packet2f& a)
{ return vget_lane_f32(vpmax_f32(a,a), 0); }
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  const float32x2_t max = vmax_f32(vget_low_f32(a), vget_high_f32(a));
  return vget_lane_f32(vpmax_f32(max, max), 0);
}
template<> EIGEN_STRONG_INLINE int32_t predux_max<Packet2i>(const Packet2i& a)
{ return vget_lane_s32(vpmax_s32(a,a), 0); }
template<> EIGEN_STRONG_INLINE int32_t predux_max<Packet4i>(const Packet4i& a)
{
  const int32x2_t max = vmax_s32(vget_low_s32(a), vget_high_s32(a));
  return vget_lane_s32(vpmax_s32(max, max), 0);
}

template<> EIGEN_STRONG_INLINE bool predux_any(const Packet4f& x)
{
  uint32x2_t tmp = vorr_u32(vget_low_u32( vreinterpretq_u32_f32(x)),
                            vget_high_u32(vreinterpretq_u32_f32(x)));
  return vget_lane_u32(vpmax_u32(tmp, tmp), 0);
}

// this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing incorrect compilation errors,
// see bug 347 and this LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=11074
#define PALIGN_NEON(Offset,Type,Command) \
template<>\
struct palign_impl<Offset,Type>\
{\
    EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
    {\
        if (Offset!=0)\
            first = Command(first, second, Offset);\
    }\
};\

PALIGN_NEON(0, Packet2f, vext_f32)
PALIGN_NEON(1, Packet2f, vext_f32)
PALIGN_NEON(0, Packet4f, vextq_f32)
PALIGN_NEON(1, Packet4f, vextq_f32)
PALIGN_NEON(2, Packet4f, vextq_f32)
PALIGN_NEON(3, Packet4f, vextq_f32)
PALIGN_NEON(0, Packet2i, vext_s32)
PALIGN_NEON(1, Packet2i, vext_s32)
PALIGN_NEON(0, Packet4i, vextq_s32)
PALIGN_NEON(1, Packet4i, vextq_s32)
PALIGN_NEON(2, Packet4i, vextq_s32)
PALIGN_NEON(3, Packet4i, vextq_s32)

#undef PALIGN_NEON
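// Illustrative sketch (not part of Eigen): palign_impl<Offset> conceptually
// concatenates two packets and slides a packet-sized window Offset lanes into
// the pair, which is what vext_f32/vextq_f32 do in one instruction. The helper
// name is hypothetical; portable scalar rendering for 4 lanes:
#if 0
inline void palign4_scalar(float first[4], const float second[4], int offset)
{
  float both[8];
  for (int i = 0; i < 4; ++i) { both[i] = first[i]; both[4 + i] = second[i]; }
  for (int i = 0; i < 4; ++i) first[i] = both[offset + i];  // window starting at 'offset'
}
#endif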
EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2f,2>& kernel)
{
  const float32x2x2_t z = vzip_f32(kernel.packet[0], kernel.packet[1]);
  kernel.packet[0] = z.val[0];
  kernel.packet[1] = z.val[1];
}

EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4f,4>& kernel)
{
  const float32x4x2_t tmp1 = vzipq_f32(kernel.packet[0], kernel.packet[1]);
  const float32x4x2_t tmp2 = vzipq_f32(kernel.packet[2], kernel.packet[3]);

  kernel.packet[0] = vcombine_f32(vget_low_f32(tmp1.val[0]),  vget_low_f32(tmp2.val[0]));
  kernel.packet[1] = vcombine_f32(vget_high_f32(tmp1.val[0]), vget_high_f32(tmp2.val[0]));
  kernel.packet[2] = vcombine_f32(vget_low_f32(tmp1.val[1]),  vget_low_f32(tmp2.val[1]));
  kernel.packet[3] = vcombine_f32(vget_high_f32(tmp1.val[1]), vget_high_f32(tmp2.val[1]));
}

EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2i,2>& kernel)
{
  const int32x2x2_t z = vzip_s32(kernel.packet[0], kernel.packet[1]);
  kernel.packet[0] = z.val[0];
  kernel.packet[1] = z.val[1];
}

EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet4i,4>& kernel)
{
  const int32x4x2_t tmp1 = vzipq_s32(kernel.packet[0], kernel.packet[1]);
  const int32x4x2_t tmp2 = vzipq_s32(kernel.packet[2], kernel.packet[3]);

  kernel.packet[0] = vcombine_s32(vget_low_s32(tmp1.val[0]),  vget_low_s32(tmp2.val[0]));
  kernel.packet[1] = vcombine_s32(vget_high_s32(tmp1.val[0]), vget_high_s32(tmp2.val[0]));
  kernel.packet[2] = vcombine_s32(vget_low_s32(tmp1.val[1]),  vget_low_s32(tmp2.val[1]));
  kernel.packet[3] = vcombine_s32(vget_high_s32(tmp1.val[1]), vget_high_s32(tmp2.val[1]));
}
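// Illustrative sketch (not part of Eigen): the 4x4 ptranspose above is a
// two-stage butterfly. Stage 1 zips rows (0,1) and rows (2,3), interleaving
// their lanes; stage 2 recombines the low/high halves of those zipped pairs so
// that output row j holds the j-th lane of every input row. The helper name is
// hypothetical; a portable scalar version reaching the same end state:
#if 0
inline void transpose4x4_scalar(float m[4][4])
{
  for (int r = 0; r < 4; ++r)
    for (int c = r + 1; c < 4; ++c) {
      const float t = m[r][c];  // the NEON version does the same reshuffle
      m[r][c] = m[c][r];        // with vzipq + vcombine instead of scalar swaps
      m[c][r] = t;
    }
}
#endif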
//---------- double ----------

// Clang 3.5 in the iOS toolchain has an ICE triggered by NEON intrinsics for double.
// Confirmed at least with __apple_build_version__ = 6000054.
#ifdef __apple_build_version__
// Let's hope that by the time __apple_build_version__ hits the 601* range, the bug will be fixed.
// https://gist.github.com/yamaya/2924292 suggests that the 3 first digits are only updated with
// major toolchain updates.
#define EIGEN_APPLE_DOUBLE_NEON_BUG (__apple_build_version__ < 6010000)
#else
#define EIGEN_APPLE_DOUBLE_NEON_BUG 0
#endif

#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG

// Bug 907: workaround missing declarations of the following two functions in the ADK.
// Defining these functions as templates ensures that if these intrinsics are
// already defined in arm_neon.h, then our workaround doesn't cause a conflict
// and has lower priority in overload resolution.
template <typename T>
uint64x2_t vreinterpretq_u64_f64(T a) { return (uint64x2_t) a; }

template <typename T>
float64x2_t vreinterpretq_f64_u64(T a) { return (float64x2_t) a; }

typedef float64x2_t Packet2d;
typedef float64x1_t Packet1d;

template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet2d type;
  typedef Packet2d half;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size = 2,
    HasHalfPacket = 0,

    HasCast      = 1,
    HasCmp       = 1,
    HasAdd       = 1,
    HasSub       = 1,
    HasShift     = 1,
    HasMul       = 1,
    HasNegate    = 1,
    HasAbs       = 1,
    HasArg       = 0,
    HasAbs2      = 1,
    HasAbsDiff   = 1,
    HasMin       = 1,
    HasMax       = 1,
    HasConj      = 1,
    HasSetLinear = 0,
    HasBlend     = 0,
    HasInsert    = 1,
    HasReduxp    = 1,

    HasDiv   = 1,
    HasFloor = 0,

    HasSin  = 0,
    HasCos  = 0,
    HasLog  = 0,
    HasExp  = 0,
    HasSqrt = 0,
    HasTanh = 0,
    HasErf  = 0
  };
};

template<> struct unpacket_traits<Packet2d>
{
  typedef double type;
  enum { size = 2, alignment = Aligned16, vectorizable = true, masked_load_available = false, masked_store_available = false };
  typedef Packet2d half;
};

template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return vdupq_n_f64(from); }

template<> EIGEN_STRONG_INLINE Packet2d plset<Packet2d>(const double& a)
{
  const double c[] = {0.0,1.0};
  return vaddq_f64(pset1<Packet2d>(a), vld1q_f64(c));
}

template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return vaddq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return vsubq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a) { return vnegq_f64(a); }

template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return vmulq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return vdivq_f64(a,b); }
#ifdef __ARM_FEATURE_FMA
// See bug 936. See above comment about FMA for float.
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vfmaq_f64(c,a,b); }
#else
template<> EIGEN_STRONG_INLINE Packet2d pmadd(const Packet2d& a, const Packet2d& b, const Packet2d& c) { return vmlaq_f64(c,a,b); }
#endif

template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return vminq_f64(a,b); }

template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return vmaxq_f64(a,b); }

// Logical operations are not defined for double, so we have to reinterpret-cast to/from
// the unsigned integer types using NEON intrinsics.
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b)
{ return vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a), vreinterpretq_u64_f64(b))); }

template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b)
{ return vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a), vreinterpretq_u64_f64(b))); }

template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b)
{ return vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a), vreinterpretq_u64_f64(b))); }

template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b)
{ return vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a), vreinterpretq_u64_f64(b))); }

template<> EIGEN_STRONG_INLINE Packet2d pcmp_le(const Packet2d& a, const Packet2d& b)
{ return vreinterpretq_f64_u64(vcleq_f64(a,b)); }

template<> EIGEN_STRONG_INLINE Packet2d pcmp_lt(const Packet2d& a, const Packet2d& b)
{ return vreinterpretq_f64_u64(vcltq_f64(a,b)); }

template<> EIGEN_STRONG_INLINE Packet2d pcmp_eq(const Packet2d& a, const Packet2d& b)
{ return vreinterpretq_f64_u64(vceqq_f64(a,b)); }

template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double* from)
{ EIGEN_DEBUG_ALIGNED_LOAD return vld1q_f64(from); }

template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
{ EIGEN_DEBUG_UNALIGNED_LOAD return vld1q_f64(from); }

template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double* from) { return vld1q_dup_f64(from); }

template<> EIGEN_STRONG_INLINE void pstore<double> (double* to, const Packet2d& from)
{ EIGEN_DEBUG_ALIGNED_STORE vst1q_f64(to,from); }

template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from)
{ EIGEN_DEBUG_UNALIGNED_STORE vst1q_f64(to,from); }

template<> EIGEN_DEVICE_FUNC inline Packet2d pgather<double, Packet2d>(const double* from, Index stride)
{
  Packet2d res = pset1<Packet2d>(0.0);
  res = vld1q_lane_f64(from + 0*stride, res, 0);
  res = vld1q_lane_f64(from + 1*stride, res, 1);
  return res;
}

template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet2d>(double* to, const Packet2d& from, Index stride)
{
  vst1q_lane_f64(to + stride*0, from, 0);
  vst1q_lane_f64(to + stride*1, from, 1);
}

template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { EIGEN_ARM_PREFETCH(addr); }
// FIXME only store the 2 first elements ?
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return vgetq_lane_f64(a,0); }

template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
{ return vcombine_f64(vget_high_f64(a), vget_low_f64(a)); }

template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a) { return vabsq_f64(a); }

#if EIGEN_COMP_CLANG && defined(__apple_build_version__)
// workaround ICE, see bug 907
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
{ return (vget_low_f64(a) + vget_high_f64(a))[0]; }
#else
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
{ return vget_lane_f64(vget_low_f64(a) + vget_high_f64(a), 0); }
#endif

template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{ return vaddq_f64(vzip1q_f64(vecs[0], vecs[1]), vzip2q_f64(vecs[0], vecs[1])); }

// Other reduction functions:

// mul
#if EIGEN_COMP_CLANG && defined(__apple_build_version__)
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
{ return (vget_low_f64(a) * vget_high_f64(a))[0]; }
#else
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
{ return vget_lane_f64(vget_low_f64(a) * vget_high_f64(a), 0); }
#endif

// min
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
{ return vgetq_lane_f64(vpminq_f64(a,a), 0); }

// max
template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
{ return vgetq_lane_f64(vpmaxq_f64(a,a), 0); }

// this PALIGN_NEON business is to work around a bug in LLVM Clang 3.0 causing incorrect compilation errors,
// see bug 347 and this LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=11074
#define PALIGN_NEON(Offset,Type,Command) \
template<>\
struct palign_impl<Offset,Type>\
{\
    EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
    {\
        if (Offset!=0)\
            first = Command(first, second, Offset);\
    }\
};\

PALIGN_NEON(0, Packet2d, vextq_f64)
PALIGN_NEON(1, Packet2d, vextq_f64)

#undef PALIGN_NEON

EIGEN_DEVICE_FUNC inline void ptranspose(PacketBlock<Packet2d,2>& kernel)
{
  const float64x2_t tmp1 = vzip1q_f64(kernel.packet[0], kernel.packet[1]);
  const float64x2_t tmp2 = vzip2q_f64(kernel.packet[0], kernel.packet[1]);

  kernel.packet[0] = tmp1;
  kernel.packet[1] = tmp2;
}

#endif // EIGEN_ARCH_ARM64

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_NEON_H