EIC code displayed by LXR

0001 // This file is part of Eigen, a lightweight C++ template library
0002 // for linear algebra.
0003 //
0004 // Copyright (C) 2014 Benoit Steiner (benoit.steiner.goog@gmail.com)
0005 //
0006 // This Source Code Form is subject to the terms of the Mozilla
0007 // Public License v. 2.0. If a copy of the MPL was not distributed
0008 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
0009 
0010 #ifndef EIGEN_PACKET_MATH_AVX_H
0011 #define EIGEN_PACKET_MATH_AVX_H
0012 
0013 namespace Eigen {
0014 
0015 namespace internal {
0016 
0017 #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
0018 #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
0019 #endif
0020 
0021 #if !defined(EIGEN_VECTORIZE_AVX512) && !defined(EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS)
0022 #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
0023 #endif
0024 
0025 #ifdef EIGEN_VECTORIZE_FMA
0026 #ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
0027 #define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
0028 #endif
0029 #endif
0030 
0031 typedef __m256  Packet8f;
0032 typedef __m256i Packet8i;
0033 typedef __m256d Packet4d;
0034 typedef eigen_packet_wrapper<__m128i, 2> Packet8h;
0035 typedef eigen_packet_wrapper<__m128i, 3> Packet8bf;
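
// Note: both Packet8h and Packet8bf wrap a __m128i; the distinct second
// template arguments of eigen_packet_wrapper exist only to make them distinct
// C++ types, so overload resolution can tell half and bfloat16 packets apart.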
0036 
0037 template<> struct is_arithmetic<__m256>  { enum { value = true }; };
0038 template<> struct is_arithmetic<__m256i> { enum { value = true }; };
0039 template<> struct is_arithmetic<__m256d> { enum { value = true }; };
0040 template<> struct is_arithmetic<Packet8h> { enum { value = true }; };
0041 template<> struct is_arithmetic<Packet8bf> { enum { value = true }; };
0042 
0043 #define _EIGEN_DECLARE_CONST_Packet8f(NAME,X) \
0044   const Packet8f p8f_##NAME = pset1<Packet8f>(X)
0045 
0046 #define _EIGEN_DECLARE_CONST_Packet4d(NAME,X) \
0047   const Packet4d p4d_##NAME = pset1<Packet4d>(X)
0048 
0049 #define _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(NAME,X) \
0050   const Packet8f p8f_##NAME = _mm256_castsi256_ps(pset1<Packet8i>(X))
0051 
0052 #define _EIGEN_DECLARE_CONST_Packet8i(NAME,X) \
0053   const Packet8i p8i_##NAME = pset1<Packet8i>(X)
0054 
0055 // Use the packet_traits defined in AVX512/PacketMath.h instead if we're going
0056 // to leverage AVX512 instructions.
0057 #ifndef EIGEN_VECTORIZE_AVX512
0058 template<> struct packet_traits<float>  : default_packet_traits
0059 {
0060   typedef Packet8f type;
0061   typedef Packet4f half;
0062   enum {
0063     Vectorizable = 1,
0064     AlignedOnScalar = 1,
0065     size = 8,
0066     HasHalfPacket = 1,
0067 
0068     HasCmp  = 1,
0069     HasDiv = 1,
0070     HasSin = EIGEN_FAST_MATH,
0071     HasCos = EIGEN_FAST_MATH,
0072     HasLog = 1,
0073     HasLog1p = 1,
0074     HasExpm1 = 1,
0075     HasExp = 1,
0076     HasNdtri = 1,
0077     HasBessel = 1,
0078     HasSqrt = 1,
0079     HasRsqrt = 1,
0080     HasTanh = EIGEN_FAST_MATH,
0081     HasErf = EIGEN_FAST_MATH,
0082     HasBlend = 1,
0083     HasRound = 1,
0084     HasFloor = 1,
0085     HasCeil = 1,
0086     HasRint = 1
0087   };
0088 };
0089 template<> struct packet_traits<double> : default_packet_traits
0090 {
0091   typedef Packet4d type;
0092   typedef Packet2d half;
0093   enum {
0094     Vectorizable = 1,
0095     AlignedOnScalar = 1,
0096     size=4,
0097     HasHalfPacket = 1,
0098 
0099     HasCmp  = 1,
0100     HasDiv  = 1,
0101     HasLog  = 1,
0102     HasExp  = 1,
0103     HasSqrt = 1,
0104     HasRsqrt = 1,
0105     HasBlend = 1,
0106     HasRound = 1,
0107     HasFloor = 1,
0108     HasCeil = 1,
0109     HasRint = 1
0110   };
0111 };
0112 
0113 template <>
0114 struct packet_traits<Eigen::half> : default_packet_traits {
0115   typedef Packet8h type;
0116   // There is no half-size packet for Packet8h.
0117   typedef Packet8h half;
0118   enum {
0119     Vectorizable = 1,
0120     AlignedOnScalar = 1,
0121     size = 8,
0122     HasHalfPacket = 0,
0123 
0124     HasCmp    = 1,
0125     HasAdd    = 1,
0126     HasSub    = 1,
0127     HasMul    = 1,
0128     HasDiv    = 1,
0129     HasSin    = EIGEN_FAST_MATH,
0130     HasCos    = EIGEN_FAST_MATH,
0131     HasNegate = 1,
0132     HasAbs    = 1,
0133     HasAbs2   = 0,
0134     HasMin    = 1,
0135     HasMax    = 1,
0136     HasConj   = 1,
0137     HasSetLinear = 0,
0138     HasLog    = 1,
0139     HasLog1p  = 1,
0140     HasExpm1  = 1,
0141     HasExp    = 1,
0142     HasSqrt   = 1,
0143     HasRsqrt  = 1,
0144     HasTanh   = EIGEN_FAST_MATH,
0145     HasErf    = EIGEN_FAST_MATH,
0146     HasBlend  = 0,
0147     HasRound  = 1,
0148     HasFloor  = 1,
0149     HasCeil   = 1,
0150     HasRint   = 1,
0151     HasBessel = 1,
0152     HasNdtri  = 1
0153   };
0154 };
0155 
0156 template <>
0157 struct packet_traits<bfloat16> : default_packet_traits {
0158   typedef Packet8bf type;
0159   // There is no half-size packet for current Packet8bf.
0160 // TODO: support it via an SSE path.
0161   typedef Packet8bf half;
0162   enum {
0163     Vectorizable = 1,
0164     AlignedOnScalar = 1,
0165     size = 8,
0166     HasHalfPacket = 0,
0167 
0168     HasCmp = 1,
0169     HasAdd = 1,
0170     HasSub = 1,
0171     HasMul = 1,
0172     HasDiv = 1,
0173     HasSin = EIGEN_FAST_MATH,
0174     HasCos = EIGEN_FAST_MATH,
0175     HasNegate = 1,
0176     HasAbs    = 1,
0177     HasAbs2   = 0,
0178     HasMin    = 1,
0179     HasMax    = 1,
0180     HasConj   = 1,
0181     HasSetLinear = 0,
0182     HasLog = 1,
0183     HasLog1p  = 1,
0184     HasExpm1  = 1,
0185     HasExp = 1,
0186     HasSqrt = 1,
0187     HasRsqrt = 1,
0188     HasTanh = EIGEN_FAST_MATH,
0189     HasErf = EIGEN_FAST_MATH,
0190     HasBlend = 0,
0191     HasRound = 1,
0192     HasFloor = 1,
0193     HasCeil = 1,
0194     HasRint = 1,
0195     HasBessel = 1,
0196     HasNdtri  = 1
0197   };
0198 };
0199 #endif
0200 
0201 template<> struct scalar_div_cost<float,true> { enum { value = 14 }; };
0202 template<> struct scalar_div_cost<double,true> { enum { value = 16 }; };
0203 
0204 /* Proper support for integers is only provided by AVX2. In the meantime, we'll
0205    use SSE instructions and packets to deal with integers.
0206 template<> struct packet_traits<int>    : default_packet_traits
0207 {
0208   typedef Packet8i type;
0209   enum {
0210     Vectorizable = 1,
0211     AlignedOnScalar = 1,
0212     size=8
0213   };
0214 };
0215 */
0216 
0217 template<> struct unpacket_traits<Packet8f> {
0218   typedef float     type;
0219   typedef Packet4f  half;
0220   typedef Packet8i  integer_packet;
0221   typedef uint8_t   mask_t;
0222   enum {size=8, alignment=Aligned32, vectorizable=true, masked_load_available=true, masked_store_available=true};
0223 };
0224 template<> struct unpacket_traits<Packet4d> {
0225   typedef double type;
0226   typedef Packet2d half;
0227   enum {size=4, alignment=Aligned32, vectorizable=true, masked_load_available=false, masked_store_available=false};
0228 };
0229 template<> struct unpacket_traits<Packet8i> { typedef int    type; typedef Packet4i half; enum {size=8, alignment=Aligned32, vectorizable=false, masked_load_available=false, masked_store_available=false}; };
0230 template<> struct unpacket_traits<Packet8bf> { typedef bfloat16 type; typedef Packet8bf half; enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; };
0231 
0232 // Helper function used by the low precision (half/bfloat16) comparisons.
0233 // It packs the 8 32-bit comparison flags down to 8 16-bit flags.
0234 EIGEN_STRONG_INLINE __m128i Pack16To8(Packet8f rf) {
0235   return _mm_packs_epi32(_mm256_extractf128_si256(_mm256_castps_si256(rf), 0),
0236                          _mm256_extractf128_si256(_mm256_castps_si256(rf), 1));
0237 }
0238 
0239 
0240 template<> EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float&  from) { return _mm256_set1_ps(from); }
0241 template<> EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) { return _mm256_set1_pd(from); }
0242 template<> EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int&    from) { return _mm256_set1_epi32(from); }
0243 
0244 template<> EIGEN_STRONG_INLINE Packet8f pset1frombits<Packet8f>(unsigned int from) { return _mm256_castsi256_ps(pset1<Packet8i>(from)); }
0245 template<> EIGEN_STRONG_INLINE Packet4d pset1frombits<Packet4d>(uint64_t from) { return _mm256_castsi256_pd(_mm256_set1_epi64x(from)); }
0246 
0247 template<> EIGEN_STRONG_INLINE Packet8f pzero(const Packet8f& /*a*/) { return _mm256_setzero_ps(); }
0248 template<> EIGEN_STRONG_INLINE Packet4d pzero(const Packet4d& /*a*/) { return _mm256_setzero_pd(); }
0249 template<> EIGEN_STRONG_INLINE Packet8i pzero(const Packet8i& /*a*/) { return _mm256_setzero_si256(); }
0250 
0251 
0252 template<> EIGEN_STRONG_INLINE Packet8f peven_mask(const Packet8f& /*a*/) { return _mm256_castsi256_ps(_mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1)); }
0253 template<> EIGEN_STRONG_INLINE Packet8i peven_mask(const Packet8i& /*a*/) { return _mm256_set_epi32(0, -1, 0, -1, 0, -1, 0, -1); }
0254 template<> EIGEN_STRONG_INLINE Packet4d peven_mask(const Packet4d& /*a*/) { return _mm256_castsi256_pd(_mm256_set_epi32(0, 0, -1, -1, 0, 0, -1, -1)); }
0255 
0256 template<> EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float*  from) { return _mm256_broadcast_ss(from); }
0257 template<> EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(const double* from) { return _mm256_broadcast_sd(from); }
0258 
0259 template<> EIGEN_STRONG_INLINE Packet8f plset<Packet8f>(const float& a) { return _mm256_add_ps(_mm256_set1_ps(a), _mm256_set_ps(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)); }
0260 template<> EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) { return _mm256_add_pd(_mm256_set1_pd(a), _mm256_set_pd(3.0,2.0,1.0,0.0)); }
0261 
0262 template<> EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_add_ps(a,b); }
0263 template<> EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_add_pd(a,b); }
0264 template<> EIGEN_STRONG_INLINE Packet8i padd<Packet8i>(const Packet8i& a, const Packet8i& b) {
0265 #ifdef EIGEN_VECTORIZE_AVX2
0266   return _mm256_add_epi32(a,b);
0267 #else
0268   __m128i lo = _mm_add_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
0269   __m128i hi = _mm_add_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
0270   return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
0271 #endif
0272 }
0273 
0274 template<> EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_sub_ps(a,b); }
0275 template<> EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_sub_pd(a,b); }
0276 template<> EIGEN_STRONG_INLINE Packet8i psub<Packet8i>(const Packet8i& a, const Packet8i& b) {
0277 #ifdef EIGEN_VECTORIZE_AVX2
0278   return _mm256_sub_epi32(a,b);
0279 #else
0280   __m128i lo = _mm_sub_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
0281   __m128i hi = _mm_sub_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
0282   return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
0283 #endif
0284 }
0285 
0286 template<> EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a)
0287 {
0288   return _mm256_sub_ps(_mm256_set1_ps(0.0),a);
0289 }
0290 template<> EIGEN_STRONG_INLINE Packet4d pnegate(const Packet4d& a)
0291 {
0292   return _mm256_sub_pd(_mm256_set1_pd(0.0),a);
0293 }
0294 
0295 template<> EIGEN_STRONG_INLINE Packet8f pconj(const Packet8f& a) { return a; }
0296 template<> EIGEN_STRONG_INLINE Packet4d pconj(const Packet4d& a) { return a; }
0297 template<> EIGEN_STRONG_INLINE Packet8i pconj(const Packet8i& a) { return a; }
0298 
0299 template<> EIGEN_STRONG_INLINE Packet8f pmul<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_mul_ps(a,b); }
0300 template<> EIGEN_STRONG_INLINE Packet4d pmul<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_mul_pd(a,b); }
0301 template<> EIGEN_STRONG_INLINE Packet8i pmul<Packet8i>(const Packet8i& a, const Packet8i& b) {
0302 #ifdef EIGEN_VECTORIZE_AVX2
0303   return _mm256_mullo_epi32(a,b);
0304 #else
0305   const __m128i lo = _mm_mullo_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
0306   const __m128i hi = _mm_mullo_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
0307   return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
0308 #endif
0309 }
0310 
0311 template<> EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_div_ps(a,b); }
0312 template<> EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_div_pd(a,b); }
0313 template<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& /*a*/, const Packet8i& /*b*/)
0314 { eigen_assert(false && "packet integer division is not supported by AVX");
0315   return pset1<Packet8i>(0);
0316 }
0317 
0318 #ifdef EIGEN_VECTORIZE_FMA
0319 template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
0320 #if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
0321   // Clang stupidly generates a vfmadd213ps instruction plus some vmovaps on registers,
0322   //  and even register spilling with clang>=6.0 (bug 1637).
0323   // Gcc stupidly generates a vfmadd132ps instruction.
0324   // So let's enforce it to generate a vfmadd231ps instruction since the most common use
0325   //  case is to accumulate the result of the product.
0326   Packet8f res = c;
0327   __asm__("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
0328   return res;
0329 #else
0330   return _mm256_fmadd_ps(a,b,c);
0331 #endif
0332 }
0333 template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
0334 #if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
0335   // see above
0336   Packet4d res = c;
0337   __asm__("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
0338   return res;
0339 #else
0340   return _mm256_fmadd_pd(a,b,c);
0341 #endif
0342 }
0343 #endif
0344 
0345 template<> EIGEN_STRONG_INLINE Packet8f pcmp_le(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_LE_OQ); }
0346 template<> EIGEN_STRONG_INLINE Packet8f pcmp_lt(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_LT_OQ); }
0347 template<> EIGEN_STRONG_INLINE Packet8f pcmp_lt_or_nan(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a, b, _CMP_NGE_UQ); }
0348 template<> EIGEN_STRONG_INLINE Packet8f pcmp_eq(const Packet8f& a, const Packet8f& b) { return _mm256_cmp_ps(a,b,_CMP_EQ_OQ); }
0349 
0350 template<> EIGEN_STRONG_INLINE Packet4d pcmp_le(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_LE_OQ); }
0351 template<> EIGEN_STRONG_INLINE Packet4d pcmp_lt(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_LT_OQ); }
0352 template<> EIGEN_STRONG_INLINE Packet4d pcmp_lt_or_nan(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a, b, _CMP_NGE_UQ); }
0353 template<> EIGEN_STRONG_INLINE Packet4d pcmp_eq(const Packet4d& a, const Packet4d& b) { return _mm256_cmp_pd(a,b,_CMP_EQ_OQ); }
0354 
0355 
0356 template<> EIGEN_STRONG_INLINE Packet8i pcmp_eq(const Packet8i& a, const Packet8i& b) {
0357 #ifdef EIGEN_VECTORIZE_AVX2
0358   return _mm256_cmpeq_epi32(a,b);
0359 #else
0360   __m128i lo = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 0), _mm256_extractf128_si256(b, 0));
0361   __m128i hi = _mm_cmpeq_epi32(_mm256_extractf128_si256(a, 1), _mm256_extractf128_si256(b, 1));
0362   return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
0363 #endif
0364 }
0365 
0366 template<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) {
0367 #if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
0368   // There appears to be a bug in GCC, by which the optimizer may flip
0369   // the argument order in calls to _mm_min_ps/_mm_max_ps, so we have to
0370   // resort to inline ASM here. This is supposed to be fixed in gcc6.3,
0371   // see also: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=72867
0372   Packet8f res;
0373   asm("vminps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
0374   return res;
0375 #else
0376   // Arguments are swapped to match NaN propagation behavior of std::min.
0377   return _mm256_min_ps(b,a);
0378 #endif
0379 }
0380 template<> EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) {
0381 #if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
0382   // See pmin above
0383   Packet4d res;
0384   asm("vminpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
0385   return res;
0386 #else
0387   // Arguments are swapped to match NaN propagation behavior of std::min.
0388   return _mm256_min_pd(b,a);
0389 #endif
0390 }
0391 
0392 template<> EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) {
0393 #if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
0394   // See pmin above
0395   Packet8f res;
0396   asm("vmaxps %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
0397   return res;
0398 #else
0399   // Arguments are swapped to match NaN propagation behavior of std::max.
0400   return _mm256_max_ps(b,a);
0401 #endif
0402 }
0403 template<> EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) {
0404 #if EIGEN_COMP_GNUC && EIGEN_COMP_GNUC < 63
0405   // See pmin above
0406   Packet4d res;
0407   asm("vmaxpd %[a], %[b], %[res]" : [res] "=x" (res) : [a] "x" (a), [b] "x" (b));
0408   return res;
0409 #else
0410   // Arguments are swapped to match NaN propagation behavior of std::max.
0411   return _mm256_max_pd(b,a);
0412 #endif
0413 }
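
// Illustrative note (not part of Eigen): the argument swap above exploits the
// x86 semantics of vminps/vmaxps, which return their second operand whenever
// either input is NaN. A scalar model of the resulting pmin behavior:
#if 0
float pmin_model(float a, float b) {
  // _mm256_min_ps(b, a) computes (b < a) ? b : a; a NaN makes the comparison
  // fail, so the second operand a is returned, matching std::min(a, b).
  return (b < a) ? b : a;
}
#endif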
0414 
0415 // Add specializations for min/max with prescribed NaN propagation.
0416 template<>
0417 EIGEN_STRONG_INLINE Packet8f pmin<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
0418   return pminmax_propagate_numbers(a, b, pmin<Packet8f>);
0419 }
0420 template<>
0421 EIGEN_STRONG_INLINE Packet4d pmin<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
0422   return pminmax_propagate_numbers(a, b, pmin<Packet4d>);
0423 }
0424 template<>
0425 EIGEN_STRONG_INLINE Packet8f pmax<PropagateNumbers, Packet8f>(const Packet8f& a, const Packet8f& b) {
0426   return pminmax_propagate_numbers(a, b, pmax<Packet8f>);
0427 }
0428 template<>
0429 EIGEN_STRONG_INLINE Packet4d pmax<PropagateNumbers, Packet4d>(const Packet4d& a, const Packet4d& b) {
0430   return pminmax_propagate_numbers(a, b, pmax<Packet4d>);
0431 }
0432 template<>
0433 EIGEN_STRONG_INLINE Packet8f pmin<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
0434   return pminmax_propagate_nan(a, b, pmin<Packet8f>);
0435 }
0436 template<>
0437 EIGEN_STRONG_INLINE Packet4d pmin<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
0438   return pminmax_propagate_nan(a, b, pmin<Packet4d>);
0439 }
0440 template<>
0441 EIGEN_STRONG_INLINE Packet8f pmax<PropagateNaN, Packet8f>(const Packet8f& a, const Packet8f& b) {
0442   return pminmax_propagate_nan(a, b, pmax<Packet8f>);
0443 }
0444 template<>
0445 EIGEN_STRONG_INLINE Packet4d pmax<PropagateNaN, Packet4d>(const Packet4d& a, const Packet4d& b) {
0446   return pminmax_propagate_nan(a, b, pmax<Packet4d>);
0447 }
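
// Illustrative sketch (not Eigen's actual helpers): conceptually,
// pminmax_propagate_numbers lets a number win over a NaN, while
// pminmax_propagate_nan lets a NaN win. A rough model of the former:
#if 0
template <typename Packet, typename Op>
Packet propagate_numbers_sketch(const Packet& a, const Packet& b, Op op) {
  Packet a_not_nan = pcmp_eq(a, a);  // all-ones where a is not NaN
  Packet b_not_nan = pcmp_eq(b, b);  // all-ones where b is not NaN
  // If a is NaN return b; if b is NaN return a; otherwise apply op.
  return pselect(a_not_nan, pselect(b_not_nan, op(a, b), a), b);
}
#endif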
0448 
0449 template<> EIGEN_STRONG_INLINE Packet8f print<Packet8f>(const Packet8f& a) { return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
0450 template<> EIGEN_STRONG_INLINE Packet4d print<Packet4d>(const Packet4d& a) { return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }
0451 
0452 template<> EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(const Packet8f& a) { return _mm256_ceil_ps(a); }
0453 template<> EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) { return _mm256_ceil_pd(a); }
0454 
0455 template<> EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) { return _mm256_floor_ps(a); }
0456 template<> EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) { return _mm256_floor_pd(a); }
0457 
0458 
0459 template<> EIGEN_STRONG_INLINE Packet8i ptrue<Packet8i>(const Packet8i& a) {
0460 #ifdef EIGEN_VECTORIZE_AVX2
0461   // vpcmpeqd has lower latency than the more general vcmpps
0462   return _mm256_cmpeq_epi32(a,a);
0463 #else
0464   const __m256 b = _mm256_castsi256_ps(a);
0465   return _mm256_castps_si256(_mm256_cmp_ps(b,b,_CMP_TRUE_UQ));
0466 #endif
0467 }
0468 
0469 template<> EIGEN_STRONG_INLINE Packet8f ptrue<Packet8f>(const Packet8f& a) {
0470 #ifdef EIGEN_VECTORIZE_AVX2
0471   // vpcmpeqd has lower latency than the more general vcmpps
0472   const __m256i b = _mm256_castps_si256(a);
0473   return _mm256_castsi256_ps(_mm256_cmpeq_epi32(b,b));
0474 #else
0475   return _mm256_cmp_ps(a,a,_CMP_TRUE_UQ);
0476 #endif
0477 }
0478 
0479 template<> EIGEN_STRONG_INLINE Packet4d ptrue<Packet4d>(const Packet4d& a) {
0480 #ifdef EIGEN_VECTORIZE_AVX2
0481   // vpcmpeqq has lower latency than the more general vcmppd
0482   const __m256i b = _mm256_castpd_si256(a);
0483   return _mm256_castsi256_pd(_mm256_cmpeq_epi64(b,b));
0484 #else
0485   return _mm256_cmp_pd(a,a,_CMP_TRUE_UQ);
0486 #endif
0487 }
0488 
0489 template<> EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_and_ps(a,b); }
0490 template<> EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_and_pd(a,b); }
0491 template<> EIGEN_STRONG_INLINE Packet8i pand<Packet8i>(const Packet8i& a, const Packet8i& b) {
0492 #ifdef EIGEN_VECTORIZE_AVX2
0493   return _mm256_and_si256(a,b);
0494 #else
0495   return _mm256_castps_si256(_mm256_and_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
0496 #endif
0497 }
0498 
0499 template<> EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_or_ps(a,b); }
0500 template<> EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_or_pd(a,b); }
0501 template<> EIGEN_STRONG_INLINE Packet8i por<Packet8i>(const Packet8i& a, const Packet8i& b) {
0502 #ifdef EIGEN_VECTORIZE_AVX2
0503   return _mm256_or_si256(a,b);
0504 #else
0505   return _mm256_castps_si256(_mm256_or_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
0506 #endif
0507 }
0508 
0509 template<> EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_xor_ps(a,b); }
0510 template<> EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_xor_pd(a,b); }
0511 template<> EIGEN_STRONG_INLINE Packet8i pxor<Packet8i>(const Packet8i& a, const Packet8i& b) {
0512 #ifdef EIGEN_VECTORIZE_AVX2
0513   return _mm256_xor_si256(a,b);
0514 #else
0515   return _mm256_castps_si256(_mm256_xor_ps(_mm256_castsi256_ps(a),_mm256_castsi256_ps(b)));
0516 #endif
0517 }
0518 
0519 template<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(b,a); }
0520 template<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(b,a); }
0521 template<> EIGEN_STRONG_INLINE Packet8i pandnot<Packet8i>(const Packet8i& a, const Packet8i& b) {
0522 #ifdef EIGEN_VECTORIZE_AVX2
0523   return _mm256_andnot_si256(b,a);
0524 #else
0525   return _mm256_castps_si256(_mm256_andnot_ps(_mm256_castsi256_ps(b),_mm256_castsi256_ps(a)));
0526 #endif
0527 }
0528 
0529 template<> EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a)
0530 {
0531   const Packet8f mask = pset1frombits<Packet8f>(static_cast<numext::uint32_t>(0x80000000u));
0532   const Packet8f prev0dot5 = pset1frombits<Packet8f>(static_cast<numext::uint32_t>(0x3EFFFFFFu));
0533   return _mm256_round_ps(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
0534 }
0535 template<> EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a)
0536 {
0537   const Packet4d mask = pset1frombits<Packet4d>(static_cast<numext::uint64_t>(0x8000000000000000ull));
0538   const Packet4d prev0dot5 = pset1frombits<Packet4d>(static_cast<numext::uint64_t>(0x3FDFFFFFFFFFFFFFull));
0539   return _mm256_round_pd(padd(por(pand(a, mask), prev0dot5), a), _MM_FROUND_TO_ZERO);
0540 }
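
// Illustrative note (not part of Eigen): pround rounds half away from zero by
// adding, with the sign of the input, the largest float strictly below 0.5
// (0x3EFFFFFF ~ 0.49999997f) and then truncating. Adding exactly 0.5f would
// fail for inputs such as x = 0.49999997f, where x + 0.5f rounds up to 1.0f
// and the result would be 1 instead of 0. Scalar model (needs <cmath>):
#if 0
float pround_model(float x) {
  const float prev0dot5 = 0.49999997f;  // bit pattern 0x3EFFFFFF
  return std::trunc(x + std::copysign(prev0dot5, x));
}
#endif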
0541 
0542 template<> EIGEN_STRONG_INLINE Packet8f pselect<Packet8f>(const Packet8f& mask, const Packet8f& a, const Packet8f& b)
0543 { return _mm256_blendv_ps(b,a,mask); }
0544 template<> EIGEN_STRONG_INLINE Packet4d pselect<Packet4d>(const Packet4d& mask, const Packet4d& a, const Packet4d& b)
0545 { return _mm256_blendv_pd(b,a,mask); }
0546 
0547 template<int N> EIGEN_STRONG_INLINE Packet8i parithmetic_shift_right(Packet8i a) {
0548 #ifdef EIGEN_VECTORIZE_AVX2
0549   return _mm256_srai_epi32(a, N);
0550 #else
0551   __m128i lo = _mm_srai_epi32(_mm256_extractf128_si256(a, 0), N);
0552   __m128i hi = _mm_srai_epi32(_mm256_extractf128_si256(a, 1), N);
0553   return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
0554 #endif
0555 }
0556 
0557 template<int N> EIGEN_STRONG_INLINE Packet8i plogical_shift_right(Packet8i a) {
0558 #ifdef EIGEN_VECTORIZE_AVX2
0559   return _mm256_srli_epi32(a, N);
0560 #else
0561   __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(a, 0), N);
0562   __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(a, 1), N);
0563   return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
0564 #endif
0565 }
0566 
0567 template<int N> EIGEN_STRONG_INLINE Packet8i plogical_shift_left(Packet8i a) {
0568 #ifdef EIGEN_VECTORIZE_AVX2
0569   return _mm256_slli_epi32(a, N);
0570 #else
0571   __m128i lo = _mm_slli_epi32(_mm256_extractf128_si256(a, 0), N);
0572   __m128i hi = _mm_slli_epi32(_mm256_extractf128_si256(a, 1), N);
0573   return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), (hi), 1);
0574 #endif
0575 }
0576 
0577 template<> EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }
0578 template<> EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }
0579 template<> EIGEN_STRONG_INLINE Packet8i pload<Packet8i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from)); }
0580 
0581 template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from); }
0582 template<> EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); }
0583 template<> EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }
0584 
0585 template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from, uint8_t umask) {
0586   Packet8i mask = _mm256_set1_epi8(static_cast<char>(umask));
0587   const Packet8i bit_mask = _mm256_set_epi32(0xffffff7f, 0xffffffbf, 0xffffffdf, 0xffffffef, 0xfffffff7, 0xfffffffb, 0xfffffffd, 0xfffffffe);
0588   mask = por<Packet8i>(mask, bit_mask);
0589   mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
0590   EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_maskload_ps(from, mask);
0591 }
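
// Illustrative note (not part of Eigen): the mask expansion above turns the
// 8-bit umask into a per-lane select mask. _mm256_set1_epi8 replicates umask
// into every byte, so bit i of 32-bit lane i equals bit i of umask; bit_mask
// sets every other bit of lane i, so lane i compares equal to all-ones exactly
// when bit i of umask is set. Per-lane scalar model (needs <cstdint>):
#if 0
uint32_t lane_mask_model(uint8_t umask, int i) {  // lane index i in [0, 8)
  uint32_t lane = 0x01010101u * umask;  // umask broadcast to all four bytes
  lane |= ~(1u << i);                   // the bit_mask entry for lane i
  return lane == 0xffffffffu ? 0xffffffffu : 0u;  // pcmp_eq against all-ones
}
#endif
// The masked pstoreu<float> further down reuses the same expansion.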
0592 
0593 // Loads 4 floats from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3, a3}
0594 template<> EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from)
0595 {
0596   // TODO try to find a way to avoid the need for a temporary register
0597 //   Packet8f tmp  = _mm256_castps128_ps256(_mm_loadu_ps(from));
0598 //   tmp = _mm256_insertf128_ps(tmp, _mm_movehl_ps(_mm256_castps256_ps128(tmp),_mm256_castps256_ps128(tmp)), 1);
0599 //   return _mm256_unpacklo_ps(tmp,tmp);
0600 
0601   // _mm256_insertf128_ps is very slow on Haswell, thus:
0602   Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
0603   // mimic an "in-place" permutation of the lower 128 bits using a blend
0604   tmp = _mm256_blend_ps(tmp,_mm256_castps128_ps256(_mm_permute_ps( _mm256_castps256_ps128(tmp), _MM_SHUFFLE(1,0,1,0))), 15);
0605   // then we can perform a consistent permutation on the global register to get everything in shape:
0606   return  _mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2));
0607 }
0608 // Loads 2 doubles from memory and returns the packet {a0, a0, a1, a1}
0609 template<> EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(const double* from)
0610 {
0611   Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);
0612   return  _mm256_permute_pd(tmp, 3<<2);
0613 }
0614 
0615 // Loads 2 floats from memory and returns the packet {a0, a0, a0, a0, a1, a1, a1, a1}
0616 template<> EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const float* from)
0617 {
0618   Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
0619   return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1);
0620 }
0621 
0622 template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet8f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); }
0623 template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); }
0624 template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
0625 
0626 template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*   to, const Packet8f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from); }
0627 template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); }
0628 template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*       to, const Packet8i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
0629 
0630 template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*   to, const Packet8f& from, uint8_t umask) {
0631   Packet8i mask = _mm256_set1_epi8(static_cast<char>(umask));
0632   const Packet8i bit_mask = _mm256_set_epi32(0xffffff7f, 0xffffffbf, 0xffffffdf, 0xffffffef, 0xfffffff7, 0xfffffffb, 0xfffffffd, 0xfffffffe);
0633   mask = por<Packet8i>(mask, bit_mask);
0634   mask = pcmp_eq<Packet8i>(mask, _mm256_set1_epi32(0xffffffff));
0635   EIGEN_DEBUG_UNALIGNED_STORE return _mm256_maskstore_ps(to, mask, from);
0636 }
0637 
0638 // NOTE: leverage _mm256_i32gather_ps and _mm256_i32gather_pd if AVX2 instructions are available
0639 // NOTE: for the record the following seems to be slower: return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4);
0640 template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride)
0641 {
0642   return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
0643                        from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
0644 }
0645 template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, Index stride)
0646 {
0647   return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
0648 }
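
// Illustrative usage (not from Eigen): pgather reads one packet's worth of
// elements separated by a constant stride, e.g. a column of a row-major
// float matrix (data, cols and j are assumed names):
#if 0
Packet8f column_j = pgather<float, Packet8f>(data + j, cols);
// loads data[j], data[cols + j], ..., data[7*cols + j]
#endif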
0649 
0650 template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride)
0651 {
0652   __m128 low = _mm256_extractf128_ps(from, 0);
0653   to[stride*0] = _mm_cvtss_f32(low);
0654   to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));
0655   to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));
0656   to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));
0657 
0658   __m128 high = _mm256_extractf128_ps(from, 1);
0659   to[stride*4] = _mm_cvtss_f32(high);
0660   to[stride*5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));
0661   to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
0662   to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
0663 }
0664 template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, Index stride)
0665 {
0666   __m128d low = _mm256_extractf128_pd(from, 0);
0667   to[stride*0] = _mm_cvtsd_f64(low);
0668   to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));
0669   __m128d high = _mm256_extractf128_pd(from, 1);
0670   to[stride*2] = _mm_cvtsd_f64(high);
0671   to[stride*3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));
0672 }
0673 
0674 template<> EIGEN_STRONG_INLINE void pstore1<Packet8f>(float* to, const float& a)
0675 {
0676   Packet8f pa = pset1<Packet8f>(a);
0677   pstore(to, pa);
0678 }
0679 template<> EIGEN_STRONG_INLINE void pstore1<Packet4d>(double* to, const double& a)
0680 {
0681   Packet4d pa = pset1<Packet4d>(a);
0682   pstore(to, pa);
0683 }
0684 template<> EIGEN_STRONG_INLINE void pstore1<Packet8i>(int* to, const int& a)
0685 {
0686   Packet8i pa = pset1<Packet8i>(a);
0687   pstore(to, pa);
0688 }
0689 
0690 #ifndef EIGEN_VECTORIZE_AVX512
0691 template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
0692 template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
0693 template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
0694 #endif
0695 
0696 template<> EIGEN_STRONG_INLINE float  pfirst<Packet8f>(const Packet8f& a) {
0697   return _mm_cvtss_f32(_mm256_castps256_ps128(a));
0698 }
0699 template<> EIGEN_STRONG_INLINE double pfirst<Packet4d>(const Packet4d& a) {
0700   return _mm_cvtsd_f64(_mm256_castpd256_pd128(a));
0701 }
0702 template<> EIGEN_STRONG_INLINE int    pfirst<Packet8i>(const Packet8i& a) {
0703   return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
0704 }
0705 
0706 
0707 template<> EIGEN_STRONG_INLINE Packet8f preverse(const Packet8f& a)
0708 {
0709   __m256 tmp = _mm256_shuffle_ps(a,a,0x1b);
0710   return _mm256_permute2f128_ps(tmp, tmp, 1);
0711 }
0712 template<> EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a)
0713 {
0714    __m256d tmp = _mm256_shuffle_pd(a,a,5);
0715   return _mm256_permute2f128_pd(tmp, tmp, 1);
0716   #if 0
0717   // This version is unlikely to be faster as _mm256_shuffle_ps and _mm256_permute_pd
0718   // exhibit the same latency/throughput, but it is here for future reference/benchmarking...
0719   __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
0720     return _mm256_permute_pd(swap_halves,5);
0721   #endif
0722 }
0723 
0724 // pabs should be ok
0725 template<> EIGEN_STRONG_INLINE Packet8f pabs(const Packet8f& a)
0726 {
0727   const Packet8f mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
0728   return _mm256_and_ps(a,mask);
0729 }
0730 template<> EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a)
0731 {
0732   const Packet4d mask = _mm256_castsi256_pd(_mm256_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
0733   return _mm256_and_pd(a,mask);
0734 }
0735 
0736 template<> EIGEN_STRONG_INLINE Packet8f pfrexp<Packet8f>(const Packet8f& a, Packet8f& exponent) {
0737   return pfrexp_generic(a,exponent);
0738 }
0739 
0740 // Extract the biased exponent without relying on a 64-bit integer packet (no Packet4l exists here).
0741 template<>
0742 EIGEN_STRONG_INLINE  
0743 Packet4d pfrexp_generic_get_biased_exponent(const Packet4d& a) {
0744   const Packet4d cst_exp_mask  = pset1frombits<Packet4d>(static_cast<uint64_t>(0x7ff0000000000000ull));
0745   __m256i a_expo = _mm256_castpd_si256(pand(a, cst_exp_mask));
0746 #ifdef EIGEN_VECTORIZE_AVX2
0747   a_expo = _mm256_srli_epi64(a_expo, 52);
0748   __m128i lo = _mm256_extractf128_si256(a_expo, 0);
0749   __m128i hi = _mm256_extractf128_si256(a_expo, 1);
0750 #else
0751   __m128i lo = _mm256_extractf128_si256(a_expo, 0);
0752   __m128i hi = _mm256_extractf128_si256(a_expo, 1);
0753   lo = _mm_srli_epi64(lo, 52);
0754   hi = _mm_srli_epi64(hi, 52);
0755 #endif
0756   Packet2d exponent_lo = _mm_cvtepi32_pd(vec4i_swizzle1(lo, 0, 2, 1, 3));
0757   Packet2d exponent_hi = _mm_cvtepi32_pd(vec4i_swizzle1(hi, 0, 2, 1, 3));
0758   Packet4d exponent = _mm256_insertf128_pd(_mm256_setzero_pd(), exponent_lo, 0);
0759   exponent = _mm256_insertf128_pd(exponent, exponent_hi, 1);
0760   return exponent;
0761 }
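
// Illustrative note (not part of Eigen): per 64-bit lane, the code above
// isolates the exponent field and converts it to double. Scalar model
// (needs <cstring> and <cstdint>):
#if 0
double biased_exponent_model(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  return static_cast<double>((bits & 0x7ff0000000000000ull) >> 52);  // bits 62..52
}
#endif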
0762 
0763 
0764 template<> EIGEN_STRONG_INLINE Packet4d pfrexp<Packet4d>(const Packet4d& a, Packet4d& exponent) {
0765   return pfrexp_generic(a, exponent);
0766 }
0767 
0768 template<> EIGEN_STRONG_INLINE Packet8f pldexp<Packet8f>(const Packet8f& a, const Packet8f& exponent) {
0769   return pldexp_generic(a, exponent);
0770 }
0771 
0772 template<> EIGEN_STRONG_INLINE Packet4d pldexp<Packet4d>(const Packet4d& a, const Packet4d& exponent) {
0773   // Clamp exponent to [-2099, 2099]
0774   const Packet4d max_exponent = pset1<Packet4d>(2099.0);
0775   const Packet4i e = _mm256_cvtpd_epi32(pmin(pmax(exponent, pnegate(max_exponent)), max_exponent));
0776   
0777   // Split 2^e into four factors and multiply.
0778   const Packet4i bias = pset1<Packet4i>(1023);
0779   Packet4i b = parithmetic_shift_right<2>(e);  // floor(e/4)
0780   
0781   // 2^b
0782   Packet4i hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
0783   Packet4i lo = _mm_slli_epi64(hi, 52);
0784   hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
0785   Packet4d c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
0786   Packet4d out = pmul(pmul(pmul(a, c), c), c);  // a * 2^(3b)
0787   
0788   // 2^(e - 3b)
0789   b = psub(psub(psub(e, b), b), b);  // e - 3b
0790   hi = vec4i_swizzle1(padd(b, bias), 0, 2, 1, 3);
0791   lo = _mm_slli_epi64(hi, 52);
0792   hi = _mm_slli_epi64(_mm_srli_epi64(hi, 32), 52);
0793   c = _mm256_castsi256_pd(_mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1));
0794   out = pmul(out, c); // a * 2^e
0795   return out;
0796 }
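
// Illustrative note (not part of Eigen): with e clamped to [-2099, 2099] and
// b = e >> 2 (floor(e/4)), both b + 1023 and (e - 3*b) + 1023 stay within the
// valid biased exponent range of a double, so each factor can be built by
// writing the biased exponent directly into bits 62..52. Scalar model of
// a * 2^b * 2^b * 2^b * 2^(e-3b) == a * 2^e (needs <cstring> and <cstdint>):
#if 0
double make_pow2(int exp) {  // hypothetical helper: exact power of two
  uint64_t bits = static_cast<uint64_t>(exp + 1023) << 52;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d;
}
double pldexp_model(double a, int e) {  // assumes e is already clamped
  int b = e >> 2;
  return ((a * make_pow2(b)) * make_pow2(b)) * make_pow2(b) * make_pow2(e - 3 * b);
}
#endif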
0797 
0798 template<> EIGEN_STRONG_INLINE float predux<Packet8f>(const Packet8f& a)
0799 {
0800   return predux(Packet4f(_mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1))));
0801 }
0802 template<> EIGEN_STRONG_INLINE double predux<Packet4d>(const Packet4d& a)
0803 {
0804   return predux(Packet2d(_mm_add_pd(_mm256_castpd256_pd128(a),_mm256_extractf128_pd(a,1))));
0805 }
0806 
0807 template<> EIGEN_STRONG_INLINE Packet4f predux_half_dowto4<Packet8f>(const Packet8f& a)
0808 {
0809   return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));
0810 }
0811 
0812 template<> EIGEN_STRONG_INLINE float predux_mul<Packet8f>(const Packet8f& a)
0813 {
0814   Packet8f tmp;
0815   tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a,a,1));
0816   tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
0817   return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
0818 }
0819 template<> EIGEN_STRONG_INLINE double predux_mul<Packet4d>(const Packet4d& a)
0820 {
0821   Packet4d tmp;
0822   tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a,a,1));
0823   return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp,tmp,1)));
0824 }
0825 
0826 template<> EIGEN_STRONG_INLINE float predux_min<Packet8f>(const Packet8f& a)
0827 {
0828   Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a,a,1));
0829   tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
0830   return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
0831 }
0832 template<> EIGEN_STRONG_INLINE double predux_min<Packet4d>(const Packet4d& a)
0833 {
0834   Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a,a,1));
0835   return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
0836 }
0837 
0838 template<> EIGEN_STRONG_INLINE float predux_max<Packet8f>(const Packet8f& a)
0839 {
0840   Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a,a,1));
0841   tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
0842   return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
0843 }
0844 
0845 template<> EIGEN_STRONG_INLINE double predux_max<Packet4d>(const Packet4d& a)
0846 {
0847   Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a,a,1));
0848   return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
0849 }
0850 
0851 // not needed yet
0852 // template<> EIGEN_STRONG_INLINE bool predux_all(const Packet8f& x)
0853 // {
0854 //   return _mm256_movemask_ps(x)==0xFF;
0855 // }
0856 
0857 template<> EIGEN_STRONG_INLINE bool predux_any(const Packet8f& x)
0858 {
0859   return _mm256_movemask_ps(x)!=0;
0860 }
0861 
0862 EIGEN_DEVICE_FUNC inline void
0863 ptranspose(PacketBlock<Packet8f,8>& kernel) {
0864   __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
0865   __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
0866   __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
0867   __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
0868   __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
0869   __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
0870   __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
0871   __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
0872   __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
0873   __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
0874   __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
0875   __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
0876   __m256 S4 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(1,0,1,0));
0877   __m256 S5 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(3,2,3,2));
0878   __m256 S6 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(1,0,1,0));
0879   __m256 S7 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(3,2,3,2));
0880   kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);
0881   kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20);
0882   kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);
0883   kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);
0884   kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);
0885   kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);
0886   kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);
0887   kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);
0888 }
0889 
0890 EIGEN_DEVICE_FUNC inline void
0891 ptranspose(PacketBlock<Packet8f,4>& kernel) {
0892   __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
0893   __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
0894   __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
0895   __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
0896 
0897   __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
0898   __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
0899   __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
0900   __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
0901 
0902   kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);
0903   kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);
0904   kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);
0905   kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);
0906 }
0907 
0908 EIGEN_DEVICE_FUNC inline void
0909 ptranspose(PacketBlock<Packet4d,4>& kernel) {
0910   __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
0911   __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
0912   __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
0913   __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);
0914 
0915   kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);
0916   kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);
0917   kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);
0918   kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);
0919 }
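
// Illustrative usage (not from Eigen): ptranspose transposes a register-held
// tile in place; e.g. for an 8x8 float tile (the row pointers are assumed):
#if 0
PacketBlock<Packet8f, 8> block;
for (int i = 0; i < 8; ++i) block.packet[i] = ploadu<Packet8f>(rows[i]);
ptranspose(block);  // block.packet[j] now holds what was column j
#endif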
0920 
0921 template<> EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket, const Packet8f& elsePacket) {
0922   const __m256 zero = _mm256_setzero_ps();
0923   const __m256 select = _mm256_set_ps(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4], ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
0924   __m256 false_mask = _mm256_cmp_ps(select, zero, _CMP_EQ_UQ);
0925   return _mm256_blendv_ps(thenPacket, elsePacket, false_mask);
0926 }
0927 template<> EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, const Packet4d& thenPacket, const Packet4d& elsePacket) {
0928   const __m256d zero = _mm256_setzero_pd();
0929   const __m256d select = _mm256_set_pd(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
0930   __m256d false_mask = _mm256_cmp_pd(select, zero, _CMP_EQ_UQ);
0931   return _mm256_blendv_pd(thenPacket, elsePacket, false_mask);
0932 }
0933 
0934 // Packet math for Eigen::half
0935 
0936 template<> struct unpacket_traits<Packet8h> { typedef Eigen::half type; enum {size=8, alignment=Aligned16, vectorizable=true, masked_load_available=false, masked_store_available=false}; typedef Packet8h half; };
0937 
0938 template<> EIGEN_STRONG_INLINE Packet8h pset1<Packet8h>(const Eigen::half& from) {
0939   return _mm_set1_epi16(numext::bit_cast<numext::uint16_t>(from));
0940 }
0941 
0942 template<> EIGEN_STRONG_INLINE Eigen::half pfirst<Packet8h>(const Packet8h& from) {
0943   return numext::bit_cast<Eigen::half>(static_cast<numext::uint16_t>(_mm_extract_epi16(from, 0)));
0944 }
0945 
0946 template<> EIGEN_STRONG_INLINE Packet8h pload<Packet8h>(const Eigen::half* from) {
0947   return _mm_load_si128(reinterpret_cast<const __m128i*>(from));
0948 }
0949 
0950 template<> EIGEN_STRONG_INLINE Packet8h ploadu<Packet8h>(const Eigen::half* from) {
0951   return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
0952 }
0953 
0954 template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet8h& from) {
0955   _mm_store_si128(reinterpret_cast<__m128i*>(to), from);
0956 }
0957 
0958 template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet8h& from) {
0959   _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from);
0960 }
0961 
0962 template<> EIGEN_STRONG_INLINE Packet8h
0963 ploaddup<Packet8h>(const Eigen::half*  from) {
0964   const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
0965   const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
0966   const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
0967   const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
0968   return _mm_set_epi16(d, d, c, c, b, b, a, a);
0969 }
0970 
0971 template<> EIGEN_STRONG_INLINE Packet8h
0972 ploadquad<Packet8h>(const Eigen::half* from) {
0973   const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
0974   const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
0975   return _mm_set_epi16(b, b, b, b, a, a, a, a);
0976 }
0977 
0978 template<> EIGEN_STRONG_INLINE Packet8h ptrue(const Packet8h& a) {
0979  return _mm_cmpeq_epi32(a, a);
0980 }
0981 
0982 template <>
0983 EIGEN_STRONG_INLINE Packet8h pabs(const Packet8h& a) {
0984   const __m128i sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
0985   return _mm_andnot_si128(sign_mask, a);
0986 }
0987 
0988 EIGEN_STRONG_INLINE Packet8f half2float(const Packet8h& a) {
0989 #ifdef EIGEN_HAS_FP16_C
0990   return _mm256_cvtph_ps(a);
0991 #else
0992   EIGEN_ALIGN32 Eigen::half aux[8];
0993   pstore(aux, a);
0994   float f0(aux[0]);
0995   float f1(aux[1]);
0996   float f2(aux[2]);
0997   float f3(aux[3]);
0998   float f4(aux[4]);
0999   float f5(aux[5]);
1000   float f6(aux[6]);
1001   float f7(aux[7]);
1002 
1003   return _mm256_set_ps(f7, f6, f5, f4, f3, f2, f1, f0);
1004 #endif
1005 }
1006 
1007 EIGEN_STRONG_INLINE Packet8h float2half(const Packet8f& a) {
1008 #ifdef EIGEN_HAS_FP16_C
1009   return _mm256_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
1010 #else
1011   EIGEN_ALIGN32 float aux[8];
1012   pstore(aux, a);
1013   const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[0]));
1014   const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[1]));
1015   const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[2]));
1016   const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[3]));
1017   const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[4]));
1018   const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[5]));
1019   const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[6]));
1020   const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(Eigen::half(aux[7]));
1021   return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
1022 #endif
1023 }
1024 
1025 template <>
1026 EIGEN_STRONG_INLINE Packet8h pmin<Packet8h>(const Packet8h& a,
1027                                             const Packet8h& b) {
1028   return float2half(pmin<Packet8f>(half2float(a), half2float(b)));
1029 }
1030 
1031 template <>
1032 EIGEN_STRONG_INLINE Packet8h pmax<Packet8h>(const Packet8h& a,
1033                                             const Packet8h& b) {
1034   return float2half(pmax<Packet8f>(half2float(a), half2float(b)));
1035 }
1036 
1037 template <>
1038 EIGEN_STRONG_INLINE Packet8h plset<Packet8h>(const half& a) {
1039   return float2half(plset<Packet8f>(static_cast<float>(a)));
1040 }
1041 
1042 template<> EIGEN_STRONG_INLINE Packet8h por(const Packet8h& a,const Packet8h& b) {
1043   // Packet8h is a wrapper around __m128i that converts implicitly, so the
1044   // integer intrinsics can be called on it directly:
1045   return _mm_or_si128(a,b);
1046 }
1047 template<> EIGEN_STRONG_INLINE Packet8h pxor(const Packet8h& a,const Packet8h& b) {
1048   return _mm_xor_si128(a,b);
1049 }
1050 template<> EIGEN_STRONG_INLINE Packet8h pand(const Packet8h& a,const Packet8h& b) {
1051   return _mm_and_si128(a,b);
1052 }
1053 template<> EIGEN_STRONG_INLINE Packet8h pandnot(const Packet8h& a,const Packet8h& b) {
1054   return _mm_andnot_si128(b,a);
1055 }
1056 
1057 template<> EIGEN_STRONG_INLINE Packet8h pselect(const Packet8h& mask, const Packet8h& a, const Packet8h& b) {
1058   return _mm_blendv_epi8(b, a, mask);
1059 }
1060 
1061 template<> EIGEN_STRONG_INLINE Packet8h pround<Packet8h>(const Packet8h& a) {
1062   return float2half(pround<Packet8f>(half2float(a)));
1063 }
1064 
1065 template<> EIGEN_STRONG_INLINE Packet8h print<Packet8h>(const Packet8h& a) {
1066   return float2half(print<Packet8f>(half2float(a)));
1067 }
1068 
1069 template<> EIGEN_STRONG_INLINE Packet8h pceil<Packet8h>(const Packet8h& a) {
1070   return float2half(pceil<Packet8f>(half2float(a)));
1071 }
1072 
1073 template<> EIGEN_STRONG_INLINE Packet8h pfloor<Packet8h>(const Packet8h& a) {
1074   return float2half(pfloor<Packet8f>(half2float(a)));
1075 }
1076 
1077 template<> EIGEN_STRONG_INLINE Packet8h pcmp_eq(const Packet8h& a,const Packet8h& b) {
1078   return Pack16To8(pcmp_eq(half2float(a), half2float(b)));
1079 }
1080 
1081 template<> EIGEN_STRONG_INLINE Packet8h pcmp_le(const Packet8h& a,const Packet8h& b) {
1082   return Pack16To8(pcmp_le(half2float(a), half2float(b)));
1083 }
1084 
1085 template<> EIGEN_STRONG_INLINE Packet8h pcmp_lt(const Packet8h& a,const Packet8h& b) {
1086   return Pack16To8(pcmp_lt(half2float(a), half2float(b)));
1087 }
1088 
1089 template<> EIGEN_STRONG_INLINE Packet8h pcmp_lt_or_nan(const Packet8h& a,const Packet8h& b) {
1090   return Pack16To8(pcmp_lt_or_nan(half2float(a), half2float(b)));
1091 }
1092 
1093 template<> EIGEN_STRONG_INLINE Packet8h pconj(const Packet8h& a) { return a; }
1094 
1095 template<> EIGEN_STRONG_INLINE Packet8h pnegate(const Packet8h& a) {
1096   Packet8h sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
1097   return _mm_xor_si128(a, sign_mask);
1098 }
1099 
1100 template<> EIGEN_STRONG_INLINE Packet8h padd<Packet8h>(const Packet8h& a, const Packet8h& b) {
1101   Packet8f af = half2float(a);
1102   Packet8f bf = half2float(b);
1103   Packet8f rf = padd(af, bf);
1104   return float2half(rf);
1105 }
1106 
1107 template<> EIGEN_STRONG_INLINE Packet8h psub<Packet8h>(const Packet8h& a, const Packet8h& b) {
1108   Packet8f af = half2float(a);
1109   Packet8f bf = half2float(b);
1110   Packet8f rf = psub(af, bf);
1111   return float2half(rf);
1112 }
1113 
1114 template<> EIGEN_STRONG_INLINE Packet8h pmul<Packet8h>(const Packet8h& a, const Packet8h& b) {
1115   Packet8f af = half2float(a);
1116   Packet8f bf = half2float(b);
1117   Packet8f rf = pmul(af, bf);
1118   return float2half(rf);
1119 }
1120 
1121 template<> EIGEN_STRONG_INLINE Packet8h pdiv<Packet8h>(const Packet8h& a, const Packet8h& b) {
1122   Packet8f af = half2float(a);
1123   Packet8f bf = half2float(b);
1124   Packet8f rf = pdiv(af, bf);
1125   return float2half(rf);
1126 }
1127 
1128 template<> EIGEN_STRONG_INLINE Packet8h pgather<Eigen::half, Packet8h>(const Eigen::half* from, Index stride)
1129 {
1130   const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0*stride]);
1131   const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1*stride]);
1132   const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2*stride]);
1133   const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3*stride]);
1134   const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4*stride]);
1135   const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5*stride]);
1136   const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6*stride]);
1137   const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7*stride]);
1138   return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
1139 }
1140 
1141 template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet8h>(Eigen::half* to, const Packet8h& from, Index stride)
1142 {
1143   EIGEN_ALIGN32 Eigen::half aux[8];
1144   pstore(aux, from);
1145   to[stride*0] = aux[0];
1146   to[stride*1] = aux[1];
1147   to[stride*2] = aux[2];
1148   to[stride*3] = aux[3];
1149   to[stride*4] = aux[4];
1150   to[stride*5] = aux[5];
1151   to[stride*6] = aux[6];
1152   to[stride*7] = aux[7];
1153 }
1154 
1155 template<> EIGEN_STRONG_INLINE Eigen::half predux<Packet8h>(const Packet8h& a) {
1156   Packet8f af = half2float(a);
1157   float reduced = predux<Packet8f>(af);
1158   return Eigen::half(reduced);
1159 }
1160 
1161 template<> EIGEN_STRONG_INLINE Eigen::half predux_max<Packet8h>(const Packet8h& a) {
1162   Packet8f af = half2float(a);
1163   float reduced = predux_max<Packet8f>(af);
1164   return Eigen::half(reduced);
1165 }
1166 
1167 template<> EIGEN_STRONG_INLINE Eigen::half predux_min<Packet8h>(const Packet8h& a) {
1168   Packet8f af = half2float(a);
1169   float reduced = predux_min<Packet8f>(af);
1170   return Eigen::half(reduced);
1171 }
1172 
1173 template<> EIGEN_STRONG_INLINE Eigen::half predux_mul<Packet8h>(const Packet8h& a) {
1174   Packet8f af = half2float(a);
1175   float reduced = predux_mul<Packet8f>(af);
1176   return Eigen::half(reduced);
1177 }
1178 
1179 template<> EIGEN_STRONG_INLINE Packet8h preverse(const Packet8h& a)
1180 {
1181   __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
1182   return _mm_shuffle_epi8(a,m);
1183 }
1184 
EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet8h,8>& kernel) {
  __m128i a = kernel.packet[0];
  __m128i b = kernel.packet[1];
  __m128i c = kernel.packet[2];
  __m128i d = kernel.packet[3];
  __m128i e = kernel.packet[4];
  __m128i f = kernel.packet[5];
  __m128i g = kernel.packet[6];
  __m128i h = kernel.packet[7];

  __m128i a03b03 = _mm_unpacklo_epi16(a, b);
  __m128i c03d03 = _mm_unpacklo_epi16(c, d);
  __m128i e03f03 = _mm_unpacklo_epi16(e, f);
  __m128i g03h03 = _mm_unpacklo_epi16(g, h);
  __m128i a47b47 = _mm_unpackhi_epi16(a, b);
  __m128i c47d47 = _mm_unpackhi_epi16(c, d);
  __m128i e47f47 = _mm_unpackhi_epi16(e, f);
  __m128i g47h47 = _mm_unpackhi_epi16(g, h);

  __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
  __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
  __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
  __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
  __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
  __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
  __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
  __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);

  __m128i a0b0c0d0e0f0g0h0 = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
  __m128i a1b1c1d1e1f1g1h1 = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
  __m128i a2b2c2d2e2f2g2h2 = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
  __m128i a3b3c3d3e3f3g3h3 = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
  __m128i a4b4c4d4e4f4g4h4 = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
  __m128i a5b5c5d5e5f5g5h5 = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
  __m128i a6b6c6d6e6f6g6h6 = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
  __m128i a7b7c7d7e7f7g7h7 = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);

  kernel.packet[0] = a0b0c0d0e0f0g0h0;
  kernel.packet[1] = a1b1c1d1e1f1g1h1;
  kernel.packet[2] = a2b2c2d2e2f2g2h2;
  kernel.packet[3] = a3b3c3d3e3f3g3h3;
  kernel.packet[4] = a4b4c4d4e4f4g4h4;
  kernel.packet[5] = a5b5c5d5e5f5g5h5;
  kernel.packet[6] = a6b6c6d6e6f6g6h6;
  kernel.packet[7] = a7b7c7d7e7f7g7h7;
}

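// The 4-row half variant takes the simple route through memory: store the
// four packets, permute the scalars, and reload, rather than staying in
// registers like the 8x8 kernel above.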
EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet8h,4>& kernel) {
  EIGEN_ALIGN32 Eigen::half in[4][8];
  pstore<Eigen::half>(in[0], kernel.packet[0]);
  pstore<Eigen::half>(in[1], kernel.packet[1]);
  pstore<Eigen::half>(in[2], kernel.packet[2]);
  pstore<Eigen::half>(in[3], kernel.packet[3]);

  EIGEN_ALIGN32 Eigen::half out[4][8];

  for (int i = 0; i < 4; ++i) {
    for (int j = 0; j < 4; ++j) {
      out[i][j] = in[j][2*i];
    }
    for (int j = 0; j < 4; ++j) {
      out[i][j+4] = in[j][2*i+1];
    }
  }

  kernel.packet[0] = pload<Packet8h>(out[0]);
  kernel.packet[1] = pload<Packet8h>(out[1]);
  kernel.packet[2] = pload<Packet8h>(out[2]);
  kernel.packet[3] = pload<Packet8h>(out[3]);
}

// BFloat16 implementation.

EIGEN_STRONG_INLINE Packet8f Bf16ToF32(const Packet8bf& a) {
#ifdef EIGEN_VECTORIZE_AVX2
  __m256i extend = _mm256_cvtepu16_epi32(a);
  return _mm256_castsi256_ps(_mm256_slli_epi32(extend, 16));
#else
  __m128i lo = _mm_cvtepu16_epi32(a);
  __m128i hi = _mm_cvtepu16_epi32(_mm_srli_si128(a, 8));
  __m128i lo_shift = _mm_slli_epi32(lo, 16);
  __m128i hi_shift = _mm_slli_epi32(hi, 16);
  return _mm256_castsi256_ps(_mm256_insertf128_si256(_mm256_castsi128_si256(lo_shift), hi_shift, 1));
#endif
}
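// Scalar reference for the widening above (an illustrative sketch only; the
// helper name is hypothetical, not part of Eigen): a bfloat16 is the high
// half of an IEEE-754 binary32, so widening is a 16-bit left shift.
//   float bf16_to_f32_scalar(numext::uint16_t x) {
//     return numext::bit_cast<float>(static_cast<numext::uint32_t>(x) << 16);
//   }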

// Convert float to bfloat16 using round-to-nearest-even; the rounding-bias
// trick below also handles denormal inputs correctly.
EIGEN_STRONG_INLINE Packet8bf F32ToBf16(const Packet8f& a) {
  __m256i input = _mm256_castps_si256(a);

#ifdef EIGEN_VECTORIZE_AVX2
  // uint32_t lsb = (input >> 16);
  __m256i t = _mm256_srli_epi32(input, 16);
  // lsb = lsb & 1;
  t = _mm256_and_si256(t, _mm256_set1_epi32(1));
  // uint32_t rounding_bias = 0x7fff + lsb;
  t = _mm256_add_epi32(t, _mm256_set1_epi32(0x7fff));
  // input += rounding_bias;
  t = _mm256_add_epi32(t, input);
  // input = input >> 16;
  t = _mm256_srli_epi32(t, 16);
  // Check for NaN before converting back to bf16.
  __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
  __m256i nan = _mm256_set1_epi32(0x7fc0);
  t = _mm256_blendv_epi8(nan, t, _mm256_castps_si256(mask));
  // output = numext::bit_cast<uint16_t>(input);
  return _mm_packus_epi32(_mm256_extractf128_si256(t, 0),
                          _mm256_extractf128_si256(t, 1));
#else
  // uint32_t lsb = (input >> 16);
  __m128i lo = _mm_srli_epi32(_mm256_extractf128_si256(input, 0), 16);
  __m128i hi = _mm_srli_epi32(_mm256_extractf128_si256(input, 1), 16);
  // lsb = lsb & 1;
  lo = _mm_and_si128(lo, _mm_set1_epi32(1));
  hi = _mm_and_si128(hi, _mm_set1_epi32(1));
  // uint32_t rounding_bias = 0x7fff + lsb;
  lo = _mm_add_epi32(lo, _mm_set1_epi32(0x7fff));
  hi = _mm_add_epi32(hi, _mm_set1_epi32(0x7fff));
  // input += rounding_bias;
  lo = _mm_add_epi32(lo, _mm256_extractf128_si256(input, 0));
  hi = _mm_add_epi32(hi, _mm256_extractf128_si256(input, 1));
  // input = input >> 16;
  lo = _mm_srli_epi32(lo, 16);
  hi = _mm_srli_epi32(hi, 16);
  // Check for NaN before converting back to bf16.
  __m256 mask = _mm256_cmp_ps(a, a, _CMP_ORD_Q);
  __m128i nan = _mm_set1_epi32(0x7fc0);
  lo = _mm_blendv_epi8(nan, lo, _mm_castps_si128(_mm256_castps256_ps128(mask)));
  hi = _mm_blendv_epi8(nan, hi, _mm_castps_si128(_mm256_extractf128_ps(mask, 1)));
  // output = numext::bit_cast<uint16_t>(input);
  return _mm_packus_epi32(lo, hi);
#endif
}
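// Scalar reference for the rounding above (an illustrative sketch only; the
// helper name is hypothetical, not part of Eigen):
//   numext::uint16_t f32_to_bf16_scalar(float f) {
//     if (f != f) return numext::uint16_t(0x7fc0);           // quiet NaN
//     numext::uint32_t input = numext::bit_cast<numext::uint32_t>(f);
//     numext::uint32_t lsb = (input >> 16) & 1;
//     input += 0x7fff + lsb;                                 // rounding bias
//     return static_cast<numext::uint16_t>(input >> 16);
//   }
// Worked example: f = 1.00390625f has bits 0x3F808000, an exact tie; the lsb
// of 0x3F80 is 0, so the bias is 0x7fff and (0x3F808000 + 0x7fff) >> 16 =
// 0x3F80 = 1.0f, i.e. the tie rounds to the even significand.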

template<> EIGEN_STRONG_INLINE Packet8bf pset1<Packet8bf>(const bfloat16& from) {
  return _mm_set1_epi16(numext::bit_cast<numext::uint16_t>(from));
}

template<> EIGEN_STRONG_INLINE bfloat16 pfirst<Packet8bf>(const Packet8bf& from) {
  return numext::bit_cast<bfloat16>(static_cast<numext::uint16_t>(_mm_extract_epi16(from, 0)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pload<Packet8bf>(const bfloat16* from) {
  return _mm_load_si128(reinterpret_cast<const __m128i*>(from));
}

template<> EIGEN_STRONG_INLINE Packet8bf ploadu<Packet8bf>(const bfloat16* from) {
  return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
}

template<> EIGEN_STRONG_INLINE void pstore<bfloat16>(bfloat16* to, const Packet8bf& from) {
  _mm_store_si128(reinterpret_cast<__m128i*>(to), from);
}

template<> EIGEN_STRONG_INLINE void pstoreu<bfloat16>(bfloat16* to, const Packet8bf& from) {
  _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from);
}

template<> EIGEN_STRONG_INLINE Packet8bf
ploaddup<Packet8bf>(const bfloat16* from) {
  const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
  const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
  const numext::uint16_t c = numext::bit_cast<numext::uint16_t>(from[2]);
  const numext::uint16_t d = numext::bit_cast<numext::uint16_t>(from[3]);
  return _mm_set_epi16(d, d, c, c, b, b, a, a);
}

template<> EIGEN_STRONG_INLINE Packet8bf
ploadquad<Packet8bf>(const bfloat16* from) {
  const numext::uint16_t a = numext::bit_cast<numext::uint16_t>(from[0]);
  const numext::uint16_t b = numext::bit_cast<numext::uint16_t>(from[1]);
  return _mm_set_epi16(b, b, b, b, a, a, a, a);
}

template<> EIGEN_STRONG_INLINE Packet8bf ptrue(const Packet8bf& a) {
  return _mm_cmpeq_epi32(a, a);
}

template <>
EIGEN_STRONG_INLINE Packet8bf pabs(const Packet8bf& a) {
  const __m128i sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
  return _mm_andnot_si128(sign_mask, a);
}

template <>
EIGEN_STRONG_INLINE Packet8bf pmin<Packet8bf>(const Packet8bf& a,
                                              const Packet8bf& b) {
  return F32ToBf16(pmin<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template <>
EIGEN_STRONG_INLINE Packet8bf pmax<Packet8bf>(const Packet8bf& a,
                                              const Packet8bf& b) {
  return F32ToBf16(pmax<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template <>
EIGEN_STRONG_INLINE Packet8bf plset<Packet8bf>(const bfloat16& a) {
  return F32ToBf16(plset<Packet8f>(static_cast<float>(a)));
}

template<> EIGEN_STRONG_INLINE Packet8bf por(const Packet8bf& a,const Packet8bf& b) {
  return _mm_or_si128(a,b);
}
template<> EIGEN_STRONG_INLINE Packet8bf pxor(const Packet8bf& a,const Packet8bf& b) {
  return _mm_xor_si128(a,b);
}
template<> EIGEN_STRONG_INLINE Packet8bf pand(const Packet8bf& a,const Packet8bf& b) {
  return _mm_and_si128(a,b);
}
template<> EIGEN_STRONG_INLINE Packet8bf pandnot(const Packet8bf& a,const Packet8bf& b) {
  return _mm_andnot_si128(b,a);
}

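// pselect relies on _mm_blendv_epi8, which picks each byte by the most
// significant bit of the corresponding mask byte, so the mask must be
// all-ones or all-zeros per 16-bit lane, as the pcmp_* functions produce.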
template<> EIGEN_STRONG_INLINE Packet8bf pselect(const Packet8bf& mask, const Packet8bf& a, const Packet8bf& b) {
  return _mm_blendv_epi8(b, a, mask);
}

template<> EIGEN_STRONG_INLINE Packet8bf pround<Packet8bf>(const Packet8bf& a)
{
  return F32ToBf16(pround<Packet8f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE Packet8bf print<Packet8bf>(const Packet8bf& a) {
  return F32ToBf16(print<Packet8f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pceil<Packet8bf>(const Packet8bf& a) {
  return F32ToBf16(pceil<Packet8f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pfloor<Packet8bf>(const Packet8bf& a) {
  return F32ToBf16(pfloor<Packet8f>(Bf16ToF32(a)));
}

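// Comparisons are evaluated in float; Pack16To8 (defined earlier in this
// file alongside the half-packet helpers) narrows the resulting 32-bit
// masks back to 16-bit lanes.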
template<> EIGEN_STRONG_INLINE Packet8bf pcmp_eq(const Packet8bf& a,const Packet8bf& b) {
  return Pack16To8(pcmp_eq(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pcmp_le(const Packet8bf& a,const Packet8bf& b) {
  return Pack16To8(pcmp_le(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt(const Packet8bf& a,const Packet8bf& b) {
  return Pack16To8(pcmp_lt(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pcmp_lt_or_nan(const Packet8bf& a,const Packet8bf& b) {
  return Pack16To8(pcmp_lt_or_nan(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pconj(const Packet8bf& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet8bf pnegate(const Packet8bf& a) {
  Packet8bf sign_mask = _mm_set1_epi16(static_cast<numext::uint16_t>(0x8000));
  return _mm_xor_si128(a, sign_mask);
}

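// Arithmetic on bfloat16 packets round-trips through float: widen both
// operands, compute in binary32, and convert back with a single
// round-to-nearest-even via F32ToBf16.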
template<> EIGEN_STRONG_INLINE Packet8bf padd<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
  return F32ToBf16(padd<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet8bf psub<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
  return F32ToBf16(psub<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pmul<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
  return F32ToBf16(pmul<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
}

template<> EIGEN_STRONG_INLINE Packet8bf pdiv<Packet8bf>(const Packet8bf& a, const Packet8bf& b) {
  return F32ToBf16(pdiv<Packet8f>(Bf16ToF32(a), Bf16ToF32(b)));
}

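// Gather and scatter are done with scalar element accesses: AVX2's hardware
// gather instructions only support 32- and 64-bit elements, so there is no
// 16-bit vector gather to use here.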
template<> EIGEN_STRONG_INLINE Packet8bf pgather<bfloat16, Packet8bf>(const bfloat16* from, Index stride)
{
  const numext::uint16_t s0 = numext::bit_cast<numext::uint16_t>(from[0*stride]);
  const numext::uint16_t s1 = numext::bit_cast<numext::uint16_t>(from[1*stride]);
  const numext::uint16_t s2 = numext::bit_cast<numext::uint16_t>(from[2*stride]);
  const numext::uint16_t s3 = numext::bit_cast<numext::uint16_t>(from[3*stride]);
  const numext::uint16_t s4 = numext::bit_cast<numext::uint16_t>(from[4*stride]);
  const numext::uint16_t s5 = numext::bit_cast<numext::uint16_t>(from[5*stride]);
  const numext::uint16_t s6 = numext::bit_cast<numext::uint16_t>(from[6*stride]);
  const numext::uint16_t s7 = numext::bit_cast<numext::uint16_t>(from[7*stride]);
  return _mm_set_epi16(s7, s6, s5, s4, s3, s2, s1, s0);
}

template<> EIGEN_STRONG_INLINE void pscatter<bfloat16, Packet8bf>(bfloat16* to, const Packet8bf& from, Index stride)
{
  EIGEN_ALIGN32 bfloat16 aux[8];
  pstore(aux, from);
  to[stride*0] = aux[0];
  to[stride*1] = aux[1];
  to[stride*2] = aux[2];
  to[stride*3] = aux[3];
  to[stride*4] = aux[4];
  to[stride*5] = aux[5];
  to[stride*6] = aux[6];
  to[stride*7] = aux[7];
}

template<> EIGEN_STRONG_INLINE bfloat16 predux<Packet8bf>(const Packet8bf& a) {
  return static_cast<bfloat16>(predux<Packet8f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE bfloat16 predux_max<Packet8bf>(const Packet8bf& a) {
  return static_cast<bfloat16>(predux_max<Packet8f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE bfloat16 predux_min<Packet8bf>(const Packet8bf& a) {
  return static_cast<bfloat16>(predux_min<Packet8f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE bfloat16 predux_mul<Packet8bf>(const Packet8bf& a) {
  return static_cast<bfloat16>(predux_mul<Packet8f>(Bf16ToF32(a)));
}

template<> EIGEN_STRONG_INLINE Packet8bf preverse(const Packet8bf& a)
{
  __m128i m = _mm_setr_epi8(14,15,12,13,10,11,8,9,6,7,4,5,2,3,0,1);
  return _mm_shuffle_epi8(a,m);
}

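// Same three-round interleave transpose as the Packet8h kernel above; the
// 16-bit lanes are opaque bit patterns here, so the shuffles are identical.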
EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet8bf,8>& kernel) {
  __m128i a = kernel.packet[0];
  __m128i b = kernel.packet[1];
  __m128i c = kernel.packet[2];
  __m128i d = kernel.packet[3];
  __m128i e = kernel.packet[4];
  __m128i f = kernel.packet[5];
  __m128i g = kernel.packet[6];
  __m128i h = kernel.packet[7];

  __m128i a03b03 = _mm_unpacklo_epi16(a, b);
  __m128i c03d03 = _mm_unpacklo_epi16(c, d);
  __m128i e03f03 = _mm_unpacklo_epi16(e, f);
  __m128i g03h03 = _mm_unpacklo_epi16(g, h);
  __m128i a47b47 = _mm_unpackhi_epi16(a, b);
  __m128i c47d47 = _mm_unpackhi_epi16(c, d);
  __m128i e47f47 = _mm_unpackhi_epi16(e, f);
  __m128i g47h47 = _mm_unpackhi_epi16(g, h);

  __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
  __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
  __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
  __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
  __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
  __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
  __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
  __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);

  kernel.packet[0] = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
  kernel.packet[1] = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
  kernel.packet[2] = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
  kernel.packet[3] = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
  kernel.packet[4] = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
  kernel.packet[5] = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
  kernel.packet[6] = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
  kernel.packet[7] = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
}

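// With four rows of eight lanes, two interleave rounds suffice: output
// packet k holds transposed columns 2k and 2k+1 of the 4x8 block (e.g.
// packet[0] = a0 b0 c0 d0 a1 b1 c1 d1).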
EIGEN_STRONG_INLINE void
ptranspose(PacketBlock<Packet8bf,4>& kernel) {
  __m128i a = kernel.packet[0];
  __m128i b = kernel.packet[1];
  __m128i c = kernel.packet[2];
  __m128i d = kernel.packet[3];

  __m128i ab_03 = _mm_unpacklo_epi16(a, b);
  __m128i cd_03 = _mm_unpacklo_epi16(c, d);
  __m128i ab_47 = _mm_unpackhi_epi16(a, b);
  __m128i cd_47 = _mm_unpackhi_epi16(c, d);

  kernel.packet[0] = _mm_unpacklo_epi32(ab_03, cd_03);
  kernel.packet[1] = _mm_unpackhi_epi32(ab_03, cd_03);
  kernel.packet[2] = _mm_unpacklo_epi32(ab_47, cd_47);
  kernel.packet[3] = _mm_unpackhi_epi32(ab_47, cd_47);
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_AVX_H