// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H
#define EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H


namespace Eigen {

/** \internal
  *
  * \class TensorIntDiv
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Fast integer division by a constant.
  *
  * See the paper from Granlund and Montgomery for explanation.
  *   (at https://doi.org/10.1145/773473.178249)
  *
  * \sa Tensor
  */
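
// Usage sketch (illustrative only, not part of the library's documented API):
// construct the divisor object once for a runtime-constant d, then divide
// using the operator/ overload defined at the bottom of this file.
//
//   Eigen::internal::TensorIntDivisor<int> fast_div(7);  // precompute for d == 7
//   int q = 42 / fast_div;                               // q == 6, same as 42 / 7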

namespace internal {

namespace {

  // Note: result is undefined if val == 0
  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
  typename internal::enable_if<sizeof(T)==4,int>::type count_leading_zeros(const T val)
  {
#ifdef EIGEN_GPU_COMPILE_PHASE
    return __clz(val);
#elif defined(SYCL_DEVICE_ONLY)
    return cl::sycl::clz(val);
#elif EIGEN_COMP_MSVC
    unsigned long index;
    _BitScanReverse(&index, val);
    return 31 - index;
#else
    EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
    return __builtin_clz(static_cast<uint32_t>(val));
#endif
  }

  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
  typename internal::enable_if<sizeof(T)==8,int>::type count_leading_zeros(const T val)
  {
#ifdef EIGEN_GPU_COMPILE_PHASE
    return __clzll(val);
#elif defined(SYCL_DEVICE_ONLY)
    return static_cast<int>(cl::sycl::clz(val));
#elif EIGEN_COMP_MSVC && EIGEN_ARCH_x86_64
    unsigned long index;
    _BitScanReverse64(&index, val);
    return 63 - index;
#elif EIGEN_COMP_MSVC
    // MSVC's _BitScanReverse64 is not available for 32-bit builds.
    unsigned int lo = (unsigned int)(val&0xffffffff);
    unsigned int hi = (unsigned int)((val>>32)&0xffffffff);
    int n;
    if(hi==0)
      n = 32 + count_leading_zeros<unsigned int>(lo);
    else
      n = count_leading_zeros<unsigned int>(hi);
    return n;
#else
    EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
    return __builtin_clzll(static_cast<uint64_t>(val));
#endif
  }
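
  // Illustrative note (not library code): the result is the number of zero
  // bits above the highest set bit, e.g. for the 32-bit overload
  //
  //   count_leading_zeros(1u)          == 31
  //   count_leading_zeros(0x80000000u) == 0
  //
  // so N - count_leading_zeros(v) is floor(log2(v)) + 1, the bit length of v.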

  template <typename T>
  struct UnsignedTraits {
    typedef typename conditional<sizeof(T) == 8, uint64_t, uint32_t>::type type;
  };

  template <typename T>
  struct DividerTraits {
    typedef typename UnsignedTraits<T>::type type;
    static const int N = sizeof(T) * 8;
  };

  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t muluh(const uint32_t a, const T b) {
#if defined(EIGEN_GPU_COMPILE_PHASE)
    return __umulhi(a, b);
#elif defined(SYCL_DEVICE_ONLY)
    return cl::sycl::mul_hi(a, static_cast<uint32_t>(b));
#else
    return (static_cast<uint64_t>(a) * b) >> 32;
#endif
  }

  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t muluh(const uint64_t a, const T b) {
#if defined(EIGEN_GPU_COMPILE_PHASE)
    return __umul64hi(a, b);
#elif defined(SYCL_DEVICE_ONLY)
    return cl::sycl::mul_hi(a, static_cast<uint64_t>(b));
#elif EIGEN_HAS_BUILTIN_INT128
    __uint128_t v = static_cast<__uint128_t>(a) * static_cast<__uint128_t>(b);
    return static_cast<uint64_t>(v >> 64);
#else
    return (TensorUInt128<static_val<0>, uint64_t>(a) * TensorUInt128<static_val<0>, uint64_t>(b)).upper();
#endif
  }
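
  // Illustrative note (not library code): muluh returns the upper half of the
  // double-width product, i.e. for the 32-bit overload
  //
  //   muluh(a, b) == uint32_t((uint64_t(a) * b) >> 32)
  //
  // e.g. muluh(0x80000000u, 4u) == 2, since the full product is 0x200000000.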

  template <int N, typename T>
  struct DividerHelper {
    static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t computeMultiplier(const int log_div, const T divider) {
      EIGEN_STATIC_ASSERT(N == 32, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return static_cast<uint32_t>((static_cast<uint64_t>(1) << (N+log_div)) / divider - (static_cast<uint64_t>(1) << N) + 1);
    }
  };

  template <typename T>
  struct DividerHelper<64, T> {
    static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t computeMultiplier(const int log_div, const T divider) {
#if EIGEN_HAS_BUILTIN_INT128 && !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(SYCL_DEVICE_ONLY)
      return static_cast<uint64_t>((static_cast<__uint128_t>(1) << (64+log_div)) / static_cast<__uint128_t>(divider) - (static_cast<__uint128_t>(1) << 64) + 1);
#else
      const uint64_t shift = 1ULL << log_div;
      TensorUInt128<uint64_t, uint64_t> result = TensorUInt128<uint64_t, static_val<0> >(shift, 0) / TensorUInt128<static_val<0>, uint64_t>(divider)
                                               - TensorUInt128<static_val<1>, static_val<0> >(1, 0)
                                               + TensorUInt128<static_val<0>, static_val<1> >(1);
      return static_cast<uint64_t>(result);
#endif
    }
  };
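
  // Illustrative note (not library code): both overloads evaluate the
  // Granlund-Montgomery multiplier
  //
  //   m_low = floor(2^(N + log_div) / d) - 2^N + 1
  //
  // i.e. the low N bits of the (N+1)-bit multiplier 2^N + m_low, where
  // log_div == ceil(log2(d)). For N == 32 and d == 7, log_div == 3 and
  // m_low == floor(2^35 / 7) - 2^32 + 1 == 613566757.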
}  // end anonymous namespace


template <typename T, bool div_gt_one = false>
struct TensorIntDivisor {
 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() {
    multiplier = 0;
    shift1 = 0;
    shift2 = 0;
  }

  // Must have 0 < divider < 2^31. This is relaxed to
  // 0 < divider < 2^63 when using 64-bit indices on platforms that support
  // the __uint128_t type.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor(const T divider) {
    const int N = DividerTraits<T>::N;
    eigen_assert(static_cast<typename UnsignedTraits<T>::type>(divider) < NumTraits<UnsignedType>::highest()/2);
    eigen_assert(divider > 0);

    // fast log2: N - count_leading_zeros(divider) == floor(log2(divider)) + 1
    const int leading_zeros = count_leading_zeros(static_cast<UnsignedType>(divider));
    int log_div = N - leading_zeros;
    // if divider is a power of two then log_div is 1 more than it should be.
    if ((static_cast<typename UnsignedTraits<T>::type>(1) << (log_div-1)) == static_cast<typename UnsignedTraits<T>::type>(divider))
      log_div--;

    multiplier = DividerHelper<N, T>::computeMultiplier(log_div, divider);
    shift1 = log_div > 1 ? 1 : log_div;
    shift2 = log_div > 1 ? log_div-1 : 0;
  }

  // Must have 0 <= numerator. On platforms that don't support the __uint128_t
  // type the numerator should also be less than 2^32-1.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T divide(const T numerator) const {
    eigen_assert(static_cast<typename UnsignedTraits<T>::type>(numerator) < NumTraits<UnsignedType>::highest()/2);
    //eigen_assert(numerator >= 0); // this is implicitly asserted by the line above

    UnsignedType t1 = muluh(multiplier, numerator);
    UnsignedType t = (static_cast<UnsignedType>(numerator) - t1) >> shift1;
    return (t1 + t) >> shift2;
  }

 private:
  typedef typename DividerTraits<T>::type UnsignedType;
  UnsignedType multiplier;
  int32_t shift1;
  int32_t shift2;
};
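
// Illustrative note (not library code): divide() computes
//
//   q = (t1 + ((n - t1) >> shift1)) >> shift2,  with t1 = muluh(multiplier, n)
//
// which recovers floor(n / d) without a hardware division. For d == 7
// (multiplier == 613566757, shift1 == 1, shift2 == 2) and n == 42 this gives
// t1 == 6, t == 18 and q == (6 + 18) >> 2 == 6, as expected.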


// Optimized version for signed 32 bit integers.
// Derived from Hacker's Delight.
// Only works for divisors strictly greater than one
template <>
class TensorIntDivisor<int32_t, true> {
 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() {
    magic = 0;
    shift = 0;
  }
  // Must have 2 <= divider
  EIGEN_DEVICE_FUNC TensorIntDivisor(int32_t divider)  {
    eigen_assert(divider >= 2);
    calcMagic(divider);
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int divide(const int32_t n) const {
#ifdef EIGEN_GPU_COMPILE_PHASE
    return (__umulhi(magic, n) >> shift);
#elif defined(SYCL_DEVICE_ONLY)
    return (cl::sycl::mul_hi(magic, static_cast<uint32_t>(n)) >> shift);
#else
    uint64_t v = static_cast<uint64_t>(magic) * static_cast<uint64_t>(n);
    return (static_cast<uint32_t>(v >> 32) >> shift);
#endif
  }

 private:
  // Compute the magic numbers. See Hacker's Delight section 10 for an in
  // depth explanation.
  EIGEN_DEVICE_FUNC void calcMagic(int32_t d) {
   const unsigned two31 = 0x80000000;     // 2**31.
   unsigned ad = d;
   unsigned t = two31 + (ad >> 31);
   unsigned anc = t - 1 - t%ad;     // Absolute value of nc.
   int p = 31;                      // Init. p.
   unsigned q1 = two31/anc;         // Init. q1 = 2**p/|nc|.
   unsigned r1 = two31 - q1*anc;    // Init. r1 = rem(2**p, |nc|).
   unsigned q2 = two31/ad;          // Init. q2 = 2**p/|d|.
   unsigned r2 = two31 - q2*ad;     // Init. r2 = rem(2**p, |d|).
   unsigned delta = 0;
   do {
      p = p + 1;
      q1 = 2*q1;           // Update q1 = 2**p/|nc|.
      r1 = 2*r1;           // Update r1 = rem(2**p, |nc|).
      if (r1 >= anc) {     // (Must be an unsigned
         q1 = q1 + 1;      // comparison here).
         r1 = r1 - anc;}
      q2 = 2*q2;           // Update q2 = 2**p/|d|.
      r2 = 2*r2;           // Update r2 = rem(2**p, |d|).
      if (r2 >= ad) {      // (Must be an unsigned
         q2 = q2 + 1;      // comparison here).
         r2 = r2 - ad;}
      delta = ad - r2;
   } while (q1 < delta || (q1 == delta && r1 == 0));

   magic = (unsigned)(q2 + 1);
   shift = p - 32;
  }

  uint32_t magic;
  int32_t shift;
};
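
// Illustrative note (not library code): for d == 7 the loop above settles at
// p == 34, giving magic == 0x92492493 and shift == 2. Then for n == 42,
// mulhi(0x92492493, 42) == 24 and 24 >> 2 == 6, as expected.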


template <typename T, bool div_gt_one>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator / (const T& numerator, const TensorIntDivisor<T, div_gt_one>& divisor) {
  return divisor.divide(numerator);
}
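
// Usage sketch (illustrative only; the names below are hypothetical): hoist
// the divisor construction out of a hot loop so each iteration performs a
// multiply-and-shift instead of a hardware division.
//
//   Eigen::internal::TensorIntDivisor<int> fast_div(stride);  // stride > 0
//   for (int i = 0; i < n; ++i)
//     rows[i] = offsets[i] / fast_div;  // same result as offsets[i] / stride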


} // end namespace internal
} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H