Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-01-18 10:13:50

0001 #ifndef VECCORE_BACKEND_SCALAR_WRAPPER_H
0002 #define VECCORE_BACKEND_SCALAR_WRAPPER_H
0003 
0004 #include <type_traits>
0005 
0006 namespace vecCore {
0007 
// Forward declarations for the scalar-backend wrapper types defined below.
class WrappedBool;      // single-lane boolean mask
template <typename>
class MaskedScalar;     // write-through proxy guarded by a mask
template <typename>
class WrappedScalar;    // single-lane "SIMD" scalar wrapper
0013 
// TypeTraits specialization for the scalar-backend mask type: a WrappedBool
// carries exactly one lane and is indexed via a WrappedScalar<size_t>.
// (The primary TypeTraits template is declared elsewhere in VecCore.)
template <>
struct TypeTraits<WrappedBool> {
  using ScalarType = bool;
  using IndexType  = WrappedScalar<size_t>;
  static constexpr size_t Size = 1;  // one element per "vector"
};
0020 
// TypeTraits specialization for the single-lane scalar wrapper: element
// type T, WrappedBool as its mask type, and a size of one.
template <typename T>
struct TypeTraits<WrappedScalar<T>> {
  using ScalarType = T;
  using MaskType   = WrappedBool;
  using IndexType  = WrappedScalar<size_t>;
  static constexpr size_t Size = 1;  // one element per "vector"
};
0028 
namespace backend {

// Scalar backend: maps every generic vector type name onto a one-lane
// wrapper, so code written against the SIMD API degrades to plain scalar
// operations. T selects the element type of Real_v and defaults to
// Real_s, VecCore's configured scalar floating-point type (declared
// elsewhere).
template <typename T = Real_s>
class ScalarWrapperT {
public:
  using Real_v   = WrappedScalar<T>;
  using Float_v  = WrappedScalar<float>;
  using Double_v = WrappedScalar<double>;

  using Int_v   = WrappedScalar<int>;
  using Int16_v = WrappedScalar<int16_t>;
  using Int32_v = WrappedScalar<int32_t>;
  using Int64_v = WrappedScalar<int64_t>;

  using UInt_v   = WrappedScalar<unsigned int>;
  using UInt16_v = WrappedScalar<uint16_t>;
  using UInt32_v = WrappedScalar<uint32_t>;
  using UInt64_v = WrappedScalar<uint64_t>;
};

// Default backend instantiation (Real_v element type is Real_s).
using ScalarWrapper = ScalarWrapperT<>;

} // namespace backend
0052 
0053 class WrappedBool {
0054 public:
0055   static constexpr size_t Size = 1;
0056 
0057   VECCORE_ATT_HOST_DEVICE
0058   WrappedBool() { /* uninitialized */}
0059 
0060   VECCORE_ATT_HOST_DEVICE
0061   WrappedBool(bool val) : fBool(val) {}
0062 
0063   VECCORE_ATT_HOST_DEVICE
0064   bool isFull() const { return fBool; }
0065 
0066   VECCORE_ATT_HOST_DEVICE
0067   bool isEmpty() const { return !fBool; }
0068 
0069   VECCORE_ATT_HOST_DEVICE
0070   static constexpr size_t size() { return 1; }
0071 
0072   VECCORE_ATT_HOST_DEVICE
0073   operator bool &() noexcept { return fBool; }
0074 
0075   VECCORE_ATT_HOST_DEVICE
0076   operator bool const &() const noexcept { return fBool; }
0077 
0078   VECCORE_ATT_HOST_DEVICE
0079   bool &operator[](int index)
0080   {
0081     assert(index == 0);
0082     (void)index;
0083     return fBool;
0084   }
0085 
0086   VECCORE_ATT_HOST_DEVICE
0087   bool operator[](int index) const
0088   {
0089     assert(index == 0);
0090     (void)index;
0091     return fBool;
0092   }
0093 
0094   VECCORE_ATT_HOST_DEVICE
0095   void store(bool *dest) const { *dest = fBool; }
0096 
0097 private:
0098   bool fBool{};
0099 };
0100 
0101 template <class T>
0102 class MaskedScalar {
0103 public:
0104   using Mask = WrappedBool;
0105 
0106   VECCORE_ATT_HOST_DEVICE
0107   MaskedScalar() = delete;
0108 
0109   VECCORE_ATT_HOST_DEVICE
0110   MaskedScalar(T &ref, Mask mask = true) : fRef(ref), fMask(mask) {}
0111 
0112 #define MASK_ASSIGN_OPERATOR(OP) \
0113   VECCORE_ATT_HOST_DEVICE        \
0114   T &operator OP(const T &ref)   \
0115   {                              \
0116     if (fMask) fRef OP ref;      \
0117     return fRef;                 \
0118   }
0119 
0120   MASK_ASSIGN_OPERATOR(=)
0121   MASK_ASSIGN_OPERATOR(+=)
0122   MASK_ASSIGN_OPERATOR(-=)
0123   MASK_ASSIGN_OPERATOR(*=)
0124   MASK_ASSIGN_OPERATOR(/=)
0125   MASK_ASSIGN_OPERATOR(%=)
0126   MASK_ASSIGN_OPERATOR(&=)
0127   MASK_ASSIGN_OPERATOR(^=)
0128   MASK_ASSIGN_OPERATOR(|=)
0129   MASK_ASSIGN_OPERATOR(<<=)
0130   MASK_ASSIGN_OPERATOR(>>=)
0131 
0132 #undef MASK_ASSIGN_OPERATOR
0133 
0134 private:
0135   T &fRef;
0136   Mask fMask;
0137 };
0138 
0139 template <class T>
0140 class WrappedScalar {
0141 public:
0142   using Type = T;
0143   using Mask = WrappedBool;
0144 
0145   static constexpr size_t Size = 1;
0146 
0147   VECCORE_ATT_HOST_DEVICE
0148   WrappedScalar() { /* uninitialized */}
0149 
0150   VECCORE_ATT_HOST_DEVICE
0151   WrappedScalar(const T &val) : fVal(val) {}
0152 
0153   VECCORE_ATT_HOST_DEVICE
0154   WrappedScalar(const T *const val_ptr) : fVal(*val_ptr) {}
0155 
0156   VECCORE_ATT_HOST_DEVICE
0157   WrappedScalar(const WrappedScalar *const s) : fVal(s->val_ptr) {}
0158 
0159   /* allow type conversion from other scalar types at initialization */
0160   template <typename Type, class = typename std::enable_if<std::is_integral<Type>::value>::type>
0161   VECCORE_ATT_HOST_DEVICE
0162   WrappedScalar(const Type &val) : fVal(static_cast<T>(val))
0163   {
0164   }
0165 
0166   VECCORE_ATT_HOST_DEVICE
0167   static constexpr size_t size() { return 1; }
0168 
0169   VECCORE_ATT_HOST_DEVICE
0170   operator T &() noexcept { return fVal; }
0171 
0172   VECCORE_ATT_HOST_DEVICE
0173   operator T const &() const noexcept { return fVal; }
0174 
0175   VECCORE_ATT_HOST_DEVICE
0176   MaskedScalar<T> operator()(Mask m) { return MaskedScalar<T>(fVal, m); }
0177 
0178   VECCORE_ATT_HOST_DEVICE
0179   T &operator[](int index)
0180   {
0181     assert(index == 0);
0182     (void)index;
0183     return fVal;
0184   }
0185 
0186   VECCORE_ATT_HOST_DEVICE
0187   T const operator[](int index) const
0188   {
0189     assert(index == 0);
0190     (void)index;
0191     return fVal;
0192   }
0193 
0194   VECCORE_ATT_HOST_DEVICE
0195   void load(T const *const src) { fVal = *src; }
0196 
0197   VECCORE_ATT_HOST_DEVICE
0198   void store(T &dest) const { dest = fVal; }
0199 
0200   VECCORE_ATT_HOST_DEVICE
0201   void store(T *dest) const { *dest = fVal; }
0202 
0203 #define SCALAR_WRAPPER_OPERATOR(OP)                                                                 \
0204   VECCORE_FORCE_INLINE                                                                              \
0205   VECCORE_ATT_HOST_DEVICE                                                                           \
0206   WrappedScalar operator OP(const WrappedScalar &x) const { return WrappedScalar(fVal OP x.fVal); } \
0207                                                                                                     \
0208   VECCORE_FORCE_INLINE                                                                              \
0209   VECCORE_ATT_HOST_DEVICE                                                                           \
0210   WrappedScalar operator OP(const T &x) const { return WrappedScalar(fVal OP x); }
0211 
0212   SCALAR_WRAPPER_OPERATOR(+)
0213   SCALAR_WRAPPER_OPERATOR(-)
0214   SCALAR_WRAPPER_OPERATOR(*)
0215   SCALAR_WRAPPER_OPERATOR(/)
0216   SCALAR_WRAPPER_OPERATOR(%)
0217 
0218 #undef SCALAR_WRAPPER_OPERATOR
0219 
0220 private:
0221   T fVal;
0222 };
0223 
0224 template <>
0225 VECCORE_FORCE_INLINE
0226 VECCORE_ATT_HOST_DEVICE
0227 bool MaskEmpty<WrappedBool>(const WrappedBool &mask)
0228 {
0229   return !mask;
0230 }
0231 
0232 template <>
0233 VECCORE_FORCE_INLINE
0234 VECCORE_ATT_HOST_DEVICE
0235 bool MaskFull<WrappedBool>(const WrappedBool &mask)
0236 {
0237   return mask;
0238 }
0239 
0240 template <typename T>
0241 struct MaskingImplementation<WrappedScalar<T>> {
0242   VECCORE_FORCE_INLINE
0243   VECCORE_ATT_HOST_DEVICE
0244   static void Assign(WrappedScalar<T> &dst, WrappedBool const &mask, WrappedScalar<T> const &src)
0245   {
0246     if (mask) dst = src;
0247   }
0248 
0249   VECCORE_FORCE_INLINE
0250   VECCORE_ATT_HOST_DEVICE
0251   static void Blend(WrappedScalar<T> &dst, WrappedBool const &mask, WrappedScalar<T> const &src1,
0252                     WrappedScalar<T> const &src2)
0253   {
0254     dst = mask ? src1 : src2;
0255   }
0256 };
0257 
0258 } // namespace vecCore
0259 
0260 #endif