// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Eugene Brevdo <ebrevdo@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H
#define EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H

namespace Eigen {
namespace internal {

/** \class TensorIndexTuple
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor + Index Tuple class.
  *
  */
template<typename XprType>
struct traits<TensorIndexTupleOp<XprType> > : public traits<XprType>
{
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef Tuple<Index, typename XprTraits::Scalar> Scalar;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
};

template<typename XprType>
struct eval<TensorIndexTupleOp<XprType>, Eigen::Dense>
{
  typedef const TensorIndexTupleOp<XprType> EIGEN_DEVICE_REF type;
};

template<typename XprType>
struct nested<TensorIndexTupleOp<XprType>, 1,
              typename eval<TensorIndexTupleOp<XprType> >::type>
{
  typedef TensorIndexTupleOp<XprType> type;
};

}  // end namespace internal

template<typename XprType>
class TensorIndexTupleOp : public TensorBase<TensorIndexTupleOp<XprType>, ReadOnlyAccessors>
{
 public:
  typedef typename Eigen::internal::traits<TensorIndexTupleOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename Eigen::internal::nested<TensorIndexTupleOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorIndexTupleOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorIndexTupleOp>::Index Index;
  typedef Tuple<Index, typename XprType::CoeffReturnType> CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIndexTupleOp(const XprType& expr)
      : m_xpr(expr) {}

  EIGEN_DEVICE_FUNC
  const typename internal::remove_all<typename XprType::Nested>::type&
  expression() const { return m_xpr; }

 protected:
  typename XprType::Nested m_xpr;
};
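
// Illustrative usage sketch (not part of the original header). TensorIndexTupleOp
// is the expression returned by TensorBase::index_tuples(); each coefficient of
// the result pairs a coefficient's flat index with its value (assuming the
// default DenseIndex index type):
//
//   Eigen::Tensor<float, 2> t(2, 3);
//   t.setRandom();
//   Eigen::Tensor<Eigen::Tuple<Eigen::DenseIndex, float>, 2> pairs = t.index_tuples();
//   // pairs(i, j).first is the flat index i + 2 * j (ColMajor); .second is t(i, j).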

template<typename ArgType, typename Device>
struct TensorEvaluator<const TensorIndexTupleOp<ArgType>, Device>
{
  typedef TensorIndexTupleOp<ArgType> XprType;
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;
  static const int NumDims = internal::array_size<Dimensions>::value;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  enum {
    IsAligned = false,
    PacketAccess = false,
    BlockAccess = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,
    RawAccess = false
  };

  typedef internal::TensorBlockNotImplemented TensorBlock;

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device) { }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
    return m_impl.dimensions();
  }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  // Pair each coefficient of the wrapped expression with its flat index.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return CoeffReturnType(index, m_impl.coeff(index));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, 1);
  }

  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

#ifdef EIGEN_USE_SYCL
  // binds the evaluator's buffers to the SYCL command group handler
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_impl.bind(cgh);
  }
#endif

 protected:
  TensorEvaluator<ArgType, Device> m_impl;
};

namespace internal {

/** \class TensorTupleIndex
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Converts to Tensor<Tuple<Index, Scalar> > and reduces to Tensor<Index>.
  *
  */
template<typename ReduceOp, typename Dims, typename XprType>
struct traits<TensorTupleReducerOp<ReduceOp, Dims, XprType> > : public traits<XprType>
{
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef Index Scalar;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
  static const int Layout = XprTraits::Layout;
};

template<typename ReduceOp, typename Dims, typename XprType>
struct eval<TensorTupleReducerOp<ReduceOp, Dims, XprType>, Eigen::Dense>
{
  typedef const TensorTupleReducerOp<ReduceOp, Dims, XprType> EIGEN_DEVICE_REF type;
};

template<typename ReduceOp, typename Dims, typename XprType>
struct nested<TensorTupleReducerOp<ReduceOp, Dims, XprType>, 1,
              typename eval<TensorTupleReducerOp<ReduceOp, Dims, XprType> >::type>
{
  typedef TensorTupleReducerOp<ReduceOp, Dims, XprType> type;
};

}  // end namespace internal

template<typename ReduceOp, typename Dims, typename XprType>
class TensorTupleReducerOp : public TensorBase<TensorTupleReducerOp<ReduceOp, Dims, XprType>, ReadOnlyAccessors>
{
 public:
  typedef typename Eigen::internal::traits<TensorTupleReducerOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename Eigen::internal::nested<TensorTupleReducerOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorTupleReducerOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorTupleReducerOp>::Index Index;
  typedef Index CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTupleReducerOp(const XprType& expr,
                                                             const ReduceOp& reduce_op,
                                                             const Index return_dim,
                                                             const Dims& reduce_dims)
      : m_xpr(expr), m_reduce_op(reduce_op), m_return_dim(return_dim), m_reduce_dims(reduce_dims) {}

  EIGEN_DEVICE_FUNC
  const typename internal::remove_all<typename XprType::Nested>::type&
  expression() const { return m_xpr; }

  EIGEN_DEVICE_FUNC
  const ReduceOp& reduce_op() const { return m_reduce_op; }

  EIGEN_DEVICE_FUNC
  const Dims& reduce_dims() const { return m_reduce_dims; }

  EIGEN_DEVICE_FUNC
  Index return_dim() const { return m_return_dim; }

 protected:
  typename XprType::Nested m_xpr;
  const ReduceOp m_reduce_op;
  const Index m_return_dim;
  const Dims m_reduce_dims;
};
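
// Illustrative usage sketch (not part of the original header). TensorTupleReducerOp
// is the expression behind TensorBase::argmax()/argmin(): the input is converted
// to (index, value) tuples, reduced with a tuple-comparing reducer over
// reduce_dims, and return_dim selects which coordinate of the winning position
// is reported (a negative return_dim yields the flat index):
//
//   Eigen::Tensor<float, 2> t(2, 3);
//   t.setRandom();
//   // Index along dimension 0 of the maximum of each of the 3 columns.
//   Eigen::Tensor<Eigen::DenseIndex, 1> am = t.argmax(0);
//   // Flat index of the global maximum, as a 0-d tensor.
//   Eigen::Tensor<Eigen::DenseIndex, 0> flat = t.argmax();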

template<typename ReduceOp, typename Dims, typename ArgType, typename Device>
struct TensorEvaluator<const TensorTupleReducerOp<ReduceOp, Dims, ArgType>, Device>
{
  typedef TensorTupleReducerOp<ReduceOp, Dims, ArgType> XprType;
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename TensorIndexTupleOp<ArgType>::CoeffReturnType TupleType;
  typedef typename TensorEvaluator<const TensorReductionOp<ReduceOp, Dims, const TensorIndexTupleOp<ArgType> >, Device>::Dimensions Dimensions;
  typedef typename TensorEvaluator<const TensorIndexTupleOp<ArgType>, Device>::Dimensions InputDimensions;
  static const int NumDims = internal::array_size<InputDimensions>::value;
  typedef array<Index, NumDims> StrideDims;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  typedef StorageMemory<TupleType, Device> TupleStorageMem;

  enum {
    IsAligned = false,
    PacketAccess = false,
    BlockAccess = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout = TensorEvaluator<const TensorReductionOp<ReduceOp, Dims, const TensorIndexTupleOp<ArgType> >, Device>::Layout,
    CoordAccess = false,
    RawAccess = false
  };

  typedef internal::TensorBlockNotImplemented TensorBlock;

  // Build two evaluators: m_orig_impl evaluates the (index, value) tuples of
  // the input, and m_impl evaluates the reduction of those tuples.
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_orig_impl(op.expression(), device),
        m_impl(op.expression().index_tuples().reduce(op.reduce_dims(), op.reduce_op()), device),
        m_return_dim(op.return_dim())
  {
    gen_strides(m_orig_impl.dimensions(), m_strides);
    if (Layout == static_cast<int>(ColMajor)) {
      const Index total_size = internal::array_prod(m_orig_impl.dimensions());
      m_stride_mod = (m_return_dim < NumDims - 1) ? m_strides[m_return_dim + 1] : total_size;
    } else {
      const Index total_size = internal::array_prod(m_orig_impl.dimensions());
      m_stride_mod = (m_return_dim > 0) ? m_strides[m_return_dim - 1] : total_size;
    }
    // If m_return_dim is negative (reduce over all dims and return the flat
    // index), the strides are never used; fall back to 1.
    m_stride_div = ((m_return_dim >= 0) &&
                    (m_return_dim < static_cast<Index>(m_strides.size())))
                   ? m_strides[m_return_dim] : 1;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
    return m_impl.dimensions();
  }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }
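
  // Worked example (illustrative note, not in the original source): for a
  // ColMajor 2x3 tensor, gen_strides() yields strides = {1, 2}. Recovering the
  // coordinate along dimension 0 from a winning tuple's flat index f computes
  // (f % m_stride_mod) / m_stride_div = (f % 2) / 1; along dimension 1 it is
  // (f % 6) / 2. E.g. f = 5 corresponds to coordinates (1, 2).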
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    const TupleType v = m_impl.coeff(index);
    return (m_return_dim < 0) ? v.first : (v.first % m_stride_mod) / m_stride_div;
  }

  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

#ifdef EIGEN_USE_SYCL
  // binds both evaluators' buffers to the SYCL command group handler
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_impl.bind(cgh);
    m_orig_impl.bind(cgh);
  }
#endif

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    const double compute_cost = 1.0 +
        (m_return_dim < 0 ? 0.0 : (TensorOpCost::ModCost<Index>() + TensorOpCost::DivCost<Index>()));
    return m_orig_impl.costPerCoeff(vectorized) +
           m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, compute_cost);
  }

 private:
  EIGEN_DEVICE_FUNC void gen_strides(const InputDimensions& dims, StrideDims& strides) {
    if (m_return_dim < 0) {
      return;  // Won't be using the strides.
    }
    eigen_assert(m_return_dim < NumDims &&
                 "Asking to convert index to a dimension outside of the rank");

    // Build row-major or column-major strides over the *input* dimensions, so
    // that a flat index can later be converted back into the coordinate along
    // m_return_dim.
    if (Layout == static_cast<int>(ColMajor)) {
      strides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        strides[i] = strides[i-1] * dims[i-1];
      }
    } else {
      strides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        strides[i] = strides[i+1] * dims[i+1];
      }
    }
  }
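
  // Illustrative note (not in the original source): for dims = {2, 3, 4},
  // gen_strides() yields strides = {1, 2, 6} in ColMajor and {12, 4, 1} in
  // RowMajor, i.e. the factors of the linearization x0*s0 + x1*s1 + x2*s2.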

 protected:
  TensorEvaluator<const TensorIndexTupleOp<ArgType>, Device> m_orig_impl;
  TensorEvaluator<const TensorReductionOp<ReduceOp, Dims, const TensorIndexTupleOp<ArgType> >, Device> m_impl;
  const Index m_return_dim;
  StrideDims m_strides;
  Index m_stride_mod;
  Index m_stride_div;
};

}  // end namespace Eigen

#endif  // EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H