0001 // This file is part of Eigen, a lightweight C++ template library
0002 // for linear algebra.
0003 //
0004 // Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
0005 // Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
0006 // Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
0007 //
0008 // This Source Code Form is subject to the terms of the Mozilla
0009 // Public License v. 2.0. If a copy of the MPL was not distributed
0010 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
0011 
0012 
0013 #ifndef EIGEN_COREEVALUATORS_H
0014 #define EIGEN_COREEVALUATORS_H
0015 
0016 namespace Eigen {
0017 
0018 namespace internal {
0019 
0020 // This class returns the evaluator kind from the expression storage kind.
0021 // The default assumes index-based accessors
0022 template<typename StorageKind>
0023 struct storage_kind_to_evaluator_kind {
0024   typedef IndexBased Kind;
0025 };
0026 
0027 // This class returns the evaluator shape from the expression storage kind.
0028 // It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc.
0029 template<typename StorageKind> struct storage_kind_to_shape;
0030 
0031 template<> struct storage_kind_to_shape<Dense>                  { typedef DenseShape Shape;           };
0032 template<> struct storage_kind_to_shape<SolverStorage>          { typedef SolverShape Shape;           };
0033 template<> struct storage_kind_to_shape<PermutationStorage>     { typedef PermutationShape Shape;     };
0034 template<> struct storage_kind_to_shape<TranspositionsStorage>  { typedef TranspositionsShape Shape;  };
0035 
0036 // Evaluators have to be specialized with respect to various criteria such as:
0037 //  - storage/structure/shape
0038 //  - scalar type
0039 //  - etc.
0040 // Therefore, we need specializations of evaluator providing additional template arguments for each kind of evaluator.
0041 // We currently distinguish the following kinds of evaluators (an illustrative sketch follows the list):
0042 // - unary_evaluator    for expressions taking only one argument (CwiseUnaryOp, CwiseUnaryView, Transpose, MatrixWrapper, ArrayWrapper, Reverse, Replicate)
0043 // - binary_evaluator   for expressions taking two arguments (CwiseBinaryOp)
0044 // - ternary_evaluator  for expressions taking three arguments (CwiseTernaryOp)
0045 // - product_evaluator  for linear algebra products (Product); special case of binary_evaluator because it requires additional tags for dispatching.
0046 // - mapbase_evaluator  for Map, Block, Ref
0047 // - block_evaluator    for Block (special dispatching to a mapbase_evaluator or unary_evaluator)
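
// Illustrative sketch (not part of the dispatching logic itself, just an example of
// which evaluator ends up being used): for the sum of two dense matrices,
//   MatrixXd A(3,3), B(3,3);
//   auto xpr = A + B;                            // CwiseBinaryOp<scalar_sum_op<double,double>,...>
//   internal::evaluator<decltype(xpr)> ev(xpr);  // forwards to binary_evaluator<...>
//   double v = ev.coeff(1,2);                    // computes A(1,2) + B(1,2) on the fly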
0048 
0049 template< typename T,
0050           typename Arg1Kind   = typename evaluator_traits<typename T::Arg1>::Kind,
0051           typename Arg2Kind   = typename evaluator_traits<typename T::Arg2>::Kind,
0052           typename Arg3Kind   = typename evaluator_traits<typename T::Arg3>::Kind,
0053           typename Arg1Scalar = typename traits<typename T::Arg1>::Scalar,
0054           typename Arg2Scalar = typename traits<typename T::Arg2>::Scalar,
0055           typename Arg3Scalar = typename traits<typename T::Arg3>::Scalar> struct ternary_evaluator;
0056 
0057 template< typename T,
0058           typename LhsKind   = typename evaluator_traits<typename T::Lhs>::Kind,
0059           typename RhsKind   = typename evaluator_traits<typename T::Rhs>::Kind,
0060           typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
0061           typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct binary_evaluator;
0062 
0063 template< typename T,
0064           typename Kind   = typename evaluator_traits<typename T::NestedExpression>::Kind,
0065           typename Scalar = typename T::Scalar> struct unary_evaluator;
0066 
0067 // evaluator_traits<T> contains traits for evaluator<T>
0068 
0069 template<typename T>
0070 struct evaluator_traits_base
0071 {
0072   // by default, get evaluator kind and shape from storage
0073   typedef typename storage_kind_to_evaluator_kind<typename traits<T>::StorageKind>::Kind Kind;
0074   typedef typename storage_kind_to_shape<typename traits<T>::StorageKind>::Shape Shape;
0075 };
0076 
0077 // Default evaluator traits
0078 template<typename T>
0079 struct evaluator_traits : public evaluator_traits_base<T>
0080 {
0081 };
0082 
0083 template<typename T, typename Shape = typename evaluator_traits<T>::Shape >
0084 struct evaluator_assume_aliasing {
0085   static const bool value = false;
0086 };
0087 
0088 // By default, we assume a unary expression:
0089 template<typename T>
0090 struct evaluator : public unary_evaluator<T>
0091 {
0092   typedef unary_evaluator<T> Base;
0093   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0094   explicit evaluator(const T& xpr) : Base(xpr) {}
0095 };
0096 
0097 
0098 // TODO: Think about const-correctness
0099 template<typename T>
0100 struct evaluator<const T>
0101   : evaluator<T>
0102 {
0103   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0104   explicit evaluator(const T& xpr) : evaluator<T>(xpr) {}
0105 };
0106 
0107 // ---------- base class for all evaluators ----------
0108 
0109 template<typename ExpressionType>
0110 struct evaluator_base
0111 {
0112   // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices.
0113   typedef traits<ExpressionType> ExpressionTraits;
0114 
0115   enum {
0116     Alignment = 0
0117   };
0118   // noncopyable:
0119   // Don't make this class inherit noncopyable as this kills EBO (Empty Base Optimization)
0120 // and makes complex evaluators much larger than they should be.
0121   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE evaluator_base() {}
0122   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ~evaluator_base() {}
0123 private:
0124   EIGEN_DEVICE_FUNC evaluator_base(const evaluator_base&);
0125   EIGEN_DEVICE_FUNC const evaluator_base& operator=(const evaluator_base&);
0126 };
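
// Illustration of the EBO remark above (a sketch, not Eigen code): two distinct
// subobjects of the same empty type may not share an address, so with
//   struct noncopyable {};
//   struct inner : noncopyable {};               // an "evaluator" with no data
//   struct outer : noncopyable { inner i; };     // a composite evaluator
// the member `i` typically cannot be placed at offset 0 and padding is inserted;
// the waste grows with every nesting level, which is why the copy constructor and
// assignment operator are simply declared private above instead.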
0127 
0128 // -------------------- Matrix and Array --------------------
0129 //
0130 // evaluator<PlainObjectBase> is a common base class for the
0131 // Matrix and Array evaluators.
0132 // Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense,
0133 // so no need for more sophisticated dispatching.
0134 
0135 // This helper makes it possible to completely eliminate m_outerStride when it is known at compile time.
0136 template<typename Scalar,int OuterStride> class plainobjectbase_evaluator_data {
0137 public:
0138   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0139   plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr)
0140   {
0141 #ifndef EIGEN_INTERNAL_DEBUGGING
0142     EIGEN_UNUSED_VARIABLE(outerStride);
0143 #endif
0144     eigen_internal_assert(outerStride==OuterStride);
0145   }
0146   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
0147   Index outerStride() const EIGEN_NOEXCEPT { return OuterStride; }
0148   const Scalar *data;
0149 };
0150 
0151 template<typename Scalar> class plainobjectbase_evaluator_data<Scalar,Dynamic> {
0152 public:
0153   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0154   plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr), m_outerStride(outerStride) {}
0155   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0156   Index outerStride() const { return m_outerStride; }
0157   const Scalar *data;
0158 protected:
0159   Index m_outerStride;
0160 };
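
// Net effect of the two specializations above (illustrative, assuming a typical
// 64-bit ABI; `ptr` and `n` stand for some valid data pointer and runtime stride):
//   plainobjectbase_evaluator_data<float,4>       d1(ptr, 4);  // stores only the pointer
//   plainobjectbase_evaluator_data<float,Dynamic> d2(ptr, n);  // stores pointer + Index
// so evaluators of fixed-size matrices carry no runtime stride at all.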
0161 
0162 template<typename Derived>
0163 struct evaluator<PlainObjectBase<Derived> >
0164   : evaluator_base<Derived>
0165 {
0166   typedef PlainObjectBase<Derived> PlainObjectType;
0167   typedef typename PlainObjectType::Scalar Scalar;
0168   typedef typename PlainObjectType::CoeffReturnType CoeffReturnType;
0169 
0170   enum {
0171     IsRowMajor = PlainObjectType::IsRowMajor,
0172     IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime,
0173     RowsAtCompileTime = PlainObjectType::RowsAtCompileTime,
0174     ColsAtCompileTime = PlainObjectType::ColsAtCompileTime,
0175 
0176     CoeffReadCost = NumTraits<Scalar>::ReadCost,
0177     Flags = traits<Derived>::EvaluatorFlags,
0178     Alignment = traits<Derived>::Alignment
0179   };
0180   enum {
0181     // We do not need to know the outer stride for vectors
0182     OuterStrideAtCompileTime = IsVectorAtCompileTime  ? 0
0183                                                       : int(IsRowMajor) ? ColsAtCompileTime
0184                                                                         : RowsAtCompileTime
0185   };
0186 
0187   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0188   evaluator()
0189     : m_d(0,OuterStrideAtCompileTime)
0190   {
0191     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
0192   }
0193 
0194   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0195   explicit evaluator(const PlainObjectType& m)
0196     : m_d(m.data(),IsVectorAtCompileTime ? 0 : m.outerStride())
0197   {
0198     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
0199   }
0200 
0201   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0202   CoeffReturnType coeff(Index row, Index col) const
0203   {
0204     if (IsRowMajor)
0205       return m_d.data[row * m_d.outerStride() + col];
0206     else
0207       return m_d.data[row + col * m_d.outerStride()];
0208   }
0209 
0210   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0211   CoeffReturnType coeff(Index index) const
0212   {
0213     return m_d.data[index];
0214   }
0215 
0216   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0217   Scalar& coeffRef(Index row, Index col)
0218   {
0219     if (IsRowMajor)
0220       return const_cast<Scalar*>(m_d.data)[row * m_d.outerStride() + col];
0221     else
0222       return const_cast<Scalar*>(m_d.data)[row + col * m_d.outerStride()];
0223   }
0224 
0225   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0226   Scalar& coeffRef(Index index)
0227   {
0228     return const_cast<Scalar*>(m_d.data)[index];
0229   }
0230 
0231   template<int LoadMode, typename PacketType>
0232   EIGEN_STRONG_INLINE
0233   PacketType packet(Index row, Index col) const
0234   {
0235     if (IsRowMajor)
0236       return ploadt<PacketType, LoadMode>(m_d.data + row * m_d.outerStride() + col);
0237     else
0238       return ploadt<PacketType, LoadMode>(m_d.data + row + col * m_d.outerStride());
0239   }
0240 
0241   template<int LoadMode, typename PacketType>
0242   EIGEN_STRONG_INLINE
0243   PacketType packet(Index index) const
0244   {
0245     return ploadt<PacketType, LoadMode>(m_d.data + index);
0246   }
0247 
0248   template<int StoreMode,typename PacketType>
0249   EIGEN_STRONG_INLINE
0250   void writePacket(Index row, Index col, const PacketType& x)
0251   {
0252     if (IsRowMajor)
0253       return pstoret<Scalar, PacketType, StoreMode>
0254                 (const_cast<Scalar*>(m_d.data) + row * m_d.outerStride() + col, x);
0255     else
0256       return pstoret<Scalar, PacketType, StoreMode>
0257                     (const_cast<Scalar*>(m_d.data) + row + col * m_d.outerStride(), x);
0258   }
0259 
0260   template<int StoreMode, typename PacketType>
0261   EIGEN_STRONG_INLINE
0262   void writePacket(Index index, const PacketType& x)
0263   {
0264     return pstoret<Scalar, PacketType, StoreMode>(const_cast<Scalar*>(m_d.data) + index, x);
0265   }
0266 
0267 protected:
0268 
0269   plainobjectbase_evaluator_data<Scalar,OuterStrideAtCompileTime> m_d;
0270 };
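
// Worked example of the addressing above (illustrative): for a column-major 2x3
// matrix, outerStride()==2 and coeff(0,2) reads m_d.data[0 + 2*2] == m_d.data[4];
// for the row-major variant, outerStride()==3 and the same coefficient is
// m_d.data[0*3 + 2] == m_d.data[2].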
0271 
0272 template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
0273 struct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
0274   : evaluator<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
0275 {
0276   typedef Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
0277 
0278   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0279   evaluator() {}
0280 
0281   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0282   explicit evaluator(const XprType& m)
0283     : evaluator<PlainObjectBase<XprType> >(m)
0284   { }
0285 };
0286 
0287 template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
0288 struct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
0289   : evaluator<PlainObjectBase<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
0290 {
0291   typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
0292 
0293   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0294   evaluator() {}
0295 
0296   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0297   explicit evaluator(const XprType& m)
0298     : evaluator<PlainObjectBase<XprType> >(m)
0299   { }
0300 };
0301 
0302 // -------------------- Transpose --------------------
0303 
0304 template<typename ArgType>
0305 struct unary_evaluator<Transpose<ArgType>, IndexBased>
0306   : evaluator_base<Transpose<ArgType> >
0307 {
0308   typedef Transpose<ArgType> XprType;
0309 
0310   enum {
0311     CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
0312     Flags = evaluator<ArgType>::Flags ^ RowMajorBit,
0313     Alignment = evaluator<ArgType>::Alignment
0314   };
0315 
0316   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0317   explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}
0318 
0319   typedef typename XprType::Scalar Scalar;
0320   typedef typename XprType::CoeffReturnType CoeffReturnType;
0321 
0322   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0323   CoeffReturnType coeff(Index row, Index col) const
0324   {
0325     return m_argImpl.coeff(col, row);
0326   }
0327 
0328   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0329   CoeffReturnType coeff(Index index) const
0330   {
0331     return m_argImpl.coeff(index);
0332   }
0333 
0334   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0335   Scalar& coeffRef(Index row, Index col)
0336   {
0337     return m_argImpl.coeffRef(col, row);
0338   }
0339 
0340   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0341   typename XprType::Scalar& coeffRef(Index index)
0342   {
0343     return m_argImpl.coeffRef(index);
0344   }
0345 
0346   template<int LoadMode, typename PacketType>
0347   EIGEN_STRONG_INLINE
0348   PacketType packet(Index row, Index col) const
0349   {
0350     return m_argImpl.template packet<LoadMode,PacketType>(col, row);
0351   }
0352 
0353   template<int LoadMode, typename PacketType>
0354   EIGEN_STRONG_INLINE
0355   PacketType packet(Index index) const
0356   {
0357     return m_argImpl.template packet<LoadMode,PacketType>(index);
0358   }
0359 
0360   template<int StoreMode, typename PacketType>
0361   EIGEN_STRONG_INLINE
0362   void writePacket(Index row, Index col, const PacketType& x)
0363   {
0364     m_argImpl.template writePacket<StoreMode,PacketType>(col, row, x);
0365   }
0366 
0367   template<int StoreMode, typename PacketType>
0368   EIGEN_STRONG_INLINE
0369   void writePacket(Index index, const PacketType& x)
0370   {
0371     m_argImpl.template writePacket<StoreMode,PacketType>(index, x);
0372   }
0373 
0374 protected:
0375   evaluator<ArgType> m_argImpl;
0376 };
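
// Illustration of the index swap above (a sketch): with
//   MatrixXd A(2,3);
//   internal::evaluator<Transpose<MatrixXd> > ev(A.transpose());
// ev.coeff(i,j) returns A(j,i), and XOR-ing RowMajorBit in Flags expresses that a
// transposed column-major expression is traversed like a row-major one.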
0377 
0378 // -------------------- CwiseNullaryOp --------------------
0379 // Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator.
0380 // Likewise, there is no need for more sophisticated dispatching here.
0381 
0382 template<typename Scalar,typename NullaryOp,
0383          bool has_nullary = has_nullary_operator<NullaryOp>::value,
0384          bool has_unary   = has_unary_operator<NullaryOp>::value,
0385          bool has_binary  = has_binary_operator<NullaryOp>::value>
0386 struct nullary_wrapper
0387 {
0388   template <typename IndexType>
0389   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return op(i,j); }
0390   template <typename IndexType>
0391   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
0392 
0393   template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return op.template packetOp<T>(i,j); }
0394   template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
0395 };
0396 
0397 template<typename Scalar,typename NullaryOp>
0398 struct nullary_wrapper<Scalar,NullaryOp,true,false,false>
0399 {
0400   template <typename IndexType>
0401   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType=0, IndexType=0) const { return op(); }
0402   template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType=0, IndexType=0) const { return op.template packetOp<T>(); }
0403 };
0404 
0405 template<typename Scalar,typename NullaryOp>
0406 struct nullary_wrapper<Scalar,NullaryOp,false,false,true>
0407 {
0408   template <typename IndexType>
0409   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j=0) const { return op(i,j); }
0410   template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j=0) const { return op.template packetOp<T>(i,j); }
0411 };
0412 
0413 // We need the following specialization for vector-only functors assigned to a runtime vector,
0414 // for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd.
0415 // In this case, i==0 and j is used for the actual iteration.
0416 template<typename Scalar,typename NullaryOp>
0417 struct nullary_wrapper<Scalar,NullaryOp,false,true,false>
0418 {
0419   template <typename IndexType>
0420   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
0421     eigen_assert(i==0 || j==0);
0422     return op(i+j);
0423   }
0424   template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
0425     eigen_assert(i==0 || j==0);
0426     return op.template packetOp<T>(i+j);
0427   }
0428 
0429   template <typename IndexType>
0430   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
0431   template <typename T, typename IndexType>
0432   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
0433 };
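
// Example of the situation described above (illustrative): in
//   MatrixXd M(3,4);
//   M.row(1) = RowVectorXd::LinSpaced(4, 0.0, 1.0);
// the linspaced functor only provides the unary op(i), while the destination row is
// written with (i,j) coordinates where i==0; op(i+j) then reduces to op(j) as intended.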
0434 
0435 template<typename Scalar,typename NullaryOp>
0436 struct nullary_wrapper<Scalar,NullaryOp,false,false,false> {};
0437 
0438 #if 0 && EIGEN_COMP_MSVC>0
0439 // Disable this ugly workaround. This is now handled in traits<Ref>::match,
0440 // but this piece of code might still become handy if some other weird compilation
0441 // errors pop up again.
0442 
0443 // MSVC exhibits a weird compilation error when
0444 // compiling:
0445 //    Eigen::MatrixXf A = MatrixXf::Random(3,3);
0446 //    Ref<const MatrixXf> R = 2.f*A;
0447 // and has_*ary_operator<scalar_constant_op<float>> has not been instantiated yet.
0448 // The "problem" is that evaluator<2.f*A> is instantiated by traits<Ref>::match<2.f*A>
0449 // and at that time has_*ary_operator<T> returns true regardless of T.
0450 // Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>.
0451 // The trick is thus to defer the proper instantiation of nullary_wrapper until coeff()
0452 // and packet() are actually instantiated, as implemented below:
0453 
0454 // This is a simple wrapper around Index to enforce the re-instantiation of
0455 // has_*ary_operator when needed.
0456 template<typename T> struct nullary_wrapper_workaround_msvc {
0457   nullary_wrapper_workaround_msvc(const T&);
0458   operator T()const;
0459 };
0460 
0461 template<typename Scalar,typename NullaryOp>
0462 struct nullary_wrapper<Scalar,NullaryOp,true,true,true>
0463 {
0464   template <typename IndexType>
0465   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
0466     return nullary_wrapper<Scalar,NullaryOp,
0467     has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
0468     has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
0469     has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i,j);
0470   }
0471   template <typename IndexType>
0472   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const {
0473     return nullary_wrapper<Scalar,NullaryOp,
0474     has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
0475     has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
0476     has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i);
0477   }
0478 
0479   template <typename T, typename IndexType>
0480   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
0481     return nullary_wrapper<Scalar,NullaryOp,
0482     has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
0483     has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
0484     has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i,j);
0485   }
0486   template <typename T, typename IndexType>
0487   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const {
0488     return nullary_wrapper<Scalar,NullaryOp,
0489     has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
0490     has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
0491     has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i);
0492   }
0493 };
0494 #endif // MSVC workaround
0495 
0496 template<typename NullaryOp, typename PlainObjectType>
0497 struct evaluator<CwiseNullaryOp<NullaryOp,PlainObjectType> >
0498   : evaluator_base<CwiseNullaryOp<NullaryOp,PlainObjectType> >
0499 {
0500   typedef CwiseNullaryOp<NullaryOp,PlainObjectType> XprType;
0501   typedef typename internal::remove_all<PlainObjectType>::type PlainObjectTypeCleaned;
0502 
0503   enum {
0504     CoeffReadCost = internal::functor_traits<NullaryOp>::Cost,
0505 
0506     Flags = (evaluator<PlainObjectTypeCleaned>::Flags
0507           &  (  HereditaryBits
0508               | (functor_has_linear_access<NullaryOp>::ret  ? LinearAccessBit : 0)
0509               | (functor_traits<NullaryOp>::PacketAccess    ? PacketAccessBit : 0)))
0510           | (functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
0511     Alignment = AlignedMax
0512   };
0513 
0514   EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n)
0515     : m_functor(n.functor()), m_wrapper()
0516   {
0517     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
0518   }
0519 
0520   typedef typename XprType::CoeffReturnType CoeffReturnType;
0521 
0522   template <typename IndexType>
0523   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0524   CoeffReturnType coeff(IndexType row, IndexType col) const
0525   {
0526     return m_wrapper(m_functor, row, col);
0527   }
0528 
0529   template <typename IndexType>
0530   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0531   CoeffReturnType coeff(IndexType index) const
0532   {
0533     return m_wrapper(m_functor,index);
0534   }
0535 
0536   template<int LoadMode, typename PacketType, typename IndexType>
0537   EIGEN_STRONG_INLINE
0538   PacketType packet(IndexType row, IndexType col) const
0539   {
0540     return m_wrapper.template packetOp<PacketType>(m_functor, row, col);
0541   }
0542 
0543   template<int LoadMode, typename PacketType, typename IndexType>
0544   EIGEN_STRONG_INLINE
0545   PacketType packet(IndexType index) const
0546   {
0547     return m_wrapper.template packetOp<PacketType>(m_functor, index);
0548   }
0549 
0550 protected:
0551   const NullaryOp m_functor;
0552   const internal::nullary_wrapper<CoeffReturnType,NullaryOp> m_wrapper;
0553 };
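
// Usage sketch (illustrative): expressions such as MatrixXd::Constant(3,3,1.5) or
// MatrixXd::Identity(3,3) are CwiseNullaryOp's; they store no coefficients, and the
// evaluator above forwards every coeff()/packet() request to the functor through
// nullary_wrapper, e.g.
//   MatrixXd M = MatrixXd::Constant(3,3,1.5) + MatrixXd::Identity(3,3);
// evaluates both functors coefficient by coefficient (or packet by packet).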
0554 
0555 // -------------------- CwiseUnaryOp --------------------
0556 
0557 template<typename UnaryOp, typename ArgType>
0558 struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased >
0559   : evaluator_base<CwiseUnaryOp<UnaryOp, ArgType> >
0560 {
0561   typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;
0562 
0563   enum {
0564     CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),
0565 
0566     Flags = evaluator<ArgType>::Flags
0567           & (HereditaryBits | LinearAccessBit | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
0568     Alignment = evaluator<ArgType>::Alignment
0569   };
0570 
0571   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0572   explicit unary_evaluator(const XprType& op) : m_d(op)
0573   {
0574     EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
0575     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
0576   }
0577 
0578   typedef typename XprType::CoeffReturnType CoeffReturnType;
0579 
0580   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0581   CoeffReturnType coeff(Index row, Index col) const
0582   {
0583     return m_d.func()(m_d.argImpl.coeff(row, col));
0584   }
0585 
0586   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0587   CoeffReturnType coeff(Index index) const
0588   {
0589     return m_d.func()(m_d.argImpl.coeff(index));
0590   }
0591 
0592   template<int LoadMode, typename PacketType>
0593   EIGEN_STRONG_INLINE
0594   PacketType packet(Index row, Index col) const
0595   {
0596     return m_d.func().packetOp(m_d.argImpl.template packet<LoadMode, PacketType>(row, col));
0597   }
0598 
0599   template<int LoadMode, typename PacketType>
0600   EIGEN_STRONG_INLINE
0601   PacketType packet(Index index) const
0602   {
0603     return m_d.func().packetOp(m_d.argImpl.template packet<LoadMode, PacketType>(index));
0604   }
0605 
0606 protected:
0607 
0608   // this helper makes it possible to completely eliminate the functor if it is empty
0609   struct Data
0610   {
0611     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0612     Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {}
0613     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0614     const UnaryOp& func() const { return op; }
0615     UnaryOp op;
0616     evaluator<ArgType> argImpl;
0617   };
0618 
0619   Data m_d;
0620 };
0621 
0622 // -------------------- CwiseTernaryOp --------------------
0623 
0624 // this is a ternary expression
0625 template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
0626 struct evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
0627   : public ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
0628 {
0629   typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
0630   typedef ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > Base;
0631 
0632   EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
0633 };
0634 
0635 template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
0636 struct ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>, IndexBased, IndexBased>
0637   : evaluator_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
0638 {
0639   typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
0640 
0641   enum {
0642     CoeffReadCost = int(evaluator<Arg1>::CoeffReadCost) + int(evaluator<Arg2>::CoeffReadCost) + int(evaluator<Arg3>::CoeffReadCost) + int(functor_traits<TernaryOp>::Cost),
0643 
0644     Arg1Flags = evaluator<Arg1>::Flags,
0645     Arg2Flags = evaluator<Arg2>::Flags,
0646     Arg3Flags = evaluator<Arg3>::Flags,
0647     SameType = is_same<typename Arg1::Scalar,typename Arg2::Scalar>::value && is_same<typename Arg1::Scalar,typename Arg3::Scalar>::value,
0648     StorageOrdersAgree = (int(Arg1Flags)&RowMajorBit)==(int(Arg2Flags)&RowMajorBit) && (int(Arg1Flags)&RowMajorBit)==(int(Arg3Flags)&RowMajorBit),
0649     Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & (
0650         HereditaryBits
0651         | (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) &
0652            ( (StorageOrdersAgree ? LinearAccessBit : 0)
0653            | (functor_traits<TernaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
0654            )
0655         )
0656      ),
0657     Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit),
0658     Alignment = EIGEN_PLAIN_ENUM_MIN(
0659         EIGEN_PLAIN_ENUM_MIN(evaluator<Arg1>::Alignment, evaluator<Arg2>::Alignment),
0660         evaluator<Arg3>::Alignment)
0661   };
0662 
0663   EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr) : m_d(xpr)
0664   {
0665     EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<TernaryOp>::Cost);
0666     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
0667   }
0668 
0669   typedef typename XprType::CoeffReturnType CoeffReturnType;
0670 
0671   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0672   CoeffReturnType coeff(Index row, Index col) const
0673   {
0674     return m_d.func()(m_d.arg1Impl.coeff(row, col), m_d.arg2Impl.coeff(row, col), m_d.arg3Impl.coeff(row, col));
0675   }
0676 
0677   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0678   CoeffReturnType coeff(Index index) const
0679   {
0680     return m_d.func()(m_d.arg1Impl.coeff(index), m_d.arg2Impl.coeff(index), m_d.arg3Impl.coeff(index));
0681   }
0682 
0683   template<int LoadMode, typename PacketType>
0684   EIGEN_STRONG_INLINE
0685   PacketType packet(Index row, Index col) const
0686   {
0687     return m_d.func().packetOp(m_d.arg1Impl.template packet<LoadMode,PacketType>(row, col),
0688                                m_d.arg2Impl.template packet<LoadMode,PacketType>(row, col),
0689                                m_d.arg3Impl.template packet<LoadMode,PacketType>(row, col));
0690   }
0691 
0692   template<int LoadMode, typename PacketType>
0693   EIGEN_STRONG_INLINE
0694   PacketType packet(Index index) const
0695   {
0696     return m_d.func().packetOp(m_d.arg1Impl.template packet<LoadMode,PacketType>(index),
0697                                m_d.arg2Impl.template packet<LoadMode,PacketType>(index),
0698                                m_d.arg3Impl.template packet<LoadMode,PacketType>(index));
0699   }
0700 
0701 protected:
0702   // this helper makes it possible to completely eliminate the functor if it is empty
0703   struct Data
0704   {
0705     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0706     Data(const XprType& xpr) : op(xpr.functor()), arg1Impl(xpr.arg1()), arg2Impl(xpr.arg2()), arg3Impl(xpr.arg3()) {}
0707     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0708     const TernaryOp& func() const { return op; }
0709     TernaryOp op;
0710     evaluator<Arg1> arg1Impl;
0711     evaluator<Arg2> arg2Impl;
0712     evaluator<Arg3> arg3Impl;
0713   };
0714 
0715   Data m_d;
0716 };
0717 
0718 // -------------------- CwiseBinaryOp --------------------
0719 
0720 // this is a binary expression
0721 template<typename BinaryOp, typename Lhs, typename Rhs>
0722 struct evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
0723   : public binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
0724 {
0725   typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
0726   typedef binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > Base;
0727 
0728   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0729   explicit evaluator(const XprType& xpr) : Base(xpr) {}
0730 };
0731 
0732 template<typename BinaryOp, typename Lhs, typename Rhs>
0733 struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBased>
0734   : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
0735 {
0736   typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
0737 
0738   enum {
0739     CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
0740 
0741     LhsFlags = evaluator<Lhs>::Flags,
0742     RhsFlags = evaluator<Rhs>::Flags,
0743     SameType = is_same<typename Lhs::Scalar,typename Rhs::Scalar>::value,
0744     StorageOrdersAgree = (int(LhsFlags)&RowMajorBit)==(int(RhsFlags)&RowMajorBit),
0745     Flags0 = (int(LhsFlags) | int(RhsFlags)) & (
0746         HereditaryBits
0747       | (int(LhsFlags) & int(RhsFlags) &
0748            ( (StorageOrdersAgree ? LinearAccessBit : 0)
0749            | (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
0750            )
0751         )
0752      ),
0753     Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
0754     Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<Lhs>::Alignment,evaluator<Rhs>::Alignment)
0755   };
0756 
0757   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0758   explicit binary_evaluator(const XprType& xpr) : m_d(xpr)
0759   {
0760     EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
0761     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
0762   }
0763 
0764   typedef typename XprType::CoeffReturnType CoeffReturnType;
0765 
0766   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0767   CoeffReturnType coeff(Index row, Index col) const
0768   {
0769     return m_d.func()(m_d.lhsImpl.coeff(row, col), m_d.rhsImpl.coeff(row, col));
0770   }
0771 
0772   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0773   CoeffReturnType coeff(Index index) const
0774   {
0775     return m_d.func()(m_d.lhsImpl.coeff(index), m_d.rhsImpl.coeff(index));
0776   }
0777 
0778   template<int LoadMode, typename PacketType>
0779   EIGEN_STRONG_INLINE
0780   PacketType packet(Index row, Index col) const
0781   {
0782     return m_d.func().packetOp(m_d.lhsImpl.template packet<LoadMode,PacketType>(row, col),
0783                                m_d.rhsImpl.template packet<LoadMode,PacketType>(row, col));
0784   }
0785 
0786   template<int LoadMode, typename PacketType>
0787   EIGEN_STRONG_INLINE
0788   PacketType packet(Index index) const
0789   {
0790     return m_d.func().packetOp(m_d.lhsImpl.template packet<LoadMode,PacketType>(index),
0791                                m_d.rhsImpl.template packet<LoadMode,PacketType>(index));
0792   }
0793 
0794 protected:
0795 
0796   // this helper makes it possible to completely eliminate the functor if it is empty
0797   struct Data
0798   {
0799     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0800     Data(const XprType& xpr) : op(xpr.functor()), lhsImpl(xpr.lhs()), rhsImpl(xpr.rhs()) {}
0801     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0802     const BinaryOp& func() const { return op; }
0803     BinaryOp op;
0804     evaluator<Lhs> lhsImpl;
0805     evaluator<Rhs> rhsImpl;
0806   };
0807 
0808   Data m_d;
0809 };
0810 
0811 // -------------------- CwiseUnaryView --------------------
0812 
0813 template<typename UnaryOp, typename ArgType>
0814 struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType>, IndexBased>
0815   : evaluator_base<CwiseUnaryView<UnaryOp, ArgType> >
0816 {
0817   typedef CwiseUnaryView<UnaryOp, ArgType> XprType;
0818 
0819   enum {
0820     CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),
0821 
0822     Flags = (evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)),
0823 
0824     Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost...
0825   };
0826 
0827   EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op) : m_d(op)
0828   {
0829     EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
0830     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
0831   }
0832 
0833   typedef typename XprType::Scalar Scalar;
0834   typedef typename XprType::CoeffReturnType CoeffReturnType;
0835 
0836   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0837   CoeffReturnType coeff(Index row, Index col) const
0838   {
0839     return m_d.func()(m_d.argImpl.coeff(row, col));
0840   }
0841 
0842   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0843   CoeffReturnType coeff(Index index) const
0844   {
0845     return m_d.func()(m_d.argImpl.coeff(index));
0846   }
0847 
0848   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0849   Scalar& coeffRef(Index row, Index col)
0850   {
0851     return m_d.func()(m_d.argImpl.coeffRef(row, col));
0852   }
0853 
0854   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0855   Scalar& coeffRef(Index index)
0856   {
0857     return m_d.func()(m_d.argImpl.coeffRef(index));
0858   }
0859 
0860 protected:
0861 
0862   // this helper makes it possible to completely eliminate the functor if it is empty
0863   struct Data
0864   {
0865     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0866     Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {}
0867     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0868     const UnaryOp& func() const { return op; }
0869     UnaryOp op;
0870     evaluator<ArgType> argImpl;
0871   };
0872 
0873   Data m_d;
0874 };
0875 
0876 // -------------------- Map --------------------
0877 
0878 // FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ?
0879 // but that might complicate template specialization
0880 template<typename Derived, typename PlainObjectType>
0881 struct mapbase_evaluator;
0882 
0883 template<typename Derived, typename PlainObjectType>
0884 struct mapbase_evaluator : evaluator_base<Derived>
0885 {
0886   typedef Derived  XprType;
0887   typedef typename XprType::PointerType PointerType;
0888   typedef typename XprType::Scalar Scalar;
0889   typedef typename XprType::CoeffReturnType CoeffReturnType;
0890 
0891   enum {
0892     IsRowMajor = XprType::RowsAtCompileTime,
0893     ColsAtCompileTime = XprType::ColsAtCompileTime,
0894     CoeffReadCost = NumTraits<Scalar>::ReadCost
0895   };
0896 
0897   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0898   explicit mapbase_evaluator(const XprType& map)
0899     : m_data(const_cast<PointerType>(map.data())),
0900       m_innerStride(map.innerStride()),
0901       m_outerStride(map.outerStride())
0902   {
0903     EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(evaluator<Derived>::Flags&PacketAccessBit, internal::inner_stride_at_compile_time<Derived>::ret==1),
0904                         PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
0905     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
0906   }
0907 
0908   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0909   CoeffReturnType coeff(Index row, Index col) const
0910   {
0911     return m_data[col * colStride() + row * rowStride()];
0912   }
0913 
0914   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0915   CoeffReturnType coeff(Index index) const
0916   {
0917     return m_data[index * m_innerStride.value()];
0918   }
0919 
0920   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0921   Scalar& coeffRef(Index row, Index col)
0922   {
0923     return m_data[col * colStride() + row * rowStride()];
0924   }
0925 
0926   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0927   Scalar& coeffRef(Index index)
0928   {
0929     return m_data[index * m_innerStride.value()];
0930   }
0931 
0932   template<int LoadMode, typename PacketType>
0933   EIGEN_STRONG_INLINE
0934   PacketType packet(Index row, Index col) const
0935   {
0936     PointerType ptr = m_data + row * rowStride() + col * colStride();
0937     return internal::ploadt<PacketType, LoadMode>(ptr);
0938   }
0939 
0940   template<int LoadMode, typename PacketType>
0941   EIGEN_STRONG_INLINE
0942   PacketType packet(Index index) const
0943   {
0944     return internal::ploadt<PacketType, LoadMode>(m_data + index * m_innerStride.value());
0945   }
0946 
0947   template<int StoreMode, typename PacketType>
0948   EIGEN_STRONG_INLINE
0949   void writePacket(Index row, Index col, const PacketType& x)
0950   {
0951     PointerType ptr = m_data + row * rowStride() + col * colStride();
0952     return internal::pstoret<Scalar, PacketType, StoreMode>(ptr, x);
0953   }
0954 
0955   template<int StoreMode, typename PacketType>
0956   EIGEN_STRONG_INLINE
0957   void writePacket(Index index, const PacketType& x)
0958   {
0959     internal::pstoret<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x);
0960   }
0961 protected:
0962   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
0963   Index rowStride() const EIGEN_NOEXCEPT {
0964     return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value();
0965   }
0966   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
0967   Index colStride() const EIGEN_NOEXCEPT {
0968      return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value();
0969   }
0970 
0971   PointerType m_data;
0972   const internal::variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_innerStride;
0973   const internal::variable_if_dynamic<Index, XprType::OuterStrideAtCompileTime> m_outerStride;
0974 };
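
// Stride bookkeeping above in practice (illustrative; `data` and `lda` stand for a
// valid pointer and outer stride): for a column-major
//   Map<MatrixXd, 0, OuterStride<> > M(data, rows, cols, OuterStride<>(lda));
// rowStride()==innerStride()==1 and colStride()==lda, so coeff(r,c) reads
// m_data[c*lda + r], i.e. the classical dense column-major addressing.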
0975 
0976 template<typename PlainObjectType, int MapOptions, typename StrideType>
0977 struct evaluator<Map<PlainObjectType, MapOptions, StrideType> >
0978   : public mapbase_evaluator<Map<PlainObjectType, MapOptions, StrideType>, PlainObjectType>
0979 {
0980   typedef Map<PlainObjectType, MapOptions, StrideType> XprType;
0981   typedef typename XprType::Scalar Scalar;
0982   // TODO: should check for smaller packet types once we can handle multi-sized packet types
0983   typedef typename packet_traits<Scalar>::type PacketScalar;
0984 
0985   enum {
0986     InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
0987                              ? int(PlainObjectType::InnerStrideAtCompileTime)
0988                              : int(StrideType::InnerStrideAtCompileTime),
0989     OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
0990                              ? int(PlainObjectType::OuterStrideAtCompileTime)
0991                              : int(StrideType::OuterStrideAtCompileTime),
0992     HasNoInnerStride = InnerStrideAtCompileTime == 1,
0993     HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
0994     HasNoStride = HasNoInnerStride && HasNoOuterStride,
0995     IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,
0996 
0997     PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit),
0998     LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit),
0999     Flags = int( evaluator<PlainObjectType>::Flags) & (LinearAccessMask&PacketAccessMask),
1000 
1001     Alignment = int(MapOptions)&int(AlignedMask)
1002   };
1003 
1004   EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map)
1005     : mapbase_evaluator<XprType, PlainObjectType>(map)
1006   { }
1007 };
1008 
1009 // -------------------- Ref --------------------
1010 
1011 template<typename PlainObjectType, int RefOptions, typename StrideType>
1012 struct evaluator<Ref<PlainObjectType, RefOptions, StrideType> >
1013   : public mapbase_evaluator<Ref<PlainObjectType, RefOptions, StrideType>, PlainObjectType>
1014 {
1015   typedef Ref<PlainObjectType, RefOptions, StrideType> XprType;
1016 
1017   enum {
1018     Flags = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Flags,
1019     Alignment = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Alignment
1020   };
1021 
1022   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1023   explicit evaluator(const XprType& ref)
1024     : mapbase_evaluator<XprType, PlainObjectType>(ref)
1025   { }
1026 };
1027 
1028 // -------------------- Block --------------------
1029 
1030 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel,
1031          bool HasDirectAccess = internal::has_direct_access<ArgType>::ret> struct block_evaluator;
1032 
1033 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1034 struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
1035   : block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel>
1036 {
1037   typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
1038   typedef typename XprType::Scalar Scalar;
1039   // TODO: should check for smaller packet types once we can handle multi-sized packet types
1040   typedef typename packet_traits<Scalar>::type PacketScalar;
1041 
1042   enum {
1043     CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
1044 
1045     RowsAtCompileTime = traits<XprType>::RowsAtCompileTime,
1046     ColsAtCompileTime = traits<XprType>::ColsAtCompileTime,
1047     MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime,
1048     MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime,
1049 
1050     ArgTypeIsRowMajor = (int(evaluator<ArgType>::Flags)&RowMajorBit) != 0,
1051     IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1
1052                : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0
1053                : ArgTypeIsRowMajor,
1054     HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor),
1055     InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
1056     InnerStrideAtCompileTime = HasSameStorageOrderAsArgType
1057                              ? int(inner_stride_at_compile_time<ArgType>::ret)
1058                              : int(outer_stride_at_compile_time<ArgType>::ret),
1059     OuterStrideAtCompileTime = HasSameStorageOrderAsArgType
1060                              ? int(outer_stride_at_compile_time<ArgType>::ret)
1061                              : int(inner_stride_at_compile_time<ArgType>::ret),
1062     MaskPacketAccessBit = (InnerStrideAtCompileTime == 1 || HasSameStorageOrderAsArgType) ? PacketAccessBit : 0,
1063 
1064     FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator<ArgType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
1065     FlagsRowMajorBit = XprType::Flags&RowMajorBit,
1066     Flags0 = evaluator<ArgType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
1067                                            DirectAccessBit |
1068                                            MaskPacketAccessBit),
1069     Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit,
1070 
1071     PacketAlignment = unpacket_traits<PacketScalar>::alignment,
1072     Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic)
1073                              && (OuterStrideAtCompileTime!=0)
1074                              && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,
1075     Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ArgType>::Alignment, Alignment0)
1076   };
1077   typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type;
1078   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1079   explicit evaluator(const XprType& block) : block_evaluator_type(block)
1080   {
1081     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
1082   }
1083 };
1084 
1085 // no direct-access => dispatch to a unary evaluator
1086 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1087 struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /*HasDirectAccess*/ false>
1088   : unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
1089 {
1090   typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
1091 
1092   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1093   explicit block_evaluator(const XprType& block)
1094     : unary_evaluator<XprType>(block)
1095   {}
1096 };
1097 
1098 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1099 struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBased>
1100   : evaluator_base<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
1101 {
1102   typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
1103 
1104   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1105   explicit unary_evaluator(const XprType& block)
1106     : m_argImpl(block.nestedExpression()),
1107       m_startRow(block.startRow()),
1108       m_startCol(block.startCol()),
1109       m_linear_offset(ForwardLinearAccess?(ArgType::IsRowMajor ? block.startRow()*block.nestedExpression().cols() + block.startCol() : block.startCol()*block.nestedExpression().rows() + block.startRow()):0)
1110   { }
1111 
1112   typedef typename XprType::Scalar Scalar;
1113   typedef typename XprType::CoeffReturnType CoeffReturnType;
1114 
1115   enum {
1116     RowsAtCompileTime = XprType::RowsAtCompileTime,
1117     ForwardLinearAccess = (InnerPanel || int(XprType::IsRowMajor)==int(ArgType::IsRowMajor)) && bool(evaluator<ArgType>::Flags&LinearAccessBit)
1118   };
1119 
1120   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1121   CoeffReturnType coeff(Index row, Index col) const
1122   {
1123     return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col);
1124   }
1125 
1126   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1127   CoeffReturnType coeff(Index index) const
1128   {
1129     return linear_coeff_impl(index, bool_constant<ForwardLinearAccess>());
1130   }
1131 
1132   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1133   Scalar& coeffRef(Index row, Index col)
1134   {
1135     return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col);
1136   }
1137 
1138   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1139   Scalar& coeffRef(Index index)
1140   {
1141     return linear_coeffRef_impl(index, bool_constant<ForwardLinearAccess>());
1142   }
1143 
1144   template<int LoadMode, typename PacketType>
1145   EIGEN_STRONG_INLINE
1146   PacketType packet(Index row, Index col) const
1147   {
1148     return m_argImpl.template packet<LoadMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col);
1149   }
1150 
1151   template<int LoadMode, typename PacketType>
1152   EIGEN_STRONG_INLINE
1153   PacketType packet(Index index) const
1154   {
1155     if (ForwardLinearAccess)
1156       return m_argImpl.template packet<LoadMode,PacketType>(m_linear_offset.value() + index);
1157     else
1158       return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
1159                                          RowsAtCompileTime == 1 ? index : 0);
1160   }
1161 
1162   template<int StoreMode, typename PacketType>
1163   EIGEN_STRONG_INLINE
1164   void writePacket(Index row, Index col, const PacketType& x)
1165   {
1166     return m_argImpl.template writePacket<StoreMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col, x);
1167   }
1168 
1169   template<int StoreMode, typename PacketType>
1170   EIGEN_STRONG_INLINE
1171   void writePacket(Index index, const PacketType& x)
1172   {
1173     if (ForwardLinearAccess)
1174       return m_argImpl.template writePacket<StoreMode,PacketType>(m_linear_offset.value() + index, x);
1175     else
1176       return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
1177                                               RowsAtCompileTime == 1 ? index : 0,
1178                                               x);
1179   }
1180 
1181 protected:
1182   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1183   CoeffReturnType linear_coeff_impl(Index index, internal::true_type /* ForwardLinearAccess */) const
1184   {
1185     return m_argImpl.coeff(m_linear_offset.value() + index);
1186   }
1187   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1188   CoeffReturnType linear_coeff_impl(Index index, internal::false_type /* not ForwardLinearAccess */) const
1189   {
1190     return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
1191   }
1192 
1193   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1194   Scalar& linear_coeffRef_impl(Index index, internal::true_type /* ForwardLinearAccess */)
1195   {
1196     return m_argImpl.coeffRef(m_linear_offset.value() + index);
1197   }
1198   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1199   Scalar& linear_coeffRef_impl(Index index, internal::false_type /* not ForwardLinearAccess */)
1200   {
1201     return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
1202   }
1203 
1204   evaluator<ArgType> m_argImpl;
1205   const variable_if_dynamic<Index, (ArgType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow;
1206   const variable_if_dynamic<Index, (ArgType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol;
1207   const variable_if_dynamic<Index, ForwardLinearAccess ? Dynamic : 0> m_linear_offset;
1208 };
1209 
1210 // TODO: This evaluator does not actually use the child evaluator;
1211 // all action is via the data() as returned by the Block expression.
1212 
1213 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1214 struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAccess */ true>
1215   : mapbase_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>,
1216                       typename Block<ArgType, BlockRows, BlockCols, InnerPanel>::PlainObject>
1217 {
1218   typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
1219   typedef typename XprType::Scalar Scalar;
1220 
1221   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1222   explicit block_evaluator(const XprType& block)
1223     : mapbase_evaluator<XprType, typename XprType::PlainObject>(block)
1224   {
1225     // TODO: for the 3.3 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime
1226     eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
1227   }
1228 };
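
// Dispatch example for the two block_evaluator specializations (illustrative):
//   MatrixXd A(8,8), B(8,8);
//   A.block(1,1,4,4)         // block of a plain matrix: direct access,
//                            // handled by the mapbase_evaluator path just above;
//   (A + B).block(1,1,4,4)   // block of an expression: no direct access,
//                            // handled by unary_evaluator<Block<...>, IndexBased>.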
1229 
1230 
1231 // -------------------- Select --------------------
1232 // NOTE shall we introduce a ternary_evaluator?
1233 
1234 // TODO enable vectorization for Select
1235 template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
1236 struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
1237   : evaluator_base<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
1238 {
1239   typedef Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> XprType;
1240   enum {
1241     CoeffReadCost = evaluator<ConditionMatrixType>::CoeffReadCost
1242                   + EIGEN_PLAIN_ENUM_MAX(evaluator<ThenMatrixType>::CoeffReadCost,
1243                                          evaluator<ElseMatrixType>::CoeffReadCost),
1244 
1245     Flags = (unsigned int)evaluator<ThenMatrixType>::Flags & evaluator<ElseMatrixType>::Flags & HereditaryBits,
1246 
1247     Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment)
1248   };
1249 
1250   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1251   explicit evaluator(const XprType& select)
1252     : m_conditionImpl(select.conditionMatrix()),
1253       m_thenImpl(select.thenMatrix()),
1254       m_elseImpl(select.elseMatrix())
1255   {
1256     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
1257   }
1258 
1259   typedef typename XprType::CoeffReturnType CoeffReturnType;
1260 
1261   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1262   CoeffReturnType coeff(Index row, Index col) const
1263   {
1264     if (m_conditionImpl.coeff(row, col))
1265       return m_thenImpl.coeff(row, col);
1266     else
1267       return m_elseImpl.coeff(row, col);
1268   }
1269 
1270   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1271   CoeffReturnType coeff(Index index) const
1272   {
1273     if (m_conditionImpl.coeff(index))
1274       return m_thenImpl.coeff(index);
1275     else
1276       return m_elseImpl.coeff(index);
1277   }
1278 
1279 protected:
1280   evaluator<ConditionMatrixType> m_conditionImpl;
1281   evaluator<ThenMatrixType> m_thenImpl;
1282   evaluator<ElseMatrixType> m_elseImpl;
1283 };
1284 
1285 
1286 // -------------------- Replicate --------------------
1287 
1288 template<typename ArgType, int RowFactor, int ColFactor>
1289 struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
1290   : evaluator_base<Replicate<ArgType, RowFactor, ColFactor> >
1291 {
1292   typedef Replicate<ArgType, RowFactor, ColFactor> XprType;
1293   typedef typename XprType::CoeffReturnType CoeffReturnType;
1294   enum {
1295     Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor
1296   };
1297   typedef typename internal::nested_eval<ArgType,Factor>::type ArgTypeNested;
1298   typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;
1299 
1300   enum {
1301     CoeffReadCost = evaluator<ArgTypeNestedCleaned>::CoeffReadCost,
1302     LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0,
1303     Flags = (evaluator<ArgTypeNestedCleaned>::Flags & (HereditaryBits|LinearAccessMask) & ~RowMajorBit) | (traits<XprType>::Flags & RowMajorBit),
1304 
1305     Alignment = evaluator<ArgTypeNestedCleaned>::Alignment
1306   };
1307 
1308   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1309   explicit unary_evaluator(const XprType& replicate)
1310     : m_arg(replicate.nestedExpression()),
1311       m_argImpl(m_arg),
1312       m_rows(replicate.nestedExpression().rows()),
1313       m_cols(replicate.nestedExpression().cols())
1314   {}
1315 
1316   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1317   CoeffReturnType coeff(Index row, Index col) const
1318   {
1319     // try to avoid using modulo; this is a pure optimization strategy
1320     const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
1321                            : RowFactor==1 ? row
1322                            : row % m_rows.value();
1323     const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
1324                            : ColFactor==1 ? col
1325                            : col % m_cols.value();
1326 
1327     return m_argImpl.coeff(actual_row, actual_col);
1328   }
1329 
1330   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1331   CoeffReturnType coeff(Index index) const
1332   {
1333     // try to avoid using modulo; this is a pure optimization strategy
1334     const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
1335                                   ? (ColFactor==1 ?  index : index%m_cols.value())
1336                                   : (RowFactor==1 ?  index : index%m_rows.value());
1337 
1338     return m_argImpl.coeff(actual_index);
1339   }
1340 
1341   template<int LoadMode, typename PacketType>
1342   EIGEN_STRONG_INLINE
1343   PacketType packet(Index row, Index col) const
1344   {
1345     const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
1346                            : RowFactor==1 ? row
1347                            : row % m_rows.value();
1348     const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
1349                            : ColFactor==1 ? col
1350                            : col % m_cols.value();
1351 
1352     return m_argImpl.template packet<LoadMode,PacketType>(actual_row, actual_col);
1353   }
1354 
1355   template<int LoadMode, typename PacketType>
1356   EIGEN_STRONG_INLINE
1357   PacketType packet(Index index) const
1358   {
1359     const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
1360                                   ? (ColFactor==1 ?  index : index%m_cols.value())
1361                                   : (RowFactor==1 ?  index : index%m_rows.value());
1362 
1363     return m_argImpl.template packet<LoadMode,PacketType>(actual_index);
1364   }
1365 
1366 protected:
1367   const ArgTypeNested m_arg;
1368   evaluator<ArgTypeNestedCleaned> m_argImpl;
1369   const variable_if_dynamic<Index, ArgType::RowsAtCompileTime> m_rows;
1370   const variable_if_dynamic<Index, ArgType::ColsAtCompileTime> m_cols;
1371 };
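
// A small usage sketch (illustrative only, not part of this file): the
// coeff(row, col) mapping above wraps indices back into the argument, so every
// tile of the replicated expression reads the same underlying coefficients.
//
//   Eigen::Vector3d v(1.0, 2.0, 3.0);
//   Eigen::MatrixXd m = v.replicate(2, 2);   // 6x2 result, two tiles in each direction
//   // m(4, 1) is served by the argument coefficient (4 % 3, 0), i.e. v(1) == 2.0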
1372 
1373 // -------------------- MatrixWrapper and ArrayWrapper --------------------
1374 //
1375 // evaluator_wrapper_base<T> is a common base class for the
1376 // MatrixWrapper and ArrayWrapper evaluators.
1377 
1378 template<typename XprType>
1379 struct evaluator_wrapper_base
1380   : evaluator_base<XprType>
1381 {
1382   typedef typename remove_all<typename XprType::NestedExpressionType>::type ArgType;
1383   enum {
1384     CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
1385     Flags = evaluator<ArgType>::Flags,
1386     Alignment = evaluator<ArgType>::Alignment
1387   };
1388 
1389   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1390   explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}
1391 
1392   typedef typename ArgType::Scalar Scalar;
1393   typedef typename ArgType::CoeffReturnType CoeffReturnType;
1394 
1395   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1396   CoeffReturnType coeff(Index row, Index col) const
1397   {
1398     return m_argImpl.coeff(row, col);
1399   }
1400 
1401   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1402   CoeffReturnType coeff(Index index) const
1403   {
1404     return m_argImpl.coeff(index);
1405   }
1406 
1407   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1408   Scalar& coeffRef(Index row, Index col)
1409   {
1410     return m_argImpl.coeffRef(row, col);
1411   }
1412 
1413   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1414   Scalar& coeffRef(Index index)
1415   {
1416     return m_argImpl.coeffRef(index);
1417   }
1418 
1419   template<int LoadMode, typename PacketType>
1420   EIGEN_STRONG_INLINE
1421   PacketType packet(Index row, Index col) const
1422   {
1423     return m_argImpl.template packet<LoadMode,PacketType>(row, col);
1424   }
1425 
1426   template<int LoadMode, typename PacketType>
1427   EIGEN_STRONG_INLINE
1428   PacketType packet(Index index) const
1429   {
1430     return m_argImpl.template packet<LoadMode,PacketType>(index);
1431   }
1432 
1433   template<int StoreMode, typename PacketType>
1434   EIGEN_STRONG_INLINE
1435   void writePacket(Index row, Index col, const PacketType& x)
1436   {
1437     m_argImpl.template writePacket<StoreMode>(row, col, x);
1438   }
1439 
1440   template<int StoreMode, typename PacketType>
1441   EIGEN_STRONG_INLINE
1442   void writePacket(Index index, const PacketType& x)
1443   {
1444     m_argImpl.template writePacket<StoreMode>(index, x);
1445   }
1446 
1447 protected:
1448   evaluator<ArgType> m_argImpl;
1449 };
1450 
1451 template<typename TArgType>
1452 struct unary_evaluator<MatrixWrapper<TArgType> >
1453   : evaluator_wrapper_base<MatrixWrapper<TArgType> >
1454 {
1455   typedef MatrixWrapper<TArgType> XprType;
1456 
1457   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1458   explicit unary_evaluator(const XprType& wrapper)
1459     : evaluator_wrapper_base<MatrixWrapper<TArgType> >(wrapper.nestedExpression())
1460   { }
1461 };
1462 
1463 template<typename TArgType>
1464 struct unary_evaluator<ArrayWrapper<TArgType> >
1465   : evaluator_wrapper_base<ArrayWrapper<TArgType> >
1466 {
1467   typedef ArrayWrapper<TArgType> XprType;
1468 
1469   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1470   explicit unary_evaluator(const XprType& wrapper)
1471     : evaluator_wrapper_base<ArrayWrapper<TArgType> >(wrapper.nestedExpression())
1472   { }
1473 };
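
// For illustration (user-level code, not part of this file): the wrapper
// evaluators above forward every accessor unchanged to the nested expression,
// so switching between the matrix and array worlds only changes which operator
// overloads apply, not how coefficients are evaluated.
//
//   Eigen::MatrixXd M = Eigen::MatrixXd::Random(2, 2);
//   Eigen::MatrixXd C = (M.array() * M.array()).matrix();   // coefficient-wise product via ArrayWrapper/MatrixWrapper
//   Eigen::MatrixXd P = M * M;                               // ordinary matrix product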
1474 
1475 
1476 // -------------------- Reverse --------------------
1477 
1478 // defined in Reverse.h:
1479 template<typename PacketType, bool ReversePacket> struct reverse_packet_cond;
1480 
1481 template<typename ArgType, int Direction>
1482 struct unary_evaluator<Reverse<ArgType, Direction> >
1483   : evaluator_base<Reverse<ArgType, Direction> >
1484 {
1485   typedef Reverse<ArgType, Direction> XprType;
1486   typedef typename XprType::Scalar Scalar;
1487   typedef typename XprType::CoeffReturnType CoeffReturnType;
1488 
1489   enum {
1490     IsRowMajor = XprType::IsRowMajor,
1491     IsColMajor = !IsRowMajor,
1492     ReverseRow = (Direction == Vertical)   || (Direction == BothDirections),
1493     ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),
1494     ReversePacket = (Direction == BothDirections)
1495                     || ((Direction == Vertical)   && IsColMajor)
1496                     || ((Direction == Horizontal) && IsRowMajor),
1497 
1498     CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
1499 
1500     // enable LinearAccess only when vectorization is available, because the linear index computation incurs an extra multiplication (m_rows.value() * m_cols.value())
1501     // FIXME enable DirectAccess with negative strides?
1502     Flags0 = evaluator<ArgType>::Flags,
1503     LinearAccess = ( (Direction==BothDirections) && (int(Flags0)&PacketAccessBit) )
1504                   || ((ReverseRow && XprType::ColsAtCompileTime==1) || (ReverseCol && XprType::RowsAtCompileTime==1))
1505                  ? LinearAccessBit : 0,
1506 
1507     Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess),
1508 
1509     Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f.
1510   };
1511 
1512   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1513   explicit unary_evaluator(const XprType& reverse)
1514     : m_argImpl(reverse.nestedExpression()),
1515       m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1),
1516       m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1)
1517   { }
1518 
1519   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1520   CoeffReturnType coeff(Index row, Index col) const
1521   {
1522     return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row,
1523                            ReverseCol ? m_cols.value() - col - 1 : col);
1524   }
1525 
1526   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1527   CoeffReturnType coeff(Index index) const
1528   {
1529     return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1);
1530   }
1531 
1532   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1533   Scalar& coeffRef(Index row, Index col)
1534   {
1535     return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row,
1536                               ReverseCol ? m_cols.value() - col - 1 : col);
1537   }
1538 
1539   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1540   Scalar& coeffRef(Index index)
1541   {
1542     return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1);
1543   }
1544 
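  // When a packet is loaded along the direction being reversed in memory
  // (ReversePacket above), the load must start PacketSize elements earlier and
  // the loaded packet must then be reversed in-register by reverse_packet_cond.
  // For instance, on a column-major argument reversed vertically with
  // PacketSize == 4, packet(row, col) reads the argument rows
  // [m_rows - row - 4, m_rows - row - 1] forward and then reverses the packet.
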
1545   template<int LoadMode, typename PacketType>
1546   EIGEN_STRONG_INLINE
1547   PacketType packet(Index row, Index col) const
1548   {
1549     enum {
1550       PacketSize = unpacket_traits<PacketType>::size,
1551       OffsetRow  = ReverseRow && IsColMajor ? PacketSize : 1,
1552       OffsetCol  = ReverseCol && IsRowMajor ? PacketSize : 1
1553     };
1554     typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;
1555     return reverse_packet::run(m_argImpl.template packet<LoadMode,PacketType>(
1556                                   ReverseRow ? m_rows.value() - row - OffsetRow : row,
1557                                   ReverseCol ? m_cols.value() - col - OffsetCol : col));
1558   }
1559 
1560   template<int LoadMode, typename PacketType>
1561   EIGEN_STRONG_INLINE
1562   PacketType packet(Index index) const
1563   {
1564     enum { PacketSize = unpacket_traits<PacketType>::size };
1565     return preverse(m_argImpl.template packet<LoadMode,PacketType>(m_rows.value() * m_cols.value() - index - PacketSize));
1566   }
1567 
1568   template<int LoadMode, typename PacketType>
1569   EIGEN_STRONG_INLINE
1570   void writePacket(Index row, Index col, const PacketType& x)
1571   {
1572     // FIXME we could factor out the index computation shared with packet(i,j)
1573     enum {
1574       PacketSize = unpacket_traits<PacketType>::size,
1575       OffsetRow  = ReverseRow && IsColMajor ? PacketSize : 1,
1576       OffsetCol  = ReverseCol && IsRowMajor ? PacketSize : 1
1577     };
1578     typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;
1579     m_argImpl.template writePacket<LoadMode>(
1580                                   ReverseRow ? m_rows.value() - row - OffsetRow : row,
1581                                   ReverseCol ? m_cols.value() - col - OffsetCol : col,
1582                                   reverse_packet::run(x));
1583   }
1584 
1585   template<int LoadMode, typename PacketType>
1586   EIGEN_STRONG_INLINE
1587   void writePacket(Index index, const PacketType& x)
1588   {
1589     enum { PacketSize = unpacket_traits<PacketType>::size };
1590     m_argImpl.template writePacket<LoadMode>
1591       (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x));
1592   }
1593 
1594 protected:
1595   evaluator<ArgType> m_argImpl;
1596 
1597   // If we do not reverse rows, then we do not need to know the number of rows; same for columns
1598   // Nonetheless, in that case it is important to set it to 1 so that the coeff(index) method works correctly for vectors.
1599   const variable_if_dynamic<Index, ReverseRow ? ArgType::RowsAtCompileTime : 1> m_rows;
1600   const variable_if_dynamic<Index, ReverseCol ? ArgType::ColsAtCompileTime : 1> m_cols;
1601 };
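
// Usage sketch (illustrative, not part of this file): for a plain vector
// reversal the linear coeff(index) path above reads coefficient
// size - 1 - index of the argument.
//
//   Eigen::Vector4d v(1.0, 2.0, 3.0, 4.0);
//   Eigen::Vector4d r = v.reverse();   // r == [4, 3, 2, 1]; r(0) reads v(4*1 - 0 - 1) == v(3)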
1602 
1603 
1604 // -------------------- Diagonal --------------------
1605 
1606 template<typename ArgType, int DiagIndex>
1607 struct evaluator<Diagonal<ArgType, DiagIndex> >
1608   : evaluator_base<Diagonal<ArgType, DiagIndex> >
1609 {
1610   typedef Diagonal<ArgType, DiagIndex> XprType;
1611 
1612   enum {
1613     CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
1614 
1615     Flags = (unsigned int)(evaluator<ArgType>::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit,
1616 
1617     Alignment = 0
1618   };
1619 
1620   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1621   explicit evaluator(const XprType& diagonal)
1622     : m_argImpl(diagonal.nestedExpression()),
1623       m_index(diagonal.index())
1624   { }
1625 
1626   typedef typename XprType::Scalar Scalar;
1627   typedef typename XprType::CoeffReturnType CoeffReturnType;
1628 
1629   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1630   CoeffReturnType coeff(Index row, Index) const
1631   {
1632     return m_argImpl.coeff(row + rowOffset(), row + colOffset());
1633   }
1634 
1635   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1636   CoeffReturnType coeff(Index index) const
1637   {
1638     return m_argImpl.coeff(index + rowOffset(), index + colOffset());
1639   }
1640 
1641   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1642   Scalar& coeffRef(Index row, Index)
1643   {
1644     return m_argImpl.coeffRef(row + rowOffset(), row + colOffset());
1645   }
1646 
1647   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1648   Scalar& coeffRef(Index index)
1649   {
1650     return m_argImpl.coeffRef(index + rowOffset(), index + colOffset());
1651   }
1652 
1653 protected:
1654   evaluator<ArgType> m_argImpl;
1655   const internal::variable_if_dynamicindex<Index, XprType::DiagIndex> m_index;
1656 
1657 private:
1658   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
1659   Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); }
1660   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
1661   Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; }
1662 };
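
// Illustrative example (user-level code, not part of this file): the
// rowOffset()/colOffset() helpers above shift the linear index onto the
// requested super- or sub-diagonal of the nested expression.
//
//   Eigen::Matrix3d m;
//   m << 1, 2, 3,
//        4, 5, 6,
//        7, 8, 9;
//   Eigen::Vector2d d  = m.diagonal<1>();   // super-diagonal: coeff(i) == m(i, i+1)  ->  [2, 6]
//   Eigen::VectorXd d2 = m.diagonal(-1);    // sub-diagonal:   coeff(i) == m(i+1, i)  ->  [4, 8]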
1663 
1664 
1665 //----------------------------------------------------------------------
1666 // deprecated code
1667 //----------------------------------------------------------------------
1668 
1669 // -------------------- EvalToTemp --------------------
1670 
1671 // expression class for evaluating a nested expression to a temporary
1672 
1673 template<typename ArgType> class EvalToTemp;
1674 
1675 template<typename ArgType>
1676 struct traits<EvalToTemp<ArgType> >
1677   : public traits<ArgType>
1678 { };
1679 
1680 template<typename ArgType>
1681 class EvalToTemp
1682   : public dense_xpr_base<EvalToTemp<ArgType> >::type
1683 {
1684  public:
1685 
1686   typedef typename dense_xpr_base<EvalToTemp>::type Base;
1687   EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp)
1688 
1689   explicit EvalToTemp(const ArgType& arg)
1690     : m_arg(arg)
1691   { }
1692 
1693   const ArgType& arg() const
1694   {
1695     return m_arg;
1696   }
1697 
1698   EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT
1699   {
1700     return m_arg.rows();
1701   }
1702 
1703   EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT
1704   {
1705     return m_arg.cols();
1706   }
1707 
1708  private:
1709   const ArgType& m_arg;
1710 };
1711 
1712 template<typename ArgType>
1713 struct evaluator<EvalToTemp<ArgType> >
1714   : public evaluator<typename ArgType::PlainObject>
1715 {
1716   typedef EvalToTemp<ArgType>                   XprType;
1717   typedef typename ArgType::PlainObject         PlainObject;
1718   typedef evaluator<PlainObject> Base;
1719 
1720   EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
1721     : m_result(xpr.arg())
1722   {
1723     ::new (static_cast<Base*>(this)) Base(m_result);
1724   }
1725 
1726   // This constructor is used when nesting an EvalToTemp evaluator in another evaluator
1727   EIGEN_DEVICE_FUNC evaluator(const ArgType& arg)
1728     : m_result(arg)
1729   {
1730     ::new (static_cast<Base*>(this)) Base(m_result);
1731   }
1732 
1733 protected:
1734   PlainObject m_result;
1735 };
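
// The placement-new calls above are needed because base classes are constructed
// before data members: the Base evaluator cannot be handed m_result in the
// initializer list, so it is first default-constructed and then re-constructed
// in place once the temporary exists. A minimal sketch of the same pattern
// outside of Eigen (hypothetical types, for illustration only):
//
//   #include <new>
//   #include <vector>
//
//   struct Viewer
//   {
//     Viewer() : p(nullptr) {}
//     explicit Viewer(const std::vector<double>& v) : p(v.data()) {}
//     const double* p;
//   };
//
//   struct EvalThenView : Viewer
//   {
//     explicit EvalThenView(std::size_t n)
//       : m_result(n, 0.0)   // Viewer is default-constructed before m_result
//     {
//       ::new (static_cast<Viewer*>(this)) Viewer(m_result);   // rebuild the base on the ready member
//     }
//     std::vector<double> m_result;
//   };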
1736 
1737 } // namespace internal
1738 
1739 } // end namespace Eigen
1740 
1741 #endif // EIGEN_COREEVALUATORS_H