File: /include/eigen3/Eigen/src/Core/util/XprHelper.h

0001 // This file is part of Eigen, a lightweight C++ template library
0002 // for linear algebra.
0003 //
0004 // Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
0005 // Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
0006 //
0007 // This Source Code Form is subject to the terms of the Mozilla
0008 // Public License v. 2.0. If a copy of the MPL was not distributed
0009 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
0010 
0011 #ifndef EIGEN_XPRHELPER_H
0012 #define EIGEN_XPRHELPER_H
0013 
0014 // just a workaround because GCC seems to not really like empty structs
0015 // FIXME: gcc 4.3 generates bad code when strict-aliasing is enabled
0016 // so currently we simply disable this optimization for gcc 4.3
0017 #if EIGEN_COMP_GNUC && !EIGEN_GNUC_AT(4,3)
0018   #define EIGEN_EMPTY_STRUCT_CTOR(X) \
0019     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X() {} \
0020     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE X(const X& ) {}
0021 #else
0022   #define EIGEN_EMPTY_STRUCT_CTOR(X)
0023 #endif
0024 
0025 namespace Eigen {
0026 
0027 namespace internal {
0028 
0029 template<typename IndexDest, typename IndexSrc>
0030 EIGEN_DEVICE_FUNC
0031 inline IndexDest convert_index(const IndexSrc& idx) {
0032   // for sizeof(IndexDest)>=sizeof(IndexSrc) compilers should be able to optimize this away:
0033   eigen_internal_assert(idx <= NumTraits<IndexDest>::highest() && "Index value too big for target type");
0034   return IndexDest(idx);
0035 }
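// Usage sketch (illustration only, not part of the library logic; kept as a comment so the
// header is unchanged). Intended for a translation unit that includes <Eigen/Core>:
//   std::ptrdiff_t src = 42;
//   int dst = internal::convert_index<int>(src);   // fine: 42 fits into an int
//   // Converting a value larger than NumTraits<short>::highest() to short would trigger
//   // eigen_internal_assert in builds with internal assertions enabled.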
0036 
0037 // true if T can be considered as an integral index (i.e., an integral type or enum)
0038 template<typename T> struct is_valid_index_type
0039 {
0040   enum { value =
0041 #if EIGEN_HAS_TYPE_TRAITS
0042     internal::is_integral<T>::value || std::is_enum<T>::value
0043 #elif EIGEN_COMP_MSVC
0044     internal::is_integral<T>::value || __is_enum(T)
0045 #else
0046     // without C++11, we use is_convertible to Index instead of is_integral in order to treat enums as Index.
0047     internal::is_convertible<T,Index>::value && !internal::is_same<T,float>::value && !is_same<T,double>::value
0048 #endif
0049   };
0050 };
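// Illustration only (assuming a C++11 compiler, where static_assert is available):
//   static_assert( internal::is_valid_index_type<int>::value,    "integral types are valid indices");
//   static_assert( internal::is_valid_index_type<MyEnum>::value, "enums are accepted as indices");
//   static_assert(!internal::is_valid_index_type<double>::value, "floating-point types are rejected");
// Here MyEnum stands for any user-defined enum type (hypothetical name).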
0051 
0052 // true unless both types are valid index types
0053 template<typename RowIndices, typename ColIndices>
0054 struct valid_indexed_view_overload {
0055   enum { value = !(internal::is_valid_index_type<RowIndices>::value && internal::is_valid_index_type<ColIndices>::value) };
0056 };
0057 
0058 // promote_scalar_arg is a helper used in operations between an expression and a scalar, like:
0059 //    expression * scalar
0060 // Its role is to determine how the type T of the scalar operand should be promoted given the scalar type ExprScalar of the given expression.
0061 // The IsSupported template parameter must be provided by the caller as: internal::has_ReturnType<ScalarBinaryOpTraits<ExprScalar,T,op> >::value using the proper order for ExprScalar and T.
0062 // Then the logic is as follows:
0063 //  - if the operation is natively supported as defined by IsSupported, then the scalar type is not promoted, and T is returned.
0064 //  - otherwise, NumTraits<ExprScalar>::Literal is returned if T is implicitly convertible to NumTraits<ExprScalar>::Literal AND this does not imply a float to integer conversion.
0065 //  - otherwise, ExprScalar is returned if T is implicitly convertible to ExprScalar AND this does not imply a float to integer conversion.
0066 //  - In all other cases, the promoted type is not defined, and the respective operation is thus invalid and not available (SFINAE).
0067 template<typename ExprScalar,typename T, bool IsSupported>
0068 struct promote_scalar_arg;
0069 
0070 template<typename S,typename T>
0071 struct promote_scalar_arg<S,T,true>
0072 {
0073   typedef T type;
0074 };
0075 
0076 // Recursively check safe conversion to PromotedType, and then ExprScalar if they are different.
0077 template<typename ExprScalar,typename T,typename PromotedType,
0078   bool ConvertibleToLiteral = internal::is_convertible<T,PromotedType>::value,
0079   bool IsSafe = NumTraits<T>::IsInteger || !NumTraits<PromotedType>::IsInteger>
0080 struct promote_scalar_arg_unsupported;
0081 
0082 // Start recursion with NumTraits<ExprScalar>::Literal
0083 template<typename S,typename T>
0084 struct promote_scalar_arg<S,T,false> : promote_scalar_arg_unsupported<S,T,typename NumTraits<S>::Literal> {};
0085 
0086 // We found a match!
0087 template<typename S,typename T, typename PromotedType>
0088 struct promote_scalar_arg_unsupported<S,T,PromotedType,true,true>
0089 {
0090   typedef PromotedType type;
0091 };
0092 
0093 // No match, but no real-to-integer issues, and ExprScalar and current PromotedType are different,
0094 // so let's try to promote to ExprScalar
0095 template<typename ExprScalar,typename T, typename PromotedType>
0096 struct promote_scalar_arg_unsupported<ExprScalar,T,PromotedType,false,true>
0097    : promote_scalar_arg_unsupported<ExprScalar,T,ExprScalar>
0098 {};
0099 
0100 // Unsafe real-to-integer, let's stop.
0101 template<typename S,typename T, typename PromotedType, bool ConvertibleToLiteral>
0102 struct promote_scalar_arg_unsupported<S,T,PromotedType,ConvertibleToLiteral,false> {};
0103 
0104 // T is not even convertible to ExprScalar, let's stop.
0105 template<typename S,typename T>
0106 struct promote_scalar_arg_unsupported<S,T,S,false,true> {};
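// Worked illustration of how the promotion machinery above resolves (not part of the library):
//   - ExprScalar=double, T=int, IsSupported=false: NumTraits<double>::Literal is double and int
//     converts to double without a real-to-integer issue, so
//       promote_scalar_arg<double,int,false>::type is double.
//   - ExprScalar=int, T=double, IsSupported=false: double -> int would be a real-to-integer
//     conversion, so no ::type is defined and the corresponding operator is discarded by SFINAE.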
0107 
0108 // Classes inheriting no_assignment_operator don't generate a default operator=.
0109 class no_assignment_operator
0110 {
0111   private:
0112     no_assignment_operator& operator=(const no_assignment_operator&);
0113   protected:
0114     EIGEN_DEFAULT_COPY_CONSTRUCTOR(no_assignment_operator)
0115     EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(no_assignment_operator)
0116 };
0117 
0118 /** \internal return the index type with the largest number of bits */
0119 template<typename I1, typename I2>
0120 struct promote_index_type
0121 {
0122   typedef typename conditional<(sizeof(I1)<sizeof(I2)), I2, I1>::type type;
0123 };
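// Illustration only: on a platform where int is 4 bytes and std::ptrdiff_t is 8 bytes,
//   promote_index_type<int, std::ptrdiff_t>::type is std::ptrdiff_t.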
0124 
0125 /** \internal If the template parameter Value is Dynamic, this class is just a wrapper around a T variable that
0126   * can be accessed using value() and setValue().
0127   * Otherwise, this class is an empty structure and value() just returns the template parameter Value.
0128   */
0129 template<typename T, int Value> class variable_if_dynamic
0130 {
0131   public:
0132     EIGEN_DEFAULT_EMPTY_CONSTRUCTOR_AND_DESTRUCTOR(variable_if_dynamic)
0133     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
0134     EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
0135     T value() { return T(Value); }
0136     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
0137     operator T() const { return T(Value); }
0138     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0139     void setValue(T v) const { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
0140 };
0141 
0142 template<typename T> class variable_if_dynamic<T, Dynamic>
0143 {
0144     T m_value;
0145   public:
0146     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamic(T value = 0) EIGEN_NO_THROW : m_value(value) {}
0147     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T value() const { return m_value; }
0148     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE operator T() const { return m_value; }
0149     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }
0150 };
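// Illustration only: the wrapper stores a runtime value only when the compile-time value is
// Dynamic, while keeping a uniform API:
//   variable_if_dynamic<Index, 3>       fixed;      // stores nothing, fixed.value() is always 3
//   variable_if_dynamic<Index, Dynamic> runtime(5); // stores an Index, setValue() may change it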
0151 
0152 /** \internal like variable_if_dynamic but for DynamicIndex
0153   */
0154 template<typename T, int Value> class variable_if_dynamicindex
0155 {
0156   public:
0157     EIGEN_EMPTY_STRUCT_CTOR(variable_if_dynamicindex)
0158     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T v) { EIGEN_ONLY_USED_FOR_DEBUG(v); eigen_assert(v == T(Value)); }
0159     EIGEN_DEVICE_FUNC static EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
0160     T value() { return T(Value); }
0161     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
0162     void setValue(T) {}
0163 };
0164 
0165 template<typename T> class variable_if_dynamicindex<T, DynamicIndex>
0166 {
0167     T m_value;
0168     EIGEN_DEVICE_FUNC variable_if_dynamicindex() { eigen_assert(false); }
0169   public:
0170     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit variable_if_dynamicindex(T value) : m_value(value) {}
0171     EIGEN_DEVICE_FUNC T EIGEN_STRONG_INLINE value() const { return m_value; }
0172     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void setValue(T value) { m_value = value; }
0173 };
0174 
0175 template<typename T> struct functor_traits
0176 {
0177   enum
0178   {
0179     Cost = 10,
0180     PacketAccess = false,
0181     IsRepeatable = false
0182   };
0183 };
0184 
0185 template<typename T> struct packet_traits;
0186 
0187 template<typename T> struct unpacket_traits;
0188 
0189 template<int Size, typename PacketType,
0190          bool Stop = Size==Dynamic || (Size%unpacket_traits<PacketType>::size)==0 || is_same<PacketType,typename unpacket_traits<PacketType>::half>::value>
0191 struct find_best_packet_helper;
0192 
0193 template< int Size, typename PacketType>
0194 struct find_best_packet_helper<Size,PacketType,true>
0195 {
0196   typedef PacketType type;
0197 };
0198 
0199 template<int Size, typename PacketType>
0200 struct find_best_packet_helper<Size,PacketType,false>
0201 {
0202   typedef typename find_best_packet_helper<Size,typename unpacket_traits<PacketType>::half>::type type;
0203 };
0204 
0205 template<typename T, int Size>
0206 struct find_best_packet
0207 {
0208   typedef typename find_best_packet_helper<Size,typename packet_traits<T>::type>::type type;
0209 };
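// Illustration only, assuming an architecture where packet_traits<float>::type holds 8 floats and
// its half packet holds 4 (e.g. AVX): for find_best_packet<float,4>, 4 is not a multiple of 8, so
// the helper recurses to the half packet and settles on the 4-float packet (4%4==0). For
// Size==Dynamic the full packet is kept unconditionally.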
0210 
0211 #if EIGEN_MAX_STATIC_ALIGN_BYTES>0
0212 template<int ArrayBytes, int AlignmentBytes,
0213          bool Match     =  bool((ArrayBytes%AlignmentBytes)==0),
0214          bool TryHalf   =  bool(EIGEN_MIN_ALIGN_BYTES<AlignmentBytes) >
0215 struct compute_default_alignment_helper
0216 {
0217   enum { value = 0 };
0218 };
0219 
0220 template<int ArrayBytes, int AlignmentBytes, bool TryHalf>
0221 struct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, true, TryHalf> // Match
0222 {
0223   enum { value = AlignmentBytes };
0224 };
0225 
0226 template<int ArrayBytes, int AlignmentBytes>
0227 struct compute_default_alignment_helper<ArrayBytes, AlignmentBytes, false, true> // Try-half
0228 {
0229   // current packet too large, try with a half-packet
0230   enum { value = compute_default_alignment_helper<ArrayBytes, AlignmentBytes/2>::value };
0231 };
0232 #else
0233 // If static alignment is disabled, no need to bother.
0234 // This also avoids a division by zero in "bool Match =  bool((ArrayBytes%AlignmentBytes)==0)"
0235 template<int ArrayBytes, int AlignmentBytes>
0236 struct compute_default_alignment_helper
0237 {
0238   enum { value = 0 };
0239 };
0240 #endif
0241 
0242 template<typename T, int Size> struct compute_default_alignment {
0243   enum { value = compute_default_alignment_helper<Size*sizeof(T),EIGEN_MAX_STATIC_ALIGN_BYTES>::value };
0244 };
0245 
0246 template<typename T> struct compute_default_alignment<T,Dynamic> {
0247   enum { value = EIGEN_MAX_ALIGN_BYTES };
0248 };
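// Worked illustration (the numbers depend on the configuration macros): assuming
// EIGEN_MAX_STATIC_ALIGN_BYTES==32 and EIGEN_MIN_ALIGN_BYTES==16, a Matrix<float,4,1> occupies
// 16 bytes; 16%32!=0, so the helper halves the alignment, finds 16%16==0, and yields 16.
// A Matrix<float,3,1> occupies 12 bytes, which matches neither 32 nor 16, so its static
// alignment is 0, while dynamic-size objects simply get EIGEN_MAX_ALIGN_BYTES.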
0249 
0250 template<typename _Scalar, int _Rows, int _Cols,
0251          int _Options = AutoAlign |
0252                           ( (_Rows==1 && _Cols!=1) ? RowMajor
0253                           : (_Cols==1 && _Rows!=1) ? ColMajor
0254                           : EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION ),
0255          int _MaxRows = _Rows,
0256          int _MaxCols = _Cols
0257 > class make_proper_matrix_type
0258 {
0259     enum {
0260       IsColVector = _Cols==1 && _Rows!=1,
0261       IsRowVector = _Rows==1 && _Cols!=1,
0262       Options = IsColVector ? (_Options | ColMajor) & ~RowMajor
0263               : IsRowVector ? (_Options | RowMajor) & ~ColMajor
0264               : _Options
0265     };
0266   public:
0267     typedef Matrix<_Scalar, _Rows, _Cols, Options, _MaxRows, _MaxCols> type;
0268 };
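// Illustration only: make_proper_matrix_type<float,1,4>::type is a row-major 1x4 Matrix even if
// the caller requested ColMajor, make_proper_matrix_type<float,4,1>::type is forced to be
// column-major, and for genuine 2D sizes the requested _Options are kept as-is.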
0269 
0270 template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
0271 class compute_matrix_flags
0272 {
0273     enum { row_major_bit = Options&RowMajor ? RowMajorBit : 0 };
0274   public:
0275     // FIXME currently we still have to handle DirectAccessBit at the expression level to handle DenseCoeffsBase<>
0276     // and then propagate this information to the evaluator's flags.
0277     // However, I (Gael) think that DirectAccessBit should only matter at the evaluation stage.
0278     enum { ret = DirectAccessBit | LvalueBit | NestByRefBit | row_major_bit };
0279 };
0280 
0281 template<int _Rows, int _Cols> struct size_at_compile_time
0282 {
0283   enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols };
0284 };
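// Illustration only: size_at_compile_time<3,4>::ret is 12, whereas size_at_compile_time<3,Dynamic>::ret
// is Dynamic since the total size cannot be known at compile time.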
0285 
0286 template<typename XprType> struct size_of_xpr_at_compile_time
0287 {
0288   enum { ret = size_at_compile_time<traits<XprType>::RowsAtCompileTime,traits<XprType>::ColsAtCompileTime>::ret };
0289 };
0290 
0291 /* plain_matrix_type : the difference from eval is that plain_matrix_type is always a plain matrix type,
0292  * whereas eval is a const reference in the case of a matrix
0293  */
0294 
0295 template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_matrix_type;
0296 template<typename T, typename BaseClassType, int Flags> struct plain_matrix_type_dense;
0297 template<typename T> struct plain_matrix_type<T,Dense>
0298 {
0299   typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, traits<T>::Flags>::type type;
0300 };
0301 template<typename T> struct plain_matrix_type<T,DiagonalShape>
0302 {
0303   typedef typename T::PlainObject type;
0304 };
0305 
0306 template<typename T, int Flags> struct plain_matrix_type_dense<T,MatrixXpr,Flags>
0307 {
0308   typedef Matrix<typename traits<T>::Scalar,
0309                 traits<T>::RowsAtCompileTime,
0310                 traits<T>::ColsAtCompileTime,
0311                 AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor),
0312                 traits<T>::MaxRowsAtCompileTime,
0313                 traits<T>::MaxColsAtCompileTime
0314           > type;
0315 };
0316 
0317 template<typename T, int Flags> struct plain_matrix_type_dense<T,ArrayXpr,Flags>
0318 {
0319   typedef Array<typename traits<T>::Scalar,
0320                 traits<T>::RowsAtCompileTime,
0321                 traits<T>::ColsAtCompileTime,
0322                 AutoAlign | (Flags&RowMajorBit ? RowMajor : ColMajor),
0323                 traits<T>::MaxRowsAtCompileTime,
0324                 traits<T>::MaxColsAtCompileTime
0325           > type;
0326 };
0327 
0328 /* eval : the return type of eval(). For matrices, this is just a const reference
0329  * in order to avoid a useless copy
0330  */
0331 
0332 template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct eval;
0333 
0334 template<typename T> struct eval<T,Dense>
0335 {
0336   typedef typename plain_matrix_type<T>::type type;
0337 //   typedef typename T::PlainObject type;
0338 //   typedef T::Matrix<typename traits<T>::Scalar,
0339 //                 traits<T>::RowsAtCompileTime,
0340 //                 traits<T>::ColsAtCompileTime,
0341 //                 AutoAlign | (traits<T>::Flags&RowMajorBit ? RowMajor : ColMajor),
0342 //                 traits<T>::MaxRowsAtCompileTime,
0343 //                 traits<T>::MaxColsAtCompileTime
0344 //           > type;
0345 };
0346 
0347 template<typename T> struct eval<T,DiagonalShape>
0348 {
0349   typedef typename plain_matrix_type<T>::type type;
0350 };
0351 
0352 // for matrices, no need to evaluate, just use a const reference to avoid a useless copy
0353 template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
0354 struct eval<Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
0355 {
0356   typedef const Matrix<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
0357 };
0358 
0359 template<typename _Scalar, int _Rows, int _Cols, int _Options, int _MaxRows, int _MaxCols>
0360 struct eval<Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>, Dense>
0361 {
0362   typedef const Array<_Scalar, _Rows, _Cols, _Options, _MaxRows, _MaxCols>& type;
0363 };
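// Illustration of the distinction documented above: for a plain object such as Matrix4f,
// eval<Matrix4f>::type is const Matrix4f& (no copy is made), whereas for a lazy expression such as
// the sum of two matrices, eval<...>::type is the plain Matrix type the expression evaluates into.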
0364 
0365 
0366 /* similar to plain_matrix_type, but using the evaluator's Flags */
0367 template<typename T, typename StorageKind = typename traits<T>::StorageKind> struct plain_object_eval;
0368 
0369 template<typename T>
0370 struct plain_object_eval<T,Dense>
0371 {
0372   typedef typename plain_matrix_type_dense<T,typename traits<T>::XprKind, evaluator<T>::Flags>::type type;
0373 };
0374 
0375 
0376 /* plain_matrix_type_column_major : same as plain_matrix_type but guaranteed to be column-major
0377  */
0378 template<typename T> struct plain_matrix_type_column_major
0379 {
0380   enum { Rows = traits<T>::RowsAtCompileTime,
0381          Cols = traits<T>::ColsAtCompileTime,
0382          MaxRows = traits<T>::MaxRowsAtCompileTime,
0383          MaxCols = traits<T>::MaxColsAtCompileTime
0384   };
0385   typedef Matrix<typename traits<T>::Scalar,
0386                 Rows,
0387                 Cols,
0388                 (MaxRows==1&&MaxCols!=1) ? RowMajor : ColMajor,
0389                 MaxRows,
0390                 MaxCols
0391           > type;
0392 };
0393 
0394 /* plain_matrix_type_row_major : same as plain_matrix_type but guaranteed to be row-major
0395  */
0396 template<typename T> struct plain_matrix_type_row_major
0397 {
0398   enum { Rows = traits<T>::RowsAtCompileTime,
0399          Cols = traits<T>::ColsAtCompileTime,
0400          MaxRows = traits<T>::MaxRowsAtCompileTime,
0401          MaxCols = traits<T>::MaxColsAtCompileTime
0402   };
0403   typedef Matrix<typename traits<T>::Scalar,
0404                 Rows,
0405                 Cols,
0406                 (MaxCols==1&&MaxRows!=1) ? ColMajor : RowMajor,
0407                 MaxRows,
0408                 MaxCols
0409           > type;
0410 };
0411 
0412 /** \internal The reference selector for template expressions. The idea is that we don't
0413   * need to use references for expressions since they are lightweight proxy
0414   * objects which should generate no copying overhead. */
0415 template <typename T>
0416 struct ref_selector
0417 {
0418   typedef typename conditional<
0419     bool(traits<T>::Flags & NestByRefBit),
0420     T const&,
0421     const T
0422   >::type type;
0423 
0424   typedef typename conditional<
0425     bool(traits<T>::Flags & NestByRefBit),
0426     T &,
0427     T
0428   >::type non_const_type;
0429 };
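// Illustration only: plain objects carry NestByRefBit, so ref_selector<Matrix4f>::type is
// const Matrix4f& and the object is nested by reference; lightweight expression types that do not
// set this bit are nested by value, which is cheap since they only hold references themselves.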
0430 
0431 /** \internal Adds the const qualifier on the value-type of T2 if and only if T1 is a const type */
0432 template<typename T1, typename T2>
0433 struct transfer_constness
0434 {
0435   typedef typename conditional<
0436     bool(internal::is_const<T1>::value),
0437     typename internal::add_const_on_value_type<T2>::type,
0438     T2
0439   >::type type;
0440 };
0441 
0442 
0443 // However, we still need a mechanism to detect whether an expression that is evaluated multiple times
0444 // has to be evaluated into a temporary.
0445 // That's the purpose of this new nested_eval helper:
0446 /** \internal Determines how a given expression should be nested when evaluated multiple times.
0447   * For example, when you do a * (b+c), Eigen will determine how the expression b+c should be
0448   * evaluated into the bigger product expression. The choice is between nesting the expression b+c as-is, or
0449   * evaluating that expression b+c into a temporary variable d, and nest d so that the resulting expression is
0450   * a*d. Evaluating can be beneficial for example if every coefficient access in the resulting expression causes
0451   * many coefficient accesses in the nested expressions -- as is the case with matrix product for example.
0452   *
0453   * \tparam T the type of the expression being nested.
0454   * \tparam n the number of coefficient accesses in the nested expression for each coefficient access in the bigger expression.
0455   * \tparam PlainObject the type of the temporary if needed.
0456   */
0457 template<typename T, int n, typename PlainObject = typename plain_object_eval<T>::type> struct nested_eval
0458 {
0459   enum {
0460     ScalarReadCost = NumTraits<typename traits<T>::Scalar>::ReadCost,
0461     CoeffReadCost = evaluator<T>::CoeffReadCost,  // NOTE What if an evaluator evaluates itself into a temporary?
0462                                                   //      Then CoeffReadCost will be small (e.g., 1) but we still have to evaluate, especially if n>1.
0463                                                   //      This situation is already taken care of by the EvalBeforeNestingBit flag, which is turned ON
0464                                                   //      for all evaluators creating a temporary. This flag is then propagated by the parent evaluators.
0465                                                   //      Another solution could be to count the number of temps?
0466     NAsInteger = n == Dynamic ? HugeCost : n,
0467     CostEval   = (NAsInteger+1) * ScalarReadCost + CoeffReadCost,
0468     CostNoEval = NAsInteger * CoeffReadCost,
0469     Evaluate = (int(evaluator<T>::Flags) & EvalBeforeNestingBit) || (int(CostEval) < int(CostNoEval))
0470   };
0471 
0472   typedef typename conditional<Evaluate, PlainObject, typename ref_selector<T>::type>::type type;
0473 };
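// Worked illustration of the cost comparison above (made-up numbers, assuming EvalBeforeNestingBit
// is not set): suppose ScalarReadCost==1 and CoeffReadCost==10. For n==4, CostEval=(4+1)*1+10=15
// and CostNoEval=4*10=40, so Evaluate is true and a temporary PlainObject is nested; for n==1 the
// comparison gives 12 vs 10 and the expression is nested as-is.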
0474 
0475 template<typename T>
0476 EIGEN_DEVICE_FUNC
0477 inline T* const_cast_ptr(const T* ptr)
0478 {
0479   return const_cast<T*>(ptr);
0480 }
0481 
0482 template<typename Derived, typename XprKind = typename traits<Derived>::XprKind>
0483 struct dense_xpr_base
0484 {
0485   /* dense_xpr_base should only ever be used on dense expressions, thus falling either into the MatrixXpr or into the ArrayXpr cases */
0486 };
0487 
0488 template<typename Derived>
0489 struct dense_xpr_base<Derived, MatrixXpr>
0490 {
0491   typedef MatrixBase<Derived> type;
0492 };
0493 
0494 template<typename Derived>
0495 struct dense_xpr_base<Derived, ArrayXpr>
0496 {
0497   typedef ArrayBase<Derived> type;
0498 };
0499 
0500 template<typename Derived, typename XprKind = typename traits<Derived>::XprKind, typename StorageKind = typename traits<Derived>::StorageKind>
0501 struct generic_xpr_base;
0502 
0503 template<typename Derived, typename XprKind>
0504 struct generic_xpr_base<Derived, XprKind, Dense>
0505 {
0506   typedef typename dense_xpr_base<Derived,XprKind>::type type;
0507 };
0508 
0509 template<typename XprType, typename CastType> struct cast_return_type
0510 {
0511   typedef typename XprType::Scalar CurrentScalarType;
0512   typedef typename remove_all<CastType>::type _CastType;
0513   typedef typename _CastType::Scalar NewScalarType;
0514   typedef typename conditional<is_same<CurrentScalarType,NewScalarType>::value,
0515                               const XprType&,CastType>::type type;
0516 };
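// Illustration only: this is what backs expressions like m.cast<NewType>(). When NewType equals
// the current scalar type, the result type collapses to const XprType& and the cast is a no-op;
// otherwise the CastType expression (typically a CwiseUnaryOp applying a cast functor) is used.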
0517 
0518 template <typename A, typename B> struct promote_storage_type;
0519 
0520 template <typename A> struct promote_storage_type<A,A>
0521 {
0522   typedef A ret;
0523 };
0524 template <typename A> struct promote_storage_type<A, const A>
0525 {
0526   typedef A ret;
0527 };
0528 template <typename A> struct promote_storage_type<const A, A>
0529 {
0530   typedef A ret;
0531 };
0532 
0533 /** \internal Specify the "storage kind" resulting from applying a coefficient-wise
0534   * binary operation between two expressions of kinds A and B respectively.
0535   * The template parameter Functor permits specializing the resulting storage kind with
0536   * respect to the functor.
0537   * The default rules are as follows:
0538   * \code
0539   * A      op A      -> A
0540   * A      op dense  -> dense
0541   * dense  op B      -> dense
0542   * sparse op dense  -> sparse
0543   * dense  op sparse -> sparse
0544   * \endcode
0545   */
0546 template <typename A, typename B, typename Functor> struct cwise_promote_storage_type;
0547 
0548 template <typename A, typename Functor>                   struct cwise_promote_storage_type<A,A,Functor>                                      { typedef A      ret; };
0549 template <typename Functor>                               struct cwise_promote_storage_type<Dense,Dense,Functor>                              { typedef Dense  ret; };
0550 template <typename A, typename Functor>                   struct cwise_promote_storage_type<A,Dense,Functor>                                  { typedef Dense  ret; };
0551 template <typename B, typename Functor>                   struct cwise_promote_storage_type<Dense,B,Functor>                                  { typedef Dense  ret; };
0552 template <typename Functor>                               struct cwise_promote_storage_type<Sparse,Dense,Functor>                             { typedef Sparse ret; };
0553 template <typename Functor>                               struct cwise_promote_storage_type<Dense,Sparse,Functor>                             { typedef Sparse ret; };
0554 
0555 template <typename LhsKind, typename RhsKind, int LhsOrder, int RhsOrder> struct cwise_promote_storage_order {
0556   enum { value = LhsOrder };
0557 };
0558 
0559 template <typename LhsKind, int LhsOrder, int RhsOrder>   struct cwise_promote_storage_order<LhsKind,Sparse,LhsOrder,RhsOrder>                { enum { value = RhsOrder }; };
0560 template <typename RhsKind, int LhsOrder, int RhsOrder>   struct cwise_promote_storage_order<Sparse,RhsKind,LhsOrder,RhsOrder>                { enum { value = LhsOrder }; };
0561 template <int Order>                                      struct cwise_promote_storage_order<Sparse,Sparse,Order,Order>                       { enum { value = Order }; };
0562 
0563 
0564 /** \internal Specify the "storage kind" of multiplying an expression of kind A with kind B.
0565   * The template parameter ProductTag permits specializing the resulting storage kind with respect to
0566   * some compile-time properties of the product: GemmProduct, GemvProduct, OuterProduct, InnerProduct.
0567   * The default rules are as follows:
0568   * \code
0569   *  K * K            -> K
0570   *  dense * K        -> dense
0571   *  K * dense        -> dense
0572   *  diag * K         -> K
0573   *  K * diag         -> K
0574   *  Perm * K         -> K
0575   *  K * Perm         -> K
0576   * \endcode
0577   */
0578 template <typename A, typename B, int ProductTag> struct product_promote_storage_type;
0579 
0580 template <typename A, int ProductTag> struct product_promote_storage_type<A,                  A,                  ProductTag> { typedef A     ret;};
0581 template <int ProductTag>             struct product_promote_storage_type<Dense,              Dense,              ProductTag> { typedef Dense ret;};
0582 template <typename A, int ProductTag> struct product_promote_storage_type<A,                  Dense,              ProductTag> { typedef Dense ret; };
0583 template <typename B, int ProductTag> struct product_promote_storage_type<Dense,              B,                  ProductTag> { typedef Dense ret; };
0584 
0585 template <typename A, int ProductTag> struct product_promote_storage_type<A,                  DiagonalShape,      ProductTag> { typedef A ret; };
0586 template <typename B, int ProductTag> struct product_promote_storage_type<DiagonalShape,      B,                  ProductTag> { typedef B ret; };
0587 template <int ProductTag>             struct product_promote_storage_type<Dense,              DiagonalShape,      ProductTag> { typedef Dense ret; };
0588 template <int ProductTag>             struct product_promote_storage_type<DiagonalShape,      Dense,              ProductTag> { typedef Dense ret; };
0589 
0590 template <typename A, int ProductTag> struct product_promote_storage_type<A,                  PermutationStorage, ProductTag> { typedef A ret; };
0591 template <typename B, int ProductTag> struct product_promote_storage_type<PermutationStorage, B,                  ProductTag> { typedef B ret; };
0592 template <int ProductTag>             struct product_promote_storage_type<Dense,              PermutationStorage, ProductTag> { typedef Dense ret; };
0593 template <int ProductTag>             struct product_promote_storage_type<PermutationStorage, Dense,              ProductTag> { typedef Dense ret; };
0594 
0595 /** \internal gives the plain matrix or array type to store a row/column/diagonal of a matrix type.
0596   * \tparam Scalar optional parameter allowing one to pass a different scalar type than that of the MatrixType.
0597   */
0598 template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
0599 struct plain_row_type
0600 {
0601   typedef Matrix<Scalar, 1, ExpressionType::ColsAtCompileTime,
0602                  int(ExpressionType::PlainObject::Options) | int(RowMajor), 1, ExpressionType::MaxColsAtCompileTime> MatrixRowType;
0603   typedef Array<Scalar, 1, ExpressionType::ColsAtCompileTime,
0604                  int(ExpressionType::PlainObject::Options) | int(RowMajor), 1, ExpressionType::MaxColsAtCompileTime> ArrayRowType;
0605 
0606   typedef typename conditional<
0607     is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
0608     MatrixRowType,
0609     ArrayRowType
0610   >::type type;
0611 };
0612 
0613 template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
0614 struct plain_col_type
0615 {
0616   typedef Matrix<Scalar, ExpressionType::RowsAtCompileTime, 1,
0617                  ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> MatrixColType;
0618   typedef Array<Scalar, ExpressionType::RowsAtCompileTime, 1,
0619                  ExpressionType::PlainObject::Options & ~RowMajor, ExpressionType::MaxRowsAtCompileTime, 1> ArrayColType;
0620 
0621   typedef typename conditional<
0622     is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
0623     MatrixColType,
0624     ArrayColType
0625   >::type type;
0626 };
0627 
0628 template<typename ExpressionType, typename Scalar = typename ExpressionType::Scalar>
0629 struct plain_diag_type
0630 {
0631   enum { diag_size = EIGEN_SIZE_MIN_PREFER_DYNAMIC(ExpressionType::RowsAtCompileTime, ExpressionType::ColsAtCompileTime),
0632          max_diag_size = EIGEN_SIZE_MIN_PREFER_FIXED(ExpressionType::MaxRowsAtCompileTime, ExpressionType::MaxColsAtCompileTime)
0633   };
0634   typedef Matrix<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> MatrixDiagType;
0635   typedef Array<Scalar, diag_size, 1, ExpressionType::PlainObject::Options & ~RowMajor, max_diag_size, 1> ArrayDiagType;
0636 
0637   typedef typename conditional<
0638     is_same< typename traits<ExpressionType>::XprKind, MatrixXpr >::value,
0639     MatrixDiagType,
0640     ArrayDiagType
0641   >::type type;
0642 };
0643 
0644 template<typename Expr,typename Scalar = typename Expr::Scalar>
0645 struct plain_constant_type
0646 {
0647   enum { Options = (traits<Expr>::Flags&RowMajorBit)?RowMajor:0 };
0648 
0649   typedef Array<Scalar,  traits<Expr>::RowsAtCompileTime,   traits<Expr>::ColsAtCompileTime,
0650                 Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> array_type;
0651 
0652   typedef Matrix<Scalar,  traits<Expr>::RowsAtCompileTime,   traits<Expr>::ColsAtCompileTime,
0653                  Options, traits<Expr>::MaxRowsAtCompileTime,traits<Expr>::MaxColsAtCompileTime> matrix_type;
0654 
0655   typedef CwiseNullaryOp<scalar_constant_op<Scalar>, const typename conditional<is_same< typename traits<Expr>::XprKind, MatrixXpr >::value, matrix_type, array_type>::type > type;
0656 };
0657 
0658 template<typename ExpressionType>
0659 struct is_lvalue
0660 {
0661   enum { value = (!bool(is_const<ExpressionType>::value)) &&
0662                  bool(traits<ExpressionType>::Flags & LvalueBit) };
0663 };
0664 
0665 template<typename T> struct is_diagonal
0666 { enum { ret = false }; };
0667 
0668 template<typename T> struct is_diagonal<DiagonalBase<T> >
0669 { enum { ret = true }; };
0670 
0671 template<typename T> struct is_diagonal<DiagonalWrapper<T> >
0672 { enum { ret = true }; };
0673 
0674 template<typename T, int S> struct is_diagonal<DiagonalMatrix<T,S> >
0675 { enum { ret = true }; };
0676 
0677 
0678 template<typename T> struct is_identity
0679 { enum { value = false }; };
0680 
0681 template<typename T> struct is_identity<CwiseNullaryOp<internal::scalar_identity_op<typename T::Scalar>, T> >
0682 { enum { value = true }; };
0683 
0684 
0685 template<typename S1, typename S2> struct glue_shapes;
0686 template<> struct glue_shapes<DenseShape,TriangularShape> { typedef TriangularShape type;  };
0687 
0688 template<typename T1, typename T2>
0689 struct possibly_same_dense {
0690   enum { value = has_direct_access<T1>::ret && has_direct_access<T2>::ret && is_same<typename T1::Scalar,typename T2::Scalar>::value };
0691 };
0692 
0693 template<typename T1, typename T2>
0694 EIGEN_DEVICE_FUNC
0695 bool is_same_dense(const T1 &mat1, const T2 &mat2, typename enable_if<possibly_same_dense<T1,T2>::value>::type * = 0)
0696 {
0697   return (mat1.data()==mat2.data()) && (mat1.innerStride()==mat2.innerStride()) && (mat1.outerStride()==mat2.outerStride());
0698 }
0699 
0700 template<typename T1, typename T2>
0701 EIGEN_DEVICE_FUNC
0702 bool is_same_dense(const T1 &, const T2 &, typename enable_if<!possibly_same_dense<T1,T2>::value>::type * = 0)
0703 {
0704   return false;
0705 }
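// Illustration only: is_same_dense(m, m) returns true, and so does
// is_same_dense(m, m.block(0,0,m.rows(),m.cols())) since the block shares m's data pointer and
// strides; for operand types without direct access, or with different scalar types, the second
// overload is selected and simply returns false.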
0706 
0707 // Internal helper defining the cost of a scalar division for the type T.
0708 // The default heuristic can be specialized for each scalar type and architecture.
0709 template<typename T,bool Vectorized=false,typename EnableIf = void>
0710 struct scalar_div_cost {
0711   enum { value = 8*NumTraits<T>::MulCost };
0712 };
0713 
0714 template<typename T,bool Vectorized>
0715 struct scalar_div_cost<std::complex<T>, Vectorized> {
0716   enum { value = 2*scalar_div_cost<T>::value
0717                + 6*NumTraits<T>::MulCost
0718                + 3*NumTraits<T>::AddCost
0719   };
0720 };
0721 
0722 
0723 template<bool Vectorized>
0724 struct scalar_div_cost<signed long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 24 }; };
0725 template<bool Vectorized>
0726 struct scalar_div_cost<unsigned long,Vectorized,typename conditional<sizeof(long)==8,void,false_type>::type> { enum { value = 21 }; };
0727 
0728 
0729 #ifdef EIGEN_DEBUG_ASSIGN
0730 std::string demangle_traversal(int t)
0731 {
0732   if(t==DefaultTraversal) return "DefaultTraversal";
0733   if(t==LinearTraversal) return "LinearTraversal";
0734   if(t==InnerVectorizedTraversal) return "InnerVectorizedTraversal";
0735   if(t==LinearVectorizedTraversal) return "LinearVectorizedTraversal";
0736   if(t==SliceVectorizedTraversal) return "SliceVectorizedTraversal";
0737   return "?";
0738 }
0739 std::string demangle_unrolling(int t)
0740 {
0741   if(t==NoUnrolling) return "NoUnrolling";
0742   if(t==InnerUnrolling) return "InnerUnrolling";
0743   if(t==CompleteUnrolling) return "CompleteUnrolling";
0744   return "?";
0745 }
0746 std::string demangle_flags(int f)
0747 {
0748   std::string res;
0749   if(f&RowMajorBit)                 res += " | RowMajor";
0750   if(f&PacketAccessBit)             res += " | Packet";
0751   if(f&LinearAccessBit)             res += " | Linear";
0752   if(f&LvalueBit)                   res += " | Lvalue";
0753   if(f&DirectAccessBit)             res += " | Direct";
0754   if(f&NestByRefBit)                res += " | NestByRef";
0755   if(f&NoPreferredStorageOrderBit)  res += " | NoPreferredStorageOrderBit";
0756 
0757   return res;
0758 }
0759 #endif
0760 
0761 } // end namespace internal
0762 
0763 
0764 /** \class ScalarBinaryOpTraits
0765   * \ingroup Core_Module
0766   *
0767   * \brief Determines whether the given binary operation of two numeric types is allowed and what the scalar return type is.
0768   *
0769   * This class permits controlling the scalar return type of any binary operation performed on two different scalar types through (partial) template specializations.
0770   *
0771   * For instance, let \c U1, \c U2 and \c U3 be three user-defined scalar types for which most operations between instances of \c U1 and \c U2 return a \c U3.
0772   * You can let %Eigen know that by defining:
0773     \code
0774     template<typename BinaryOp>
0775     struct ScalarBinaryOpTraits<U1,U2,BinaryOp> { typedef U3 ReturnType;  };
0776     template<typename BinaryOp>
0777     struct ScalarBinaryOpTraits<U2,U1,BinaryOp> { typedef U3 ReturnType;  };
0778     \endcode
0779   * You can then explicitly disable particular operations to get clearer error messages:
0780     \code
0781     template<>
0782     struct ScalarBinaryOpTraits<U1,U2,internal::scalar_max_op<U1,U2> > {};
0783     \endcode
0784   * Or customize the return type for individual operations:
0785     \code
0786     template<>
0787     struct ScalarBinaryOpTraits<U1,U2,internal::scalar_sum_op<U1,U2> > { typedef U1 ReturnType; };
0788     \endcode
0789   *
0790   * By default, the following generic combinations are supported:
0791   <table class="manual">
0792   <tr><th>ScalarA</th><th>ScalarB</th><th>BinaryOp</th><th>ReturnType</th><th>Note</th></tr>
0793   <tr            ><td>\c T </td><td>\c T </td><td>\c * </td><td>\c T </td><td></td></tr>
0794   <tr class="alt"><td>\c NumTraits<T>::Real </td><td>\c T </td><td>\c * </td><td>\c T </td><td>Only if \c NumTraits<T>::IsComplex </td></tr>
0795   <tr            ><td>\c T </td><td>\c NumTraits<T>::Real </td><td>\c * </td><td>\c T </td><td>Only if \c NumTraits<T>::IsComplex </td></tr>
0796   </table>
0797   *
0798   * \sa CwiseBinaryOp
0799   */
0800 template<typename ScalarA, typename ScalarB, typename BinaryOp=internal::scalar_product_op<ScalarA,ScalarB> >
0801 struct ScalarBinaryOpTraits
0802 #ifndef EIGEN_PARSED_BY_DOXYGEN
0803   // for backward compatibility, use the hints given by the (deprecated) internal::scalar_product_traits class.
0804   : internal::scalar_product_traits<ScalarA,ScalarB>
0805 #endif // EIGEN_PARSED_BY_DOXYGEN
0806 {};
0807 
0808 template<typename T, typename BinaryOp>
0809 struct ScalarBinaryOpTraits<T,T,BinaryOp>
0810 {
0811   typedef T ReturnType;
0812 };
0813 
0814 template <typename T, typename BinaryOp>
0815 struct ScalarBinaryOpTraits<T, typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, BinaryOp>
0816 {
0817   typedef T ReturnType;
0818 };
0819 template <typename T, typename BinaryOp>
0820 struct ScalarBinaryOpTraits<typename NumTraits<typename internal::enable_if<NumTraits<T>::IsComplex,T>::type>::Real, T, BinaryOp>
0821 {
0822   typedef T ReturnType;
0823 };
0824 
0825 // For Matrix * Permutation
0826 template<typename T, typename BinaryOp>
0827 struct ScalarBinaryOpTraits<T,void,BinaryOp>
0828 {
0829   typedef T ReturnType;
0830 };
0831 
0832 // For Permutation * Matrix
0833 template<typename T, typename BinaryOp>
0834 struct ScalarBinaryOpTraits<void,T,BinaryOp>
0835 {
0836   typedef T ReturnType;
0837 };
0838 
0839 // For Permutation * Permutation
0840 template<typename BinaryOp>
0841 struct ScalarBinaryOpTraits<void,void,BinaryOp>
0842 {
0843   typedef void ReturnType;
0844 };
0845 
0846 // We require Lhs and Rhs to have "compatible" scalar types.
0847 // It is tempting to always allow mixing different types but remember that this is often impossible in the vectorized paths.
0848 // Allowing mixed types would therefore produce very confusing errors when vectorization is enabled,
0849 // e.g. when the user tries to add a float matrix and a double matrix together.
0850 #define EIGEN_CHECK_BINARY_COMPATIBILIY(BINOP,LHS,RHS) \
0851   EIGEN_STATIC_ASSERT((Eigen::internal::has_ReturnType<ScalarBinaryOpTraits<LHS, RHS,BINOP> >::value), \
0852     YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
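// Illustration only: ScalarBinaryOpTraits<std::complex<float>,float>::ReturnType is
// std::complex<float>, so mixing a complex expression with its real scalar type compiles, whereas
// ScalarBinaryOpTraits<float,double,...> defines no ReturnType by default; for such a mix,
// EIGEN_CHECK_BINARY_COMPATIBILIY triggers the YOU_MIXED_DIFFERENT_NUMERIC_TYPES... static
// assertion unless the user provides a suitable ScalarBinaryOpTraits specialization.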
0853 
0854 } // end namespace Eigen
0855 
0856 #endif // EIGEN_XPRHELPER_H