// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2011, 2013 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_MATRIX_FUNCTION_H
#define EIGEN_MATRIX_FUNCTION_H

#include "StemFunction.h"


namespace Eigen {

namespace internal {

/** \brief Maximum distance allowed between eigenvalues to be considered "close". */
static const float matrix_function_separation = 0.1f;

/** \ingroup MatrixFunctions_Module
  * \class MatrixFunctionAtomic
  * \brief Helper class for computing matrix functions of atomic matrices.
  *
  * Here, an atomic matrix is a triangular matrix whose diagonal entries are close to each other.
  */
template <typename MatrixType>
class MatrixFunctionAtomic
{
  public:

    typedef typename MatrixType::Scalar Scalar;
    typedef typename stem_function<Scalar>::type StemFunction;

    /** \brief Constructor
      * \param[in]  f  matrix function to compute.
      */
    MatrixFunctionAtomic(StemFunction f) : m_f(f) { }

    /** \brief Compute matrix function of atomic matrix
      * \param[in]  A  argument of matrix function, should be upper triangular and atomic
      * \returns  f(A), the matrix function evaluated at the given matrix
      */
    MatrixType compute(const MatrixType& A);

  private:
    StemFunction* m_f;
};

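/** \brief Compute the quantity \f$ \mu \f$ used in the convergence test of MatrixFunctionAtomic::compute().
  *
  * Informal sketch: with \f$ N = I - A \f$, this solves \f$ N e = \mathbf{1} \f$ by back substitution and
  * returns \f$ \max_i |e_i| \f$, a cheap estimate related to \f$ \|(I - A)^{-1}\|_\infty \f$ that enters the
  * truncation bound of the Taylor series below.
  */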
template <typename MatrixType>
typename NumTraits<typename MatrixType::Scalar>::Real matrix_function_compute_mu(const MatrixType& A)
{
  typedef typename plain_col_type<MatrixType>::type VectorType;
  Index rows = A.rows();
  const MatrixType N = MatrixType::Identity(rows, rows) - A;
  VectorType e = VectorType::Ones(rows);
  N.template triangularView<Upper>().solveInPlace(e);
  return e.cwiseAbs().maxCoeff();
}

template <typename MatrixType>
MatrixType MatrixFunctionAtomic<MatrixType>::compute(const MatrixType& A)
{
  // TODO: Use that A is upper triangular
  typedef typename NumTraits<Scalar>::Real RealScalar;
  Index rows = A.rows();
  Scalar avgEival = A.trace() / Scalar(RealScalar(rows));
  MatrixType Ashifted = A - avgEival * MatrixType::Identity(rows, rows);
  RealScalar mu = matrix_function_compute_mu(Ashifted);
  MatrixType F = m_f(avgEival, 0) * MatrixType::Identity(rows, rows);
  MatrixType P = Ashifted;
  MatrixType Fincr;
  for (Index s = 1; double(s) < 1.1 * double(rows) + 10.0; s++) { // upper limit is fairly arbitrary
    Fincr = m_f(avgEival, static_cast<int>(s)) * P;
    F += Fincr;
    P = Scalar(RealScalar(1)/RealScalar(s + 1)) * P * Ashifted;

    // test whether Taylor series converged
    const RealScalar F_norm = F.cwiseAbs().rowwise().sum().maxCoeff();
    const RealScalar Fincr_norm = Fincr.cwiseAbs().rowwise().sum().maxCoeff();
    if (Fincr_norm < NumTraits<Scalar>::epsilon() * F_norm) {
      RealScalar delta = 0;
      RealScalar rfactorial = 1;
      for (Index r = 0; r < rows; r++) {
        RealScalar mx = 0;
        for (Index i = 0; i < rows; i++)
          mx = (std::max)(mx, std::abs(m_f(Ashifted(i, i) + avgEival, static_cast<int>(s+r))));
        if (r != 0)
          rfactorial *= RealScalar(r);
        delta = (std::max)(delta, mx / rfactorial);
      }
      const RealScalar P_norm = P.cwiseAbs().rowwise().sum().maxCoeff();
      if (mu * delta * P_norm < NumTraits<Scalar>::epsilon() * F_norm) // series converged
        break;
    }
  }
  return F;
}
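
// Informal note on the scheme above: writing sigma = trace(A) / rows for the mean eigenvalue, the loop
// accumulates the truncated Taylor series
//
//   f(A) ~= sum_{s=0}^{S} f^(s)(sigma) / s! * (A - sigma*I)^s,
//
// where P holds (A - sigma*I)^s / s! and m_f(sigma, s) evaluates the s-th derivative of the stem function.
// The stopping test bounds the neglected tail using mu (see matrix_function_compute_mu above) together with
// the largest |f^(s+r)| over the shifted diagonal entries, in the spirit of Davies and Higham's analysis.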

/** \brief Find the cluster in \p clusters containing some value
  * \param[in] key Value to find
  * \returns Iterator to the cluster containing \p key, or \c clusters.end() if no cluster in \p clusters
  * contains \p key.
  */
template <typename Index, typename ListOfClusters>
typename ListOfClusters::iterator matrix_function_find_cluster(Index key, ListOfClusters& clusters)
{
  typename std::list<Index>::iterator j;
  for (typename ListOfClusters::iterator i = clusters.begin(); i != clusters.end(); ++i) {
    j = std::find(i->begin(), i->end(), key);
    if (j != i->end())
      return i;
  }
  return clusters.end();
}

/** \brief Partition eigenvalues into clusters of eigenvalues that are close to each other
  *
  * \param[in]  eivals    Eigenvalues
  * \param[out] clusters  Resulting partition of eigenvalues
  *
  * The partition satisfies the following two properties:
  * # Any eigenvalue in a certain cluster is at most \c matrix_function_separation away from another eigenvalue
  *   in the same cluster.
  * # The distance between two eigenvalues in different clusters is more than \c matrix_function_separation.
  *
  * The implementation follows Algorithm 4.1 in the paper of Davies and Higham.
  */
template <typename EivalsType, typename Cluster>
void matrix_function_partition_eigenvalues(const EivalsType& eivals, std::list<Cluster>& clusters)
{
  typedef typename EivalsType::RealScalar RealScalar;
  for (Index i=0; i<eivals.rows(); ++i) {
    // Find cluster containing i-th eigenvalue, adding a new cluster if necessary
    typename std::list<Cluster>::iterator qi = matrix_function_find_cluster(i, clusters);
    if (qi == clusters.end()) {
      Cluster l;
      l.push_back(i);
      clusters.push_back(l);
      qi = clusters.end();
      --qi;
    }

    // Look for other elements to add to the cluster
    for (Index j=i+1; j<eivals.rows(); ++j) {
      if (abs(eivals(j) - eivals(i)) <= RealScalar(matrix_function_separation)
          && std::find(qi->begin(), qi->end(), j) == qi->end()) {
        typename std::list<Cluster>::iterator qj = matrix_function_find_cluster(j, clusters);
        if (qj == clusters.end()) {
          qi->push_back(j);
        } else {
          qi->insert(qi->end(), qj->begin(), qj->end());
          clusters.erase(qj);
        }
      }
    }
  }
}
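
// Small worked example (illustrative only): with matrix_function_separation = 0.1 and eigenvalues
// (1.00, 1.05, 2.00), indices 0 and 1 land in one cluster (|1.05 - 1.00| <= 0.1) while index 2 gets its own,
// so clusters ends up as { {0, 1}, {2} }.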

/** \brief Compute size of each cluster given a partitioning */
template <typename ListOfClusters, typename Index>
void matrix_function_compute_cluster_size(const ListOfClusters& clusters, Matrix<Index, Dynamic, 1>& clusterSize)
{
  const Index numClusters = static_cast<Index>(clusters.size());
  clusterSize.setZero(numClusters);
  Index clusterIndex = 0;
  for (typename ListOfClusters::const_iterator cluster = clusters.begin(); cluster != clusters.end(); ++cluster) {
    clusterSize[clusterIndex] = cluster->size();
    ++clusterIndex;
  }
}

/** \brief Compute start of each block using clusterSize */
template <typename VectorType>
void matrix_function_compute_block_start(const VectorType& clusterSize, VectorType& blockStart)
{
  blockStart.resize(clusterSize.rows());
  blockStart(0) = 0;
  for (Index i = 1; i < clusterSize.rows(); i++) {
    blockStart(i) = blockStart(i-1) + clusterSize(i-1);
  }
}

/** \brief Compute mapping of eigenvalue indices to cluster indices */
template <typename EivalsType, typename ListOfClusters, typename VectorType>
void matrix_function_compute_map(const EivalsType& eivals, const ListOfClusters& clusters, VectorType& eivalToCluster)
{
  eivalToCluster.resize(eivals.rows());
  Index clusterIndex = 0;
  for (typename ListOfClusters::const_iterator cluster = clusters.begin(); cluster != clusters.end(); ++cluster) {
    for (Index i = 0; i < eivals.rows(); ++i) {
      if (std::find(cluster->begin(), cluster->end(), i) != cluster->end()) {
        eivalToCluster[i] = clusterIndex;
      }
    }
    ++clusterIndex;
  }
}

/** \brief Compute permutation which groups eigenvalues in the same cluster together */
template <typename DynVectorType, typename VectorType>
void matrix_function_compute_permutation(const DynVectorType& blockStart, const DynVectorType& eivalToCluster, VectorType& permutation)
{
  DynVectorType indexNextEntry = blockStart;
  permutation.resize(eivalToCluster.rows());
  for (Index i = 0; i < eivalToCluster.rows(); i++) {
    Index cluster = eivalToCluster[i];
    permutation[i] = indexNextEntry[cluster];
    ++indexNextEntry[cluster];
  }
}
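
// Small worked example (illustrative only): for blockStart = (0, 2) and eivalToCluster = (0, 1, 0, 1, 1),
// the loop produces permutation = (0, 2, 1, 3, 4); eigenvalue 0 goes to row 0 of the first block,
// eigenvalue 1 to row 2 (the start of the second block), eigenvalue 2 to row 1, and so on.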

/** \brief Permute Schur decomposition in U and T according to permutation */
template <typename VectorType, typename MatrixType>
void matrix_function_permute_schur(VectorType& permutation, MatrixType& U, MatrixType& T)
{
  for (Index i = 0; i < permutation.rows() - 1; i++) {
    Index j;
    for (j = i; j < permutation.rows(); j++) {
      if (permutation(j) == i) break;
    }
    eigen_assert(permutation(j) == i);
    for (Index k = j-1; k >= i; k--) {
      JacobiRotation<typename MatrixType::Scalar> rotation;
      rotation.makeGivens(T(k, k+1), T(k+1, k+1) - T(k, k));
      T.applyOnTheLeft(k, k+1, rotation.adjoint());
      T.applyOnTheRight(k, k+1, rotation);
      U.applyOnTheRight(k, k+1, rotation);
      std::swap(permutation.coeffRef(k), permutation.coeffRef(k+1));
    }
  }
}
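
// Informal note on the routine above: for each target position i, the eigenvalue that belongs there is found
// at some position j >= i and bubbled leftwards one step at a time. Each step applies a Givens rotation chosen
// so that swapping the two adjacent diagonal entries of T keeps T upper triangular; U is updated with the same
// rotation, so the product U * T * U^* is unchanged.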

/** \brief Compute block diagonal part of matrix function.
  *
  * This routine computes the matrix function applied to the block diagonal part of \p T (which should be
  * upper triangular), with the blocking given by \p blockStart and \p clusterSize. The matrix function of
  * each diagonal block is computed by \p atomic. The off-diagonal parts of \p fT are set to zero.
  */
template <typename MatrixType, typename AtomicType, typename VectorType>
void matrix_function_compute_block_atomic(const MatrixType& T, AtomicType& atomic, const VectorType& blockStart, const VectorType& clusterSize, MatrixType& fT)
{
  fT.setZero(T.rows(), T.cols());
  for (Index i = 0; i < clusterSize.rows(); ++i) {
    fT.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i))
      = atomic.compute(T.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i)));
  }
}

/** \brief Solve a triangular Sylvester equation AX + XB = C
  *
  * \param[in]  A  the matrix A; should be square and upper triangular
  * \param[in]  B  the matrix B; should be square and upper triangular
  * \param[in]  C  the matrix C; should have correct size.
  *
  * \returns the solution X.
  *
  * If A is m-by-m and B is n-by-n, then both C and X are m-by-n.  The (i,j)-th component of the Sylvester
  * equation is
  * \f[
  *     \sum_{k=i}^m A_{ik} X_{kj} + \sum_{k=1}^j X_{ik} B_{kj} = C_{ij}.
  * \f]
  * This can be re-arranged to yield:
  * \f[
  *     X_{ij} = \frac{1}{A_{ii} + B_{jj}} \Bigl( C_{ij}
  *     - \sum_{k=i+1}^m A_{ik} X_{kj} - \sum_{k=1}^{j-1} X_{ik} B_{kj} \Bigr).
  * \f]
  * It is assumed that A and B are such that the denominator \f$ A_{ii} + B_{jj} \f$ is never zero (otherwise
  * the Sylvester equation does not have a unique solution). In that case, these equations can be evaluated
  * in the order \f$ i=m,\ldots,1 \f$ and \f$ j=1,\ldots,n \f$.
  */
template <typename MatrixType>
MatrixType matrix_function_solve_triangular_sylvester(const MatrixType& A, const MatrixType& B, const MatrixType& C)
{
  eigen_assert(A.rows() == A.cols());
  eigen_assert(A.isUpperTriangular());
  eigen_assert(B.rows() == B.cols());
  eigen_assert(B.isUpperTriangular());
  eigen_assert(C.rows() == A.rows());
  eigen_assert(C.cols() == B.rows());

  typedef typename MatrixType::Scalar Scalar;

  Index m = A.rows();
  Index n = B.rows();
  MatrixType X(m, n);

  for (Index i = m - 1; i >= 0; --i) {
    for (Index j = 0; j < n; ++j) {

      // Compute AX = \sum_{k=i+1}^m A_{ik} X_{kj}
      Scalar AX;
      if (i == m - 1) {
        AX = 0;
      } else {
        Matrix<Scalar,1,1> AXmatrix = A.row(i).tail(m-1-i) * X.col(j).tail(m-1-i);
        AX = AXmatrix(0,0);
      }

      // Compute XB = \sum_{k=1}^{j-1} X_{ik} B_{kj}
      Scalar XB;
      if (j == 0) {
        XB = 0;
      } else {
        Matrix<Scalar,1,1> XBmatrix = X.row(i).head(j) * B.col(j).head(j);
        XB = XBmatrix(0,0);
      }

      X(i,j) = (C(i,j) - AX - XB) / (A(i,i) + B(j,j));
    }
  }
  return X;
}
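
// Minimal sanity check (illustrative only): in the 1-by-1 case the recurrence above collapses to
// a*x + x*b = c, i.e. X(0,0) = C(0,0) / (A(0,0) + B(0,0)). For larger sizes the loops fill X row by row from
// the bottom up, and each row from left to right, so every entry of X that is read has already been computed.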

/** \brief Compute part of matrix function above block diagonal.
  *
  * This routine completes the computation of \p fT, which denotes the matrix function applied to the
  * triangular matrix \p T. It assumes that the block diagonal part of \p fT has already been computed. The
  * part below the diagonal is zero, because \p T is upper triangular.
  */
template <typename MatrixType, typename VectorType>
void matrix_function_compute_above_diagonal(const MatrixType& T, const VectorType& blockStart, const VectorType& clusterSize, MatrixType& fT)
{
  typedef internal::traits<MatrixType> Traits;
  typedef typename MatrixType::Scalar Scalar;
  static const int Options = MatrixType::Options;
  typedef Matrix<Scalar, Dynamic, Dynamic, Options, Traits::RowsAtCompileTime, Traits::ColsAtCompileTime> DynMatrixType;

  for (Index k = 1; k < clusterSize.rows(); k++) {
    for (Index i = 0; i < clusterSize.rows() - k; i++) {
      // compute (i, i+k) block
      DynMatrixType A = T.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i));
      DynMatrixType B = -T.block(blockStart(i+k), blockStart(i+k), clusterSize(i+k), clusterSize(i+k));
      DynMatrixType C = fT.block(blockStart(i), blockStart(i), clusterSize(i), clusterSize(i))
        * T.block(blockStart(i), blockStart(i+k), clusterSize(i), clusterSize(i+k));
      C -= T.block(blockStart(i), blockStart(i+k), clusterSize(i), clusterSize(i+k))
        * fT.block(blockStart(i+k), blockStart(i+k), clusterSize(i+k), clusterSize(i+k));
      for (Index m = i + 1; m < i + k; m++) {
        C += fT.block(blockStart(i), blockStart(m), clusterSize(i), clusterSize(m))
          * T.block(blockStart(m), blockStart(i+k), clusterSize(m), clusterSize(i+k));
        C -= T.block(blockStart(i), blockStart(m), clusterSize(i), clusterSize(m))
          * fT.block(blockStart(m), blockStart(i+k), clusterSize(m), clusterSize(i+k));
      }
      fT.block(blockStart(i), blockStart(i+k), clusterSize(i), clusterSize(i+k))
        = matrix_function_solve_triangular_sylvester(A, B, C);
    }
  }
}
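
// Where the Sylvester equation comes from (informal sketch): F = f(T) commutes with T, so equating the
// (i, i+k) blocks of F*T and T*F and moving the known blocks to the right-hand side gives
//
//   T_ii * F_{i,i+k} - F_{i,i+k} * T_{i+k,i+k}
//     = F_ii * T_{i,i+k} - T_{i,i+k} * F_{i+k,i+k}
//       + sum_{m=i+1}^{i+k-1} (F_{i,m} * T_{m,i+k} - T_{i,m} * F_{m,i+k}),
//
// which is the triangular Sylvester equation A X + X B = C solved above, with A = T_ii, B = -T_{i+k,i+k} and
// C the right-hand side assembled in the inner loop (the block form of the Parlett recurrence).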

/** \ingroup MatrixFunctions_Module
  * \brief Class for computing matrix functions.
  * \tparam  MatrixType  type of the argument of the matrix function,
  *                      expected to be an instantiation of the Matrix class template.
  * \tparam  AtomicType  type for computing matrix function of atomic blocks.
  * \tparam  IsComplex   used internally to select correct specialization.
  *
  * This class implements the Schur-Parlett algorithm for computing matrix functions. The spectrum of the
  * matrix is divided into clusters of eigenvalues that lie close together. This class delegates the
  * computation of the matrix function on every block corresponding to these clusters to an object of type
  * \p AtomicType and uses these results to compute the matrix function of the whole matrix. The class
  * \p AtomicType should have a \p compute() member function for computing the matrix function of a block.
  *
  * \sa class MatrixFunctionAtomic, class MatrixLogarithmAtomic
  */
template <typename MatrixType, int IsComplex = NumTraits<typename internal::traits<MatrixType>::Scalar>::IsComplex>
struct matrix_function_compute
{
    /** \brief Compute the matrix function.
      *
      * \param[in]  A       argument of matrix function, should be a square matrix.
      * \param[in]  atomic  class for computing matrix function of atomic blocks.
      * \param[out] result  the matrix function applied to \p A, as computed by \p atomic.
      *
      * See MatrixBase::matrixFunction() for details on how this computation
      * is implemented.
      */
    template <typename AtomicType, typename ResultType>
    static void run(const MatrixType& A, AtomicType& atomic, ResultType &result);
};

/** \internal \ingroup MatrixFunctions_Module
  * \brief Partial specialization of matrix_function_compute for real matrices
  *
  * This converts the real matrix to a complex matrix, computes the matrix function of that matrix, and then
  * converts the result back to a real matrix.
  */
template <typename MatrixType>
struct matrix_function_compute<MatrixType, 0>
{
  template <typename MatA, typename AtomicType, typename ResultType>
  static void run(const MatA& A, AtomicType& atomic, ResultType &result)
  {
    typedef internal::traits<MatrixType> Traits;
    typedef typename Traits::Scalar Scalar;
    static const int Rows = Traits::RowsAtCompileTime, Cols = Traits::ColsAtCompileTime;
    static const int MaxRows = Traits::MaxRowsAtCompileTime, MaxCols = Traits::MaxColsAtCompileTime;

    typedef std::complex<Scalar> ComplexScalar;
    typedef Matrix<ComplexScalar, Rows, Cols, 0, MaxRows, MaxCols> ComplexMatrix;

    ComplexMatrix CA = A.template cast<ComplexScalar>();
    ComplexMatrix Cresult;
    matrix_function_compute<ComplexMatrix>::run(CA, atomic, Cresult);
    result = Cresult.real();
  }
};

/** \internal \ingroup MatrixFunctions_Module
  * \brief Partial specialization of matrix_function_compute for complex matrices
  */
template <typename MatrixType>
struct matrix_function_compute<MatrixType, 1>
{
  template <typename MatA, typename AtomicType, typename ResultType>
  static void run(const MatA& A, AtomicType& atomic, ResultType &result)
  {
    typedef internal::traits<MatrixType> Traits;

    // compute Schur decomposition of A
    const ComplexSchur<MatrixType> schurOfA(A);
    eigen_assert(schurOfA.info()==Success);
    MatrixType T = schurOfA.matrixT();
    MatrixType U = schurOfA.matrixU();

    // partition eigenvalues into clusters of eigenvalues that lie "close" to each other
    std::list<std::list<Index> > clusters;
    matrix_function_partition_eigenvalues(T.diagonal(), clusters);

    // compute size of each cluster
    Matrix<Index, Dynamic, 1> clusterSize;
    matrix_function_compute_cluster_size(clusters, clusterSize);

    // blockStart[i] is row index at which block corresponding to i-th cluster starts
    Matrix<Index, Dynamic, 1> blockStart;
    matrix_function_compute_block_start(clusterSize, blockStart);

    // compute map so that eivalToCluster[i] = j means that i-th eigenvalue is in j-th cluster
    Matrix<Index, Dynamic, 1> eivalToCluster;
    matrix_function_compute_map(T.diagonal(), clusters, eivalToCluster);

    // compute permutation which groups eigenvalues in the same cluster together
    Matrix<Index, Traits::RowsAtCompileTime, 1> permutation;
    matrix_function_compute_permutation(blockStart, eivalToCluster, permutation);

    // permute Schur decomposition
    matrix_function_permute_schur(permutation, U, T);

    // compute result
    MatrixType fT; // matrix function applied to T
    matrix_function_compute_block_atomic(T, atomic, blockStart, clusterSize, fT);
    matrix_function_compute_above_diagonal(T, blockStart, clusterSize, fT);
    result = U * (fT.template triangularView<Upper>() * U.adjoint());
  }
};

} // end of namespace internal

/** \ingroup MatrixFunctions_Module
  *
  * \brief Proxy for the matrix function of some matrix (expression).
  *
  * \tparam Derived  Type of the argument to the matrix function.
  *
  * This class holds the argument to the matrix function until it is assigned or evaluated for some other
  * reason (so the argument should not be changed in the meantime). It is the return type of
  * MatrixBase::matrixFunction() and related functions and most of the time this is the only way it is used.
  */
template<typename Derived> class MatrixFunctionReturnValue
: public ReturnByValue<MatrixFunctionReturnValue<Derived> >
{
  public:
    typedef typename Derived::Scalar Scalar;
    typedef typename internal::stem_function<Scalar>::type StemFunction;

  protected:
    typedef typename internal::ref_selector<Derived>::type DerivedNested;

  public:

    /** \brief Constructor.
      *
      * \param[in] A  %Matrix (expression) forming the argument of the matrix function.
      * \param[in] f  Stem function for matrix function under consideration.
      */
    MatrixFunctionReturnValue(const Derived& A, StemFunction f) : m_A(A), m_f(f) { }

    /** \brief Compute the matrix function.
      *
      * \param[out] result \p f applied to \p A, where \p f and \p A are as in the constructor.
      */
    template <typename ResultType>
    inline void evalTo(ResultType& result) const
    {
      typedef typename internal::nested_eval<Derived, 10>::type NestedEvalType;
      typedef typename internal::remove_all<NestedEvalType>::type NestedEvalTypeClean;
      typedef internal::traits<NestedEvalTypeClean> Traits;
      typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
      typedef Matrix<ComplexScalar, Dynamic, Dynamic, 0, Traits::RowsAtCompileTime, Traits::ColsAtCompileTime> DynMatrixType;

      typedef internal::MatrixFunctionAtomic<DynMatrixType> AtomicType;
      AtomicType atomic(m_f);

      internal::matrix_function_compute<typename NestedEvalTypeClean::PlainObject>::run(m_A, atomic, result);
    }

    Index rows() const { return m_A.rows(); }
    Index cols() const { return m_A.cols(); }

  private:
    const DerivedNested m_A;
    StemFunction *m_f;
};

namespace internal {
template<typename Derived>
struct traits<MatrixFunctionReturnValue<Derived> >
{
  typedef typename Derived::PlainObject ReturnType;
};
}


/********** MatrixBase methods **********/


template <typename Derived>
const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::matrixFunction(typename internal::stem_function<typename internal::traits<Derived>::Scalar>::type f) const
{
  eigen_assert(rows() == cols());
  return MatrixFunctionReturnValue<Derived>(derived(), f);
}
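
// Minimal usage sketch (illustrative only; MatrixXd and the helper expfn below are an example, not part of
// this header):
//
//   #include <unsupported/Eigen/MatrixFunctions>
//
//   // Stem function for the exponential: every derivative of exp is exp, so the derivative order is ignored.
//   std::complex<double> expfn(std::complex<double> x, int) { return std::exp(x); }
//
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 4);
//   Eigen::MatrixXd expA = A.matrixFunction(expfn);   // in exact arithmetic this equals the matrix exponential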

template <typename Derived>
const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sin() const
{
  eigen_assert(rows() == cols());
  typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
  return MatrixFunctionReturnValue<Derived>(derived(), internal::stem_function_sin<ComplexScalar>);
}

template <typename Derived>
const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cos() const
{
  eigen_assert(rows() == cols());
  typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
  return MatrixFunctionReturnValue<Derived>(derived(), internal::stem_function_cos<ComplexScalar>);
}

template <typename Derived>
const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::sinh() const
{
  eigen_assert(rows() == cols());
  typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
  return MatrixFunctionReturnValue<Derived>(derived(), internal::stem_function_sinh<ComplexScalar>);
}

template <typename Derived>
const MatrixFunctionReturnValue<Derived> MatrixBase<Derived>::cosh() const
{
  eigen_assert(rows() == cols());
  typedef typename internal::stem_function<Scalar>::ComplexScalar ComplexScalar;
  return MatrixFunctionReturnValue<Derived>(derived(), internal::stem_function_cosh<ComplexScalar>);
}
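
// Minimal usage sketch (illustrative only): the trigonometric variants above return proxy objects that
// evaluate on assignment, e.g.
//
//   Eigen::MatrixXd S = A.sin(), C = A.cos();   // A is any square MatrixXd
//   // In exact arithmetic S*S + C*C equals the identity, which makes a handy sanity check.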

} // end namespace Eigen

#endif // EIGEN_MATRIX_FUNCTION_H