File indexing completed on 2024-11-15 09:37:09
0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
0011 #define EIGEN_SPARSE_SELFADJOINTVIEW_H
0012
0013 namespace Eigen {
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029 namespace internal {
0030
// A SparseSelfAdjointView simply forwards all the compile-time traits
// (Scalar, StorageIndex, sizes, flags...) of the matrix it wraps.
template<typename MatrixType, unsigned int Mode>
struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};
0034
// Forward declaration; defined at the end of this file.
// Copies the SrcMode triangle of 'mat' into the DstMode triangle of '_dest',
// optionally applying the permutation 'perm' (identity when perm == 0).
template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

// Forward declaration; defined at the end of this file.
// Expands the Mode triangle of 'mat' into a full symmetric/hermitian matrix,
// optionally applying the permutation 'perm' (identity when perm == 0).
template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);
0040
0041 }
0042
/** \class SparseSelfAdjointView
  *
  * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
  *
  * \tparam MatrixType the type of the wrapped sparse matrix
  * \tparam _Mode selects which triangular part actually stores the
  *         coefficients: \c Upper, \c Lower, or both.
  *
  * Only the \c _Mode triangular part of the nested matrix is referenced; the
  * other half is implicitly deduced by (conjugate) symmetry.  Instances are
  * typically obtained through SparseMatrixBase::selfadjointView().
  */
template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
  : public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
{
  public:

    enum {
      Mode = _Mode,
      // Mode seen from the transposed expression: Upper and Lower are swapped.
      TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? Upper : 0),
      RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime
    };

    typedef EigenBase<SparseSelfAdjointView> Base;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;

    // A selfadjoint matrix is necessarily square.
    explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
    {
      eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices");
    }

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    /** \returns a const reference to the nested matrix */
    const _MatrixTypeNested& matrix() const { return m_matrix; }
    /** \returns a writable reference to the nested matrix */
    typename internal::remove_reference<MatrixTypeNested>::type& matrix() { return m_matrix; }

    /** \returns a lazy product expression of \c *this (interpreted as selfadjoint)
      * times the sparse matrix \a rhs. */
    template<typename OtherDerived>
    Product<SparseSelfAdjointView, OtherDerived>
    operator*(const SparseMatrixBase<OtherDerived>& rhs) const
    {
      return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
    }

    /** \returns a lazy product expression of the sparse matrix \a lhs times
      * \a rhs interpreted as selfadjoint. */
    template<typename OtherDerived> friend
    Product<OtherDerived, SparseSelfAdjointView>
    operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
    }

    /** \returns a product expression of \c *this times the dense matrix/vector \a rhs. */
    template<typename OtherDerived>
    Product<SparseSelfAdjointView,OtherDerived>
    operator*(const MatrixBase<OtherDerived>& rhs) const
    {
      return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived());
    }

    /** \returns a product expression of the dense matrix/vector \a lhs times \c *this. */
    template<typename OtherDerived> friend
    Product<OtherDerived,SparseSelfAdjointView>
    operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);
    }

    /** Performs a rank K update of the selfadjoint matrix \c *this with the
      * sparse expression \a u: \c *this \c += \c alpha*u*u.adjoint().
      * See the definition below for the exact \a alpha==0 semantics. */
    template<typename DerivedU>
    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));

    /** \returns an expression of \c *this with its rows and columns permuted
      * by \a perm (a symmetric permutation). */
    SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
    {
      return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);
    }

    template<typename SrcMatrixType,int SrcMode>
    SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcMode>& permutedMatrix)
    {
      internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);
      return *this;
    }

    SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
    {
      // Route the copy through twistedBy with an empty permutation so that
      // the triangle-aware permutation assignment path above is used.
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    // The defaulted copy constructor is fine; only assignment needs the
    // custom path above.
    EIGEN_DEFAULT_COPY_CONSTRUCTOR(SparseSelfAdjointView)

    // Assignment from a view with a (possibly) different Mode / matrix type.
    template<typename SrcMatrixType,unsigned int SrcMode>
    SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
    {
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    /** Pseudo resize: only asserts that the requested size matches the
      * current one; the view cannot actually be resized. */
    void resize(Index rows, Index cols)
    {
      EIGEN_ONLY_USED_FOR_DEBUG(rows);
      EIGEN_ONLY_USED_FOR_DEBUG(cols);
      eigen_assert(rows == this->rows() && cols == this->cols()
                && "SparseSelfadjointView::resize() does not actually allow to resize.");
    }

  protected:

    MatrixTypeNested m_matrix;

  private:
    // Private and not defined here: disables generic EigenBase-style direct
    // evaluation; assignments go through the Assignment specializations below.
    template<typename Dest> void evalTo(Dest &) const;
};
0171
0172
0173
0174
0175
/** \returns a read-only selfadjoint view of the \a UpLo triangular part of \c *this. */
template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView() const
{
  return SparseSelfAdjointView<const Derived, UpLo>(derived());
}
0182
/** \returns a writable selfadjoint view of the \a UpLo triangular part of \c *this. */
template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
{
  return SparseSelfAdjointView<Derived, UpLo>(derived());
}
0189
0190
0191
0192
0193
/** Performs the rank update \c *this \c += \c alpha*u*u.adjoint().
  *
  * Note the special case: when \a alpha is exactly zero the stored triangle
  * is *overwritten* with the corresponding triangle of u*u.adjoint() instead
  * of being left untouched — presumably used to initialize the values and
  * sparsity pattern in one go (TODO confirm against callers).
  */
template<typename MatrixType, unsigned int Mode>
template<typename DerivedU>
SparseSelfAdjointView<MatrixType,Mode>&
SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
  // Evaluate u*u.adjoint() once, using the same storage order as the nested matrix.
  SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
  if(alpha==Scalar(0))
    m_matrix = tmp.template triangularView<Mode>();
  else
    m_matrix += alpha * tmp.template triangularView<Mode>();

  return *this;
}
0207
0208 namespace internal {
0209
0210
0211
0212
// Give SparseSelfAdjointView its own evaluator shape so that products and
// assignments involving it dispatch to the specializations in this file.
template<typename MatrixType, unsigned int Mode>
struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
{
  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
  typedef SparseSelfAdjointShape Shape;
};
0219
// Tag selecting the dedicated "plain sparse <- sparse selfadjoint" assignment.
struct SparseSelfAdjoint2Sparse {};

// sparse <- selfadjoint uses the dedicated kind below, while
// selfadjoint <- sparse falls back to the generic sparse-to-sparse assignment.
template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };
template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };
0224
// Assignment of a selfadjoint view to a plain sparse matrix: the stored
// triangle is expanded into the full symmetric/hermitian matrix.
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
{
  typedef typename DstXprType::StorageIndex StorageIndex;
  typedef internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> AssignOpType;

  // dst = src : expand the stored triangle directly into dst.
  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignOpType&)
  {
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
  }

  // Generic functor: expand into a temporary first, then apply the
  // functor-based assignment from that temporary to dst.
  template<typename DestScalar,int StorageOrder,typename AssignFunc>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignFunc& func)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    call_assignment_no_alias_no_transpose(dst, tmp, func);
  }

  // dst += src : expand into a temporary and add it.
  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
                  const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& )
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    dst += tmp;
  }

  // dst -= src : expand into a temporary and subtract it.
  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
                  const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& )
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    dst -= tmp;
  }

  // Legacy DynamicSparseMatrix destination: expand into a compressed
  // temporary first, then copy it over.
  template<typename DestScalar>
  static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const AssignOpType&)
  {
    SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
    dst = tmp;
  }
};
0273
0274 }
0275
0276
0277
0278
0279
0280 namespace internal {
0281
// Core kernel computing res += alpha * lhs * rhs, where 'lhs' is selfadjoint
// and only its Mode triangular half is actually stored.
template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  EIGEN_ONLY_USED_FOR_DEBUG(alpha);

  typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
  typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
  typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
  typedef typename LhsEval::InnerIterator LhsIterator;
  typedef typename SparseLhsType::Scalar LhsScalar;

  enum {
    LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
    // ProcessFirstHalf is true when, while scanning inner vector j, the stored
    // off-diagonal entries come *before* the diagonal (full storage, Upper
    // with column-major, or Lower with row-major).  Otherwise the diagonal
    // comes first and the stored entries follow.
    ProcessFirstHalf =
            ((Mode&(Upper|Lower))==(Upper|Lower))
        || ( (Mode&Upper) && !LhsIsRowMajor)
        || ( (Mode&Lower) && LhsIsRowMajor),
    ProcessSecondHalf = !ProcessFirstHalf
  };

  SparseLhsTypeNested lhs_nested(lhs);
  LhsEval lhsEval(lhs_nested);

  // Process one column of the dense right-hand side at a time.
  for (Index k=0; k<rhs.cols(); ++k)
  {
    for (Index j=0; j<lhs.outerSize(); ++j)
    {
      LhsIterator i(lhsEval,j);

      // If the diagonal entry is stored first, consume it now.
      if (ProcessSecondHalf)
      {
        while (i && i.index()<j) ++i;
        if(i && i.index()==j)
        {
          res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
          ++i;
        }
      }

      // alpha * rhs(j,k), reused for all mirrored contributions below.
      typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
      // Accumulator for the contributions to res(j,k).
      typename DenseResType::Scalar res_j(0);
      // Each stored off-diagonal entry contributes to both implicit halves:
      // once to res(j,k) and once (conjugated) to res(i,k).  For column-major
      // storage the coefficient is conjugated first so both updates use the
      // correct (i,j)/(j,i) values.
      for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
      {
        LhsScalar lhs_ij = i.value();
        if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
        res_j += lhs_ij * rhs.coeff(i.index(),k);
        res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
      }
      res.coeffRef(j,k) += alpha * res_j;

      // If the diagonal entry is stored last, it is the current one (if any).
      if (ProcessFirstHalf && i && (i.index()==j))
        res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
    }
  }
}
0341
0342
0343 template<typename LhsView, typename Rhs, int ProductType>
0344 struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
0345 : generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
0346 {
0347 template<typename Dest>
0348 static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
0349 {
0350 typedef typename LhsView::_MatrixTypeNested Lhs;
0351 typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
0352 typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
0353 LhsNested lhsNested(lhsView.matrix());
0354 RhsNested rhsNested(rhs);
0355
0356 internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
0357 }
0358 };
0359
0360 template<typename Lhs, typename RhsView, int ProductType>
0361 struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
0362 : generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
0363 {
0364 template<typename Dest>
0365 static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
0366 {
0367 typedef typename RhsView::_MatrixTypeNested Rhs;
0368 typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
0369 typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
0370 LhsNested lhsNested(lhs);
0371 RhsNested rhsNested(rhsView.matrix());
0372
0373
0374 Transpose<Dest> dstT(dst);
0375 internal::sparse_selfadjoint_time_dense_product<RhsView::TransposeMode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
0376 }
0377 };
0378
0379
0380
0381
// Evaluator for (sparse selfadjoint) * (sparse) products: the selfadjoint
// operand is first expanded into a plain sparse matrix (m_lhs), then a
// regular sparse*sparse product fills m_result.
template<typename LhsView, typename Rhs, int ProductTag>
struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape>
  : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
{
  typedef Product<LhsView, Rhs, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr)
    : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
  {
    // Bind the base evaluator to m_result, then compute the product into it.
    ::new (static_cast<Base*>(this)) Base(m_result);
    generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
  }

protected:
  typename Rhs::PlainObject m_lhs;  // plain-sparse expansion of the selfadjoint lhs
  PlainObject m_result;
};
0401
// Evaluator for (sparse) * (sparse selfadjoint) products: symmetric to the
// specialization above, expanding the selfadjoint rhs into m_rhs first.
template<typename Lhs, typename RhsView, int ProductTag>
struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape>
  : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
{
  typedef Product<Lhs, RhsView, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr)
    : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
  {
    // Bind the base evaluator to m_result, then compute the product into it.
    ::new (static_cast<Base*>(this)) Base(m_result);
    generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);
  }

protected:
  typename Lhs::PlainObject m_rhs;  // plain-sparse expansion of the selfadjoint rhs
  PlainObject m_result;
};
0421
0422 }
0423
0424
0425
0426
0427 namespace internal {
0428
// Expands the Mode triangular part of 'mat' (optionally permuted by 'perm')
// into the full symmetric/hermitian sparse matrix '_dest', using a classic
// two-pass (count, then fill) compressed-storage construction.
// Mode==Upper|Lower means 'mat' already stores both halves.
template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  MatEval matEval(mat);
  Dest& dest(_dest.derived());
  enum {
    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
  };

  Index size = mat.rows();
  VectorI count;
  count.resize(size);
  count.setZero();
  dest.resize(size,size);
  // Pass 1: count the number of entries per destination outer vector.
  // Diagonal entries go to one outer vector; off-diagonal entries of the
  // stored triangle are mirrored, so they are counted twice (ip and jp).
  for(Index j = 0; j<size; ++j)
  {
    Index jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      Index i = it.index();
      Index r = it.row();
      Index c = it.col();
      Index ip = perm ? perm[i] : i;
      if(Mode==int(Upper|Lower))
        count[StorageOrderMatch ? jp : ip]++;
      else if(r==c)
        count[ip]++;
      else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
      {
        count[ip]++;
        count[jp]++;
      }
    }
  }
  Index nnz = count.sum();

  // Allocate and build the outer index array by prefix sum, then reuse
  // 'count' as per-outer-vector insertion cursors.
  dest.resizeNonZeros(nnz);
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  // Pass 2: copy the entries.  Off-diagonal entries of the stored triangle
  // are written to both halves, conjugating the mirrored copy.
  for(StorageIndex j = 0; j<size; ++j)
  {
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = internal::convert_index<StorageIndex>(it.index());
      Index r = it.row();
      Index c = it.col();

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      if(Mode==int(Upper|Lower))
      {
        Index k = count[StorageOrderMatch ? jp : ip]++;
        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
        dest.valuePtr()[k] = it.value();
      }
      else if(r==c)
      {
        Index k = count[ip]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
      }
      else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
      {
        // When source and destination storage orders differ, swapping the
        // permuted indices accounts for the implicit transposition.
        if(!StorageOrderMatch)
          std::swap(ip,jp);
        Index k = count[jp]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
        k = count[ip]++;
        dest.innerIndexPtr()[k] = jp;
        dest.valuePtr()[k] = numext::conj(it.value());
      }
    }
  }
}
0518
// Copies the _SrcMode triangular part of 'mat' into the _DstMode triangular
// part of '_dest', applying the permutation 'perm' (if non-null).  Only one
// triangle is written; entries that the permutation would send to the other
// triangle are transposed (and conjugated) back into place.
template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  enum {
    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
    // A row-major Upper triangle is laid out like a column-major Lower one,
    // so the effective modes are flipped to reason in a storage-agnostic way.
    DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
    SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
  };

  MatEval matEval(mat);

  Index size = mat.rows();
  VectorI count(size);
  count.setZero();
  dest.resize(size,size);
  // Pass 1: count entries per destination outer vector, skipping entries
  // outside the source triangle.
  for(StorageIndex j = 0; j<size; ++j)
  {
    StorageIndex jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex ip = perm ? perm[i] : i;
      // min/max picks the outer index so the entry lands in the DstMode triangle.
      count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
    }
  }
  // Build the outer index array by prefix sum, then reuse 'count' as
  // per-outer-vector insertion cursors.
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  // Pass 2: copy the entries into the destination triangle.
  for(StorageIndex j = 0; j<size; ++j)
  {
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm? perm[i] : i;

      Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
      dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);

      // Entries that end up transposed relative to the source get conjugated.
      if(!StorageOrderMatch) std::swap(ip,jp);
      if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
        dest.valuePtr()[k] = numext::conj(it.value());
      else
        dest.valuePtr()[k] = it.value();
    }
  }
}
0585
0586 }
0587
0588
0589
0590 namespace internal {
0591
// The permutation-product expression inherits all the compile-time traits of
// the matrix it wraps.
template<typename MatrixType, int Mode>
struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
};
0595
0596 }
0597
/** \brief Expression of a selfadjoint sparse matrix (only its Mode triangle
  * is referenced) with its rows and columns permuted by a symmetric
  * permutation.  The actual work happens in the Assignment specializations
  * defined at the end of this file.
  */
template<typename MatrixType,int Mode>
class SparseSymmetricPermutationProduct
  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
{
  public:
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    enum {
      RowsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::ColsAtCompileTime
    };
  protected:
    typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> Perm;
  public:
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type NestedExpression;

    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
      : m_matrix(mat), m_perm(perm)
    {}

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    /** \returns the nested matrix expression */
    const NestedExpression& matrix() const { return m_matrix; }
    /** \returns the permutation */
    const Perm& perm() const { return m_perm; }

  protected:
    MatrixTypeNested m_matrix;
    // NOTE: the permutation is held by reference; it must outlive this
    // expression (fine for the usual pattern of immediate assignment).
    const Perm& m_perm;

};
0631
0632 namespace internal {
0633
// Materializes a SparseSymmetricPermutationProduct, either into a plain
// sparse matrix (expanding both triangles) or into another selfadjoint view
// (keeping a single triangle).
template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
{
  typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
  typedef typename DstXprType::StorageIndex DstIndex;
  // Plain sparse destination: expand the permuted triangle into a full
  // symmetric matrix.  The temporary uses the opposite storage order —
  // presumably so that the final 'dst = tmp' copy performs the required
  // reordering in one pass (TODO confirm).
  template<int Options>
  static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
  {
    SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
    internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
    dst = tmp;
  }

  // Selfadjoint-view destination: copy triangle to triangle directly.
  template<typename DestType,unsigned int DestMode>
  static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
  {
    internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
  }
};
0654
0655 }
0656
0657 }
0658
0659 #endif