/include/eigen3/unsupported/Eigen/CXX11/src/Tensor/Tensor.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
// Copyright (C) 2013 Christian Seiler <christian@iwakd.de>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_H

namespace Eigen {

/** \class Tensor
  * \ingroup CXX11_Tensor_Module
  *
  * \brief The tensor class.
  *
  * The %Tensor class is the work-horse for all \em dense tensors within Eigen.
  *
  * The %Tensor class encompasses only dynamic-size objects so far.
  *
  * The first two template parameters are required:
  * \tparam Scalar_  Numeric type, e.g. float, double, int or `std::complex<float>`.
  *                 User-defined scalar types are supported as well (see \ref user_defined_scalars "here").
  * \tparam NumIndices_ Number of indices (i.e. rank of the tensor)
  *
  * The remaining template parameters are optional -- in most cases you don't have to worry about them.
  * \tparam Options_  A combination of either \b #RowMajor or \b #ColMajor, and of either
  *                 \b #AutoAlign or \b #DontAlign.
  *                 The former controls \ref TopicStorageOrders "storage order", and defaults to column-major. The latter controls alignment, which is required
  *                 for vectorization. It defaults to aligning tensors. Note that tensors currently do not support any operations that profit from vectorization.
  *                 Support for such operations (i.e. adding two tensors etc.) is planned.
  *
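  * For example (an illustrative snippet added here, not part of the upstream
  * comment), a rank-3 tensor of floats stored in row-major order can be
  * declared as:
  *
  * \code
  * Eigen::Tensor<float, 3, Eigen::RowMajor> t_row(2, 3, 4);
  * \endcode
  *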
  * You can access elements of tensors using normal subscripting:
  *
  * \code
  * Eigen::Tensor<double, 4> t(10, 10, 10, 10);
  * t(0, 1, 2, 3) = 42.0;
  * \endcode
  *
  * This class can be extended with the help of the plugin mechanism described on the page
  * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_TENSOR_PLUGIN.
  *
  * <i><b>Some notes:</b></i>
  *
  * <dl>
  * <dt><b>Relation to other parts of Eigen:</b></dt>
  * <dd>The midterm development goal for this class is to have a similar hierarchy as Eigen uses for matrices, so that
  * taking blocks or using tensors in expressions is easily possible, including an interface with the vector/matrix code
  * by providing .asMatrix() and .asVector() (or similar) methods for rank 2 and 1 tensors. However, currently, the %Tensor
  * class does not provide any of these features and is only available as a stand-alone class that just allows for
  * coefficient access. Also, when fixed-size tensors are implemented, the number of template arguments is likely to
  * change dramatically.</dd>
  * </dl>
  *
  * \ref TopicStorageOrders
  */

template<typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
{
  public:
    typedef Tensor<Scalar_, NumIndices_, Options_, IndexType_> Self;
    typedef TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> > Base;
    typedef typename Eigen::internal::nested<Self>::type Nested;
    typedef typename internal::traits<Self>::StorageKind StorageKind;
    typedef typename internal::traits<Self>::Index Index;
    typedef Scalar_ Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;
    typedef typename Base::CoeffReturnType CoeffReturnType;

    enum {
      IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0) & !(Options_&DontAlign),
      Layout = Options_ & RowMajor ? RowMajor : ColMajor,
      CoordAccess = true,
      RawAccess = true
    };

    static const int Options = Options_;
    static const int NumIndices = NumIndices_;
    typedef DSizes<Index, NumIndices_> Dimensions;

  protected:
    TensorStorage<Scalar, Dimensions, Options> m_storage;

#ifdef EIGEN_HAS_SFINAE
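    // Descriptive note (added comment): this trait is used by the
    // SFINAE-guarded overloads below to tell "normal" indices (an
    // array<Index, NumIndices> or a plain integer type) apart from custom
    // index containers, which are converted via
    // internal::customIndices2Array before use.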
    template<typename CustomIndices>
    struct isOfNormalIndex{
      static const bool is_array = internal::is_base_of<array<Index, NumIndices>, CustomIndices>::value;
      static const bool is_int = NumTraits<CustomIndices>::IsInteger;
      static const bool value = is_array | is_int;
    };
#endif

  public:
    // Metadata
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index                         rank()                   const { return NumIndices; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index                         dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions&             dimensions()             const { return m_storage.dimensions(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index                         size()                   const { return m_storage.size(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar                        *data()                        { return m_storage.data(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar                  *data()                  const { return m_storage.data(); }

    // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
    // work, because that uses base().coeffRef() - and we don't yet
    // implement a similar class hierarchy
    inline Self& base()             { return *this; }
    inline const Self& base() const { return *this; }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeff(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(CustomIndices& indices) const
    {
        return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
    }
#endif

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
             >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(CustomIndices& indices)
    {
        return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
    }
#endif

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return this->operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
    {
      return coeff(array<Index, 2>(i0, i1));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
    {
      return coeff(array<Index, 3>(i0, i1, i2));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
    {
      return coeff(array<Index, 4>(i0, i1, i2, i3));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
    {
      return coeff(array<Index, 5>(i0, i1, i2, i3, i4));
    }
#endif

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(CustomIndices& indices) const
    {
        return coeff(internal::customIndices2Array<Index,NumIndices>(indices));
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
    {
      return coeff(indices);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return coeff(index);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff();
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead.
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff(index);
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    inline Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return operator()(array<Index, NumIndices>{{firstIndex, secondIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
    {
      return coeffRef(array<Index, 2>(i0, i1));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
    {
      return coeffRef(array<Index, 3>(i0, i1, i2));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
    {
      return coeffRef(array<Index, 4>(i0, i1, i2, i3));
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
    {
      return coeffRef(array<Index, 5>(i0, i1, i2, i3, i4));
    }
#endif

    // normal indices
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
    {
      return coeffRef(indices);
    }

    // custom indices
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomIndices>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(CustomIndices& indices)
    {
      return coeffRef(internal::customIndices2Array<Index,NumIndices>(indices));
    }
#endif

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index index)
    {
      eigen_assert(index >= 0 && index < size());
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeffRef();
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator[](Index index)
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor()
      : m_storage()
    {
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(const Self& other)
      : m_storage(other.m_storage)
    {
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index firstDimension, IndexTypes... otherDimensions)
        : m_storage(firstDimension, otherDimensions...)
    {
      // The number of dimensions used to construct a tensor must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
#else
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(Index dim1)
      : m_storage(dim1, array<Index, 1>(dim1))
    {
      EIGEN_STATIC_ASSERT(1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2)
      : m_storage(dim1*dim2, array<Index, 2>(dim1, dim2))
    {
      EIGEN_STATIC_ASSERT(2 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3)
      : m_storage(dim1*dim2*dim3, array<Index, 3>(dim1, dim2, dim3))
    {
      EIGEN_STATIC_ASSERT(3 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4)
      : m_storage(dim1*dim2*dim3*dim4, array<Index, 4>(dim1, dim2, dim3, dim4))
    {
      EIGEN_STATIC_ASSERT(4 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index dim1, Index dim2, Index dim3, Index dim4, Index dim5)
      : m_storage(dim1*dim2*dim3*dim4*dim5, array<Index, 5>(dim1, dim2, dim3, dim4, dim5))
    {
      EIGEN_STATIC_ASSERT(5 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
#endif

    /** Normal Dimension */
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit Tensor(const array<Index, NumIndices>& dimensions)
        : m_storage(internal::array_prod(dimensions), dimensions)
    {
      EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
    }
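
    // Usage sketch (added comment, not in the upstream file): constructing a
    // tensor from an explicit dimension array instead of a list of sizes,
    // assuming the default IndexType_ so that Eigen::Index matches Index.
    //
    //   Eigen::array<Eigen::Index, 3> dims{{2, 3, 4}};
    //   Eigen::Tensor<float, 3> t(dims);   // a 2x3x4 tensor, uninitialized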

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, WriteAccessors>& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }

    #if EIGEN_HAS_RVALUE_REFERENCES
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor(Self&& other)
      : m_storage(std::move(other.m_storage))
    {
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor& operator=(Self&& other)
    {
      m_storage = std::move(other.m_storage);
      return *this;
    }
    #endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor& operator=(const Tensor& other)
    {
      typedef TensorAssignOp<Tensor, const Tensor> Assign;
      Assign assign(*this, other);
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other);
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
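
    // Usage sketch (added comment): assignment from a tensor expression is
    // evaluated eagerly through TensorAssignOp/TensorExecutor, after resizing
    // the destination to the expression's dimensions.
    //
    //   Eigen::Tensor<float, 2> a(2, 2), b(2, 2), c;
    //   a.setRandom(); b.setRandom();
    //   c = a + b;   // c is resized to 2x2 and filled with the sum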

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes> EIGEN_DEVICE_FUNC
    void resize(Index firstDimension, IndexTypes... otherDimensions)
    {
      // The number of dimensions used to resize a tensor must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      resize(array<Index, NumIndices>{{firstDimension, otherDimensions...}});
    }
#endif

    /** Normal Dimension */
    EIGEN_DEVICE_FUNC void resize(const array<Index, NumIndices>& dimensions)
    {
      int i;
      Index size = Index(1);
      for (i = 0; i < NumIndices; i++) {
        internal::check_rows_cols_for_overflow<Dynamic>::run(size, dimensions[i]);
        size *= dimensions[i];
      }
      #ifdef EIGEN_INITIALIZE_COEFFS
        bool size_changed = size != this->size();
        m_storage.resize(size, dimensions);
        if(size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
      #else
        m_storage.resize(size, dimensions);
      #endif
    }
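
    // Usage sketch (added comment): resize() discards the previous contents;
    // the new coefficients are left uninitialized unless
    // EIGEN_INITIALIZE_COEFFS is defined.
    //
    //   Eigen::Tensor<double, 2> m(3, 3);
    //   m.resize(5, 7);   // m is now 5x7, contents are indeterminate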

    // Why is this overload needed? DSizes is derived from array.
    EIGEN_DEVICE_FUNC void resize(const DSizes<Index, NumIndices>& dimensions) {
      array<Index, NumIndices> dims;
      for (int i = 0; i < NumIndices; ++i) {
        dims[i] = dimensions[i];
      }
      resize(dims);
    }

    EIGEN_DEVICE_FUNC
    void resize()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      // Nothing to do: rank 0 tensors have fixed size
    }

#ifdef EIGEN_HAS_INDEX_LIST
    template <typename FirstType, typename... OtherTypes>
    EIGEN_DEVICE_FUNC
    void resize(const Eigen::IndexList<FirstType, OtherTypes...>& dimensions) {
      array<Index, NumIndices> dims;
      for (int i = 0; i < NumIndices; ++i) {
        dims[i] = static_cast<Index>(dimensions[i]);
      }
      resize(dims);
    }
#endif

    /** Custom Dimension */
#ifdef EIGEN_HAS_SFINAE
    template<typename CustomDimension,
             EIGEN_SFINAE_ENABLE_IF( !(isOfNormalIndex<CustomDimension>::value) )
    >
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void resize(CustomDimension& dimensions)
    {
      resize(internal::customIndices2Array<Index,NumIndices>(dimensions));
    }
#endif

#ifndef EIGEN_EMULATE_CXX11_META_H
    template <typename std::ptrdiff_t... Indices>
    EIGEN_DEVICE_FUNC
    void resize(const Sizes<Indices...>& dimensions) {
      array<Index, NumIndices> dims;
      for (int i = 0; i < NumIndices; ++i) {
        dims[i] = static_cast<Index>(dimensions[i]);
      }
      resize(dims);
    }
#else
    template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
    EIGEN_DEVICE_FUNC
    void resize(const Sizes<V1, V2, V3, V4, V5>& dimensions) {
      array<Index, NumIndices> dims;
      for (int i = 0; i < NumIndices; ++i) {
        dims[i] = static_cast<Index>(dimensions[i]);
      }
      resize(dims);
    }
#endif

  protected:

    bool checkIndexRange(const array<Index, NumIndices>& indices) const
    {
      using internal::array_apply_and_reduce;
      using internal::array_zip_and_reduce;
      using internal::greater_equal_zero_op;
      using internal::logical_and_op;
      using internal::lesser_op;

      return
        // check whether the indices are all >= 0
        array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
        // check whether the indices fit in the dimensions
        array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
    {
      if (Options&RowMajor) {
        return m_storage.dimensions().IndexOfRowMajor(indices);
      } else {
        return m_storage.dimensions().IndexOfColMajor(indices);
      }
    }
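
    // Worked example (added comment): for a 2x3x4 tensor with indices
    // (i, j, k), the column-major mapping above yields i + 2*j + 6*k,
    // while the row-major mapping yields k + 4*j + 12*i.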
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_H