// This file is part of the ACTS project.
//
// Copyright (C) 2016 CERN for the benefit of the ACTS project
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.

#pragma once

#include "Acts/Definitions/Algebra.hpp"
#include "Acts/Utilities/ArrayHelpers.hpp"
#include "Acts/Utilities/MathHelpers.hpp"

#include <array>
#include <bitset>
#include <cassert>
#include <cmath>
#include <limits>
#include <optional>

#include "Eigen/Dense"

namespace Acts {

/// Convert a bitset to a matrix of integers, with each element set to the bit
/// value.
/// @note How the bits are assigned to matrix elements depends on the storage
/// type of the matrix being converted (row-major or col-major)
/// @tparam MatrixType Matrix type that is produced
/// @param bs The bitset to convert
/// @return A matrix with the integer values of the bits from @p bs
template <typename MatrixType>
MatrixType bitsetToMatrix(const std::bitset<MatrixType::RowsAtCompileTime *
                                            MatrixType::ColsAtCompileTime>
                              bs) {
  constexpr int rows = MatrixType::RowsAtCompileTime;
  constexpr int cols = MatrixType::ColsAtCompileTime;

  static_assert(rows != -1 && cols != -1,
                "bitsetToMatrix does not support dynamic matrices");

  MatrixType m;
  auto* p = m.data();
  for (std::size_t i = 0; i < rows * cols; i++) {
    p[i] = bs[rows * cols - 1 - i];
  }
  return m;
}

/// Convert an integer matrix to a bitset.
/// @note How the bits are ordered depends on the storage type of the matrix
/// being converted (row-major or col-major)
/// @tparam Derived Eigen base concrete type
/// @param m Matrix that is converted
/// @return The converted bitset.
template <typename Derived>
auto matrixToBitset(const Eigen::PlainObjectBase<Derived>& m) {
  using MatrixType = Eigen::PlainObjectBase<Derived>;
  constexpr std::size_t rows = MatrixType::RowsAtCompileTime;
  constexpr std::size_t cols = MatrixType::ColsAtCompileTime;

  std::bitset<rows * cols> res;

  auto* p = m.data();
  for (std::size_t i = 0; i < rows * cols; i++) {
    res[rows * cols - 1 - i] = static_cast<bool>(p[i]);
  }

  return res;
}
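
// Illustrative usage sketch (added for exposition, not part of the upstream
// header): round-trip a small 0/1-valued matrix through matrixToBitset and
// bitsetToMatrix. ActsMatrix<Rows, Cols> is the fixed-size alias from
// Acts/Definitions/Algebra.hpp; exampleBitsetRoundTrip is a hypothetical helper
// used only here.
inline void exampleBitsetRoundTrip() {
  ActsMatrix<2, 3> m;
  m << 1, 0, 0,
       0, 1, 1;

  // The bit order follows the storage order of the matrix type (column-major
  // for the default Eigen layout), with the first stored element mapped to the
  // highest bit.
  std::bitset<6> bits = matrixToBitset(m);

  // Converting back reproduces the original 0/1 matrix.
  ActsMatrix<2, 3> roundTrip = bitsetToMatrix<ActsMatrix<2, 3>>(bits);
  assert(roundTrip.isApprox(m));
}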

/// @brief Perform a blocked matrix multiplication, avoiding Eigen GEMM
/// methods.
///
/// @tparam A The type of the first matrix, which should be MxN
/// @tparam B The type of the second matrix, which should be NxP
///
/// @param[in] a An MxN matrix of type A
/// @param[in] b An NxP matrix of type B
///
/// @returns The product ab
template <typename A, typename B>
inline ActsMatrix<A::RowsAtCompileTime, B::ColsAtCompileTime> blockedMult(
    const A& a, const B& b) {
  // Extract the sizes of the matrix types that we receive as template
  // parameters.
  constexpr int M = A::RowsAtCompileTime;
  constexpr int N = A::ColsAtCompileTime;
  constexpr int P = B::ColsAtCompileTime;

  // Ensure that the second dimension of our first matrix equals the first
  // dimension of the second matrix, otherwise we cannot multiply.
  static_assert(N == B::RowsAtCompileTime);

  if constexpr (M <= 4 && N <= 4 && P <= 4) {
    // In cases where the matrices being multiplied are small, we can rely on
    // Eigen to do a good job, and we don't really need to do any blocking.
    return a * b;
  } else {
    // Here, we want to calculate the expression C = AB. Eigen natively
    // doesn't do a great job at this if the matrices A and B are large
    // (roughly M >= 8, N >= 8, or P >= 8), and applies a slow GEMM operation.
    // We apply a blocked matrix multiplication operation to decompose the
    // multiplication into smaller operations, relying on the fact that:
    //
    // ┌         ┐   ┌         ┐ ┌         ┐
    // │ C₁₁ C₁₂ │ = │ A₁₁ A₁₂ │ │ B₁₁ B₁₂ │
    // │ C₂₁ C₂₂ │   │ A₂₁ A₂₂ │ │ B₂₁ B₂₂ │
    // └         ┘   └         ┘ └         ┘
    //
    // where:
    //
    // C₁₁ = A₁₁ * B₁₁ + A₁₂ * B₂₁
    // C₁₂ = A₁₁ * B₁₂ + A₁₂ * B₂₂
    // C₂₁ = A₂₁ * B₁₁ + A₂₂ * B₂₁
    // C₂₂ = A₂₁ * B₁₂ + A₂₂ * B₂₂
    //
    // The sizes of these submatrices are roughly half (in each dimension) that
    // of the parent matrix. If the size of the parent matrix is even, we can
    // divide it exactly. If the size of the parent matrix is odd, then some
    // of the submatrices will be one larger than the others. In general, for
    // any matrix Q, the sizes of the submatrices are (where / denotes integer
    // division):
    //
    // Q₁₁ : M / 2 × P / 2
    // Q₁₂ : M / 2 × (P + 1) / 2
    // Q₂₁ : (M + 1) / 2 × P / 2
    // Q₂₂ : (M + 1) / 2 × (P + 1) / 2
    //
    // See https://csapp.cs.cmu.edu/public/waside/waside-blocking.pdf for a
    // more in-depth explanation of blocked matrix multiplication.
    constexpr int M1 = M / 2;
    constexpr int M2 = (M + 1) / 2;
    constexpr int N1 = N / 2;
    constexpr int N2 = (N + 1) / 2;
    constexpr int P1 = P / 2;
    constexpr int P2 = (P + 1) / 2;

    // Construct the end result in this matrix, which destroys a few of Eigen's
    // built-in optimization techniques, but sadly this is necessary.
    ActsMatrix<M, P> r;

    // C₁₁ = A₁₁ * B₁₁ + A₁₂ * B₂₁
    r.template topLeftCorner<M1, P1>().noalias() =
        a.template topLeftCorner<M1, N1>() *
            b.template topLeftCorner<N1, P1>() +
        a.template topRightCorner<M1, N2>() *
            b.template bottomLeftCorner<N2, P1>();

    // C₁₂ = A₁₁ * B₁₂ + A₁₂ * B₂₂
    r.template topRightCorner<M1, P2>().noalias() =
        a.template topLeftCorner<M1, N1>() *
            b.template topRightCorner<N1, P2>() +
        a.template topRightCorner<M1, N2>() *
            b.template bottomRightCorner<N2, P2>();

    // C₂₁ = A₂₁ * B₁₁ + A₂₂ * B₂₁
    r.template bottomLeftCorner<M2, P1>().noalias() =
        a.template bottomLeftCorner<M2, N1>() *
            b.template topLeftCorner<N1, P1>() +
        a.template bottomRightCorner<M2, N2>() *
            b.template bottomLeftCorner<N2, P1>();

    // C₂₂ = A₂₁ * B₁₂ + A₂₂ * B₂₂
    r.template bottomRightCorner<M2, P2>().noalias() =
        a.template bottomLeftCorner<M2, N1>() *
            b.template topRightCorner<N1, P2>() +
        a.template bottomRightCorner<M2, N2>() *
            b.template bottomRightCorner<N2, P2>();

    return r;
  }
}
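
// Illustrative usage sketch (added for exposition, not part of the upstream
// header): multiply an 8x8 matrix by an 8x6 matrix with blockedMult. Since all
// dimensions exceed 4, the call takes the blocked branch and is decomposed
// into four sub-block products; the result equals a * b up to floating point
// rounding. exampleBlockedMult is a hypothetical helper used only here.
inline ActsMatrix<8, 6> exampleBlockedMult(const ActsMatrix<8, 8>& a,
                                           const ActsMatrix<8, 6>& b) {
  return blockedMult(a, b);
}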

/// FPE "safe" functions
///
/// Our main motivation for this is that users might have a strict FPE policy
/// which would flag every single occurrence as a failure, and then somebody
/// has to investigate. Since we process a large number of events and floating
/// point numbers sometimes behave in surprising ways, the caller of this
/// function might want to hide FPEs and handle them in a more controlled way.

/// Calculate the inverse of an Eigen matrix after checking whether it can be
/// numerically inverted. This allows catching potential FPEs before they
/// occur. For matrices up to 4x4, the inverse is computed directly. For larger
/// and dynamic matrices, FullPivLU is used.
///
/// @tparam MatrixType Eigen matrix concrete type
/// @tparam ResultType Eigen result type, defaulted to the input type
///
/// @param m Eigen matrix to invert
///
/// @return The inverse of @p m if it is numerically invertible, std::nullopt
/// otherwise
template <typename MatrixType, typename ResultType = MatrixType>
std::optional<ResultType> safeInverse(const MatrixType& m) noexcept {
  constexpr int rows = MatrixType::RowsAtCompileTime;
  constexpr int cols = MatrixType::ColsAtCompileTime;

  static_assert(rows == cols);

  ResultType result;
  bool invertible = false;

  if constexpr (rows > 4 || rows == -1) {
    Eigen::FullPivLU<MatrixType> mFullPivLU(m);
    if (mFullPivLU.isInvertible()) {
      invertible = true;
      result = mFullPivLU.inverse();
    }
  } else {
    m.computeInverseWithCheck(result, invertible);
  }

  if (invertible) {
    return result;
  }

  return std::nullopt;
}
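
// Illustrative usage sketch (added for exposition, not part of the upstream
// header): invert a square matrix without risking an FPE on a singular input;
// the caller decides how to react to an empty optional. exampleSafeInverse is
// a hypothetical helper used only here, and the identity fallback is just for
// illustration.
inline ActsMatrix<3, 3> exampleSafeInverse(const ActsMatrix<3, 3>& m) {
  std::optional<ActsMatrix<3, 3>> inv = safeInverse(m);
  if (!inv.has_value()) {
    // Singular or numerically not invertible input: real code would propagate
    // an error instead of falling back to the identity.
    return ActsMatrix<3, 3>::Identity();
  }
  return *inv;
}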

/// Specialization of the exponent limit to be used for safe exponential,
/// depending on the floating point type.
/// See https://godbolt.org/z/z53Er6Mzf for reasoning for the concrete numbers.
template <typename T>
struct ExpSafeLimit {};
template <>
struct ExpSafeLimit<double> {
  constexpr static double value = 500.0;
};
template <>
struct ExpSafeLimit<float> {
  constexpr static float value = 50.0;
};

/// Calculate the exponential function while avoiding FPEs.
///
/// @param val argument for which the exponential function should be evaluated.
///
/// @return 0 in the case of underflow, std::numeric_limits<T>::infinity in the
/// case of overflow, and std::exp(val) otherwise
template <typename T>
constexpr T safeExp(T val) noexcept {
  constexpr T maxExponent = ExpSafeLimit<T>::value;
  constexpr T minExponent = -maxExponent;
  if (val < minExponent) {
    return 0.0;
  }

  if (val > maxExponent) {
    return std::numeric_limits<T>::infinity();
  }

  return std::exp(val);
}
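
// Illustrative usage sketch (added for exposition, not part of the upstream
// header): safeExp clamps the argument range instead of raising an FPE, so
// extreme inputs map to 0 or infinity. exampleSafeExp is a hypothetical helper
// used only here.
inline void exampleSafeExp() {
  assert(safeExp(-600.0) == 0.0);  // below -500: underflow, returns 0
  assert(safeExp(600.0) ==
         std::numeric_limits<double>::infinity());  // above 500: overflow
  assert(safeExp(1.0) == std::exp(1.0));            // in range: plain std::exp
}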

/// @brief Map the indices of the lower triangular part of a symmetric N x N matrix
///        to an unrolled vector index.
/// @param i The row index of the symmetric matrix
/// @param k The column index of the symmetric matrix
/// @return The index of the element in the unrolled lower-triangular vector
template <std::size_t N>
constexpr std::size_t vecIdxFromSymMat(const std::size_t i, const std::size_t k)
  requires(N > 0)
{
  assert(i < N);
  assert(k < N);
  if (k > i) {
    return vecIdxFromSymMat<N>(k, i);
  }
  return sumUpToN(i) + k;
}

/// @brief Map an unrolled vector index to the indices of the lower triangular
///        part of a symmetric N x N matrix. Inverse of `vecIdxFromSymMat`.
/// @param k The unrolled vector index
/// @return A pair of indices (i, j) such that the element at (i, j) in the
///         symmetric matrix corresponds to the k-th element in the unrolled
///         vector.
template <std::size_t N>
constexpr std::array<std::size_t, 2> symMatIndices(const std::size_t k)
  requires(N > 1)
{
  assert(k < sumUpToN(N));
  constexpr std::size_t bound = sumUpToN(N - 1);
  if (k >= bound) {
    return std::array<std::size_t, 2>{N - 1, k - bound};
  }
  if constexpr (N > 2) {
    return symMatIndices<N - 1>(k);
  }
  return filledArray<std::size_t, 2>(0);
}
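
// Illustrative usage sketch (added for exposition, not part of the upstream
// header): for a symmetric 3 x 3 matrix the lower triangle unrolls row by row
// into a 6-element vector,
//
//   | 0     |
//   | 1 2   |  ->  [0, 1, 2, 3, 4, 5]
//   | 3 4 5 |
//
// and the two helpers translate between the (row, column) and the vector
// pictures. exampleSymMatIndexing is a hypothetical helper used only here.
inline void exampleSymMatIndexing() {
  static_assert(vecIdxFromSymMat<3>(2, 1) == 4);
  // Off-diagonal entries are symmetric: (1, 2) maps to the same slot as (2, 1).
  static_assert(vecIdxFromSymMat<3>(1, 2) == 4);
  // symMatIndices is the inverse map back into the lower triangle.
  static_assert(symMatIndices<3>(4) == std::array<std::size_t, 2>{2, 1});
}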

}  // namespace Acts