// This file is part of the ACTS project.
//
// Copyright (C) 2016 CERN for the benefit of the ACTS project
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.

#pragma once

#include "Acts/Definitions/Algebra.hpp"

#include <bitset>
#include <cmath>
#include <cstddef>
#include <limits>
#include <optional>

#include "Eigen/Dense"

namespace Acts {

/// Convert a bitset to a matrix of integers, with each element set to the bit
/// value.
/// @note How the bits are assigned to matrix elements depends on the storage
/// type of the matrix being converted (row-major or col-major)
/// @tparam MatrixType Matrix type that is produced
/// @param bs The bitset to convert
/// @return A matrix with the integer values of the bits from @p bs
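///
/// A usage sketch for illustration (the 2x2 integer matrix type is only an
/// example; any fixed-size Eigen matrix type works):
/// @code
///   std::bitset<4> bits{0b1010};
///   auto m = Acts::bitsetToMatrix<Eigen::Matrix<int, 2, 2>>(bits);
///   // With Eigen's default column-major storage the most significant bit
///   // fills the first stored coefficient:
///   // m(0, 0) == 1, m(1, 0) == 0, m(0, 1) == 1, m(1, 1) == 0
/// @endcode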
template <typename MatrixType>
MatrixType bitsetToMatrix(const std::bitset<MatrixType::RowsAtCompileTime *
                                            MatrixType::ColsAtCompileTime>
                              bs) {
  constexpr int rows = MatrixType::RowsAtCompileTime;
  constexpr int cols = MatrixType::ColsAtCompileTime;

  static_assert(rows != -1 && cols != -1,
                "bitsetToMatrix does not support dynamic matrices");

  MatrixType m;
  auto* p = m.data();
  for (std::size_t i = 0; i < rows * cols; i++) {
    p[i] = bs[rows * cols - 1 - i];
  }
  return m;
}

/// Convert an integer matrix to a bitset.
/// @note How the bits are ordered depends on the storage type of the matrix
/// being converted (row-major or col-major)
/// @tparam Derived Eigen derived concrete type
/// @param m Matrix that is converted
/// @return The converted bitset.
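///
/// A usage sketch for illustration (the 2x2 integer matrix is only an
/// example):
/// @code
///   Eigen::Matrix2i m;
///   m << 1, 1,
///        0, 0;
///   auto bits = Acts::matrixToBitset(m);
///   // With Eigen's default column-major storage the first stored
///   // coefficient m(0, 0) ends up in the most significant bit:
///   // bits.to_string() == "1010"
/// @endcode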
template <typename Derived>
auto matrixToBitset(const Eigen::PlainObjectBase<Derived>& m) {
  using MatrixType = Eigen::PlainObjectBase<Derived>;
  constexpr std::size_t rows = MatrixType::RowsAtCompileTime;
  constexpr std::size_t cols = MatrixType::ColsAtCompileTime;

  std::bitset<rows * cols> res;

  auto* p = m.data();
  for (std::size_t i = 0; i < rows * cols; i++) {
    res[rows * cols - 1 - i] = static_cast<bool>(p[i]);
  }

  return res;
}

/// @brief Perform a blocked matrix multiplication, avoiding Eigen GEMM
/// methods.
///
/// @tparam A The type of the first matrix, which should be MxN
/// @tparam B The type of the second matrix, which should be NxP
///
/// @param[in] a An MxN matrix of type A
/// @param[in] b An NxP matrix of type B
///
/// @returns The product ab
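///
/// A usage sketch for illustration (the 8x6 and 6x8 shapes are only an
/// example, chosen to be large enough to take the blocked code path):
/// @code
///   Acts::ActsMatrix<8, 6> a = Acts::ActsMatrix<8, 6>::Random();
///   Acts::ActsMatrix<6, 8> b = Acts::ActsMatrix<6, 8>::Random();
///   Acts::ActsMatrix<8, 8> c = Acts::blockedMult(a, b);  // same product as a * b
/// @endcode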
template <typename A, typename B>
inline ActsMatrix<A::RowsAtCompileTime, B::ColsAtCompileTime> blockedMult(
    const A& a, const B& b) {
  // Extract the sizes of the matrix types that we receive as template
  // parameters.
  constexpr int M = A::RowsAtCompileTime;
  constexpr int N = A::ColsAtCompileTime;
  constexpr int P = B::ColsAtCompileTime;

  // Ensure that the second dimension of our first matrix equals the first
  // dimension of the second matrix, otherwise we cannot multiply.
  static_assert(N == B::RowsAtCompileTime);

  if constexpr (M <= 4 && N <= 4 && P <= 4) {
    // In cases where the matrices being multiplied are small, we can rely on
    // Eigen to do a good job, and we don't really need to do any blocking.
    return a * b;
  } else {
    // Here, we want to calculate the expression C = AB. Natively, Eigen
    // doesn't do a great job at this if the matrices A and B are large
    // (roughly M >= 8, N >= 8, or P >= 8), and applies a slow GEMM operation.
    // We apply a blocked matrix multiplication operation to decompose the
    // multiplication into smaller operations, relying on the fact that:
    //
    // ┌         ┐   ┌         ┐ ┌         ┐
    // │ C₁₁ C₁₂ │ = │ A₁₁ A₁₂ │ │ B₁₁ B₁₂ │
    // │ C₂₁ C₂₂ │ = │ A₂₁ A₂₂ │ │ B₂₁ B₂₂ │
    // └         ┘   └         ┘ └         ┘
    //
    // where:
    //
    // C₁₁ = A₁₁ * B₁₁ + A₁₂ * B₂₁
    // C₁₂ = A₁₁ * B₁₂ + A₁₂ * B₂₂
    // C₂₁ = A₂₁ * B₁₁ + A₂₂ * B₂₁
    // C₂₂ = A₂₁ * B₁₂ + A₂₂ * B₂₂
    //
    // The sizes of these submatrices are roughly half (in each dimension) that
    // of the parent matrix. If the size of the parent matrix is even, we can
    // divide it exactly. If the size of the parent matrix is odd, then some
    // of the submatrices will be one larger than the others. In general, for
    // any matrix Q with r rows and c columns, the sizes of the submatrices
    // are (where / denotes integer division):
    //
    // Q₁₁ : r / 2 × c / 2
    // Q₁₂ : r / 2 × (c + 1) / 2
    // Q₂₁ : (r + 1) / 2 × c / 2
    // Q₂₂ : (r + 1) / 2 × (c + 1) / 2
    //
    // See https://csapp.cs.cmu.edu/public/waside/waside-blocking.pdf for a
    // more in-depth explanation of blocked matrix multiplication.
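    //
    // For example, with M = 7 the row split below is M1 = 3 and M2 = 4, so
    // the two row blocks together cover all seven rows.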
    constexpr int M1 = M / 2;
    constexpr int M2 = (M + 1) / 2;
    constexpr int N1 = N / 2;
    constexpr int N2 = (N + 1) / 2;
    constexpr int P1 = P / 2;
    constexpr int P2 = (P + 1) / 2;

    // Construct the end result in this matrix, which destroys a few of Eigen's
    // built-in optimization techniques, but sadly this is necessary.
    ActsMatrix<M, P> r;

    // C₁₁ = A₁₁ * B₁₁ + A₁₂ * B₂₁
    r.template topLeftCorner<M1, P1>().noalias() =
        a.template topLeftCorner<M1, N1>() *
            b.template topLeftCorner<N1, P1>() +
        a.template topRightCorner<M1, N2>() *
            b.template bottomLeftCorner<N2, P1>();

    // C₁₂ = A₁₁ * B₁₂ + A₁₂ * B₂₂
    r.template topRightCorner<M1, P2>().noalias() =
        a.template topLeftCorner<M1, N1>() *
            b.template topRightCorner<N1, P2>() +
        a.template topRightCorner<M1, N2>() *
            b.template bottomRightCorner<N2, P2>();

    // C₂₁ = A₂₁ * B₁₁ + A₂₂ * B₂₁
    r.template bottomLeftCorner<M2, P1>().noalias() =
        a.template bottomLeftCorner<M2, N1>() *
            b.template topLeftCorner<N1, P1>() +
        a.template bottomRightCorner<M2, N2>() *
            b.template bottomLeftCorner<N2, P1>();

    // C₂₂ = A₂₁ * B₁₂ + A₂₂ * B₂₂
    r.template bottomRightCorner<M2, P2>().noalias() =
        a.template bottomLeftCorner<M2, N1>() *
            b.template topRightCorner<N1, P2>() +
        a.template bottomRightCorner<M2, N2>() *
            b.template bottomRightCorner<N2, P2>();

    return r;
  }
}

/// FPE "safe" functions
///
/// Our main motivation for this is that users might have a strict FPE policy
/// which would flag every single occurrence as a failure, and then somebody
/// has to investigate. Since we are processing a high number of events, and
/// floating point numbers sometimes work in mysterious ways, the caller of
/// these functions might want to hide FPEs and handle them in a more
/// controlled way.

/// Calculate the inverse of an Eigen matrix after checking if it can be
/// numerically inverted. This allows catching potential FPEs before they
/// occur. For matrices up to 4x4, the inverse is computed directly. For
/// larger and dynamically sized matrices, FullPivLU is used.
///
/// @tparam MatrixType Eigen matrix concrete type
/// @tparam ResultType Eigen result type, defaulted to the input type
///
/// @param m Eigen matrix to invert
///
/// @return The inverse matrix, or std::nullopt if @p m is not invertible
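///
/// A usage sketch for illustration (the 4x4 identity matrix is only an
/// example):
/// @code
///   Acts::ActsMatrix<4, 4> m = Acts::ActsMatrix<4, 4>::Identity();
///   if (auto inv = Acts::safeInverse(m); inv.has_value()) {
///     // invertible: use *inv
///   } else {
///     // m could not be inverted numerically
///   }
/// @endcode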
template <typename MatrixType, typename ResultType = MatrixType>
std::optional<ResultType> safeInverse(const MatrixType& m) noexcept {
  constexpr int rows = MatrixType::RowsAtCompileTime;
  constexpr int cols = MatrixType::ColsAtCompileTime;

  static_assert(rows == cols);

  ResultType result;
  bool invertible = false;

  if constexpr (rows > 4 || rows == -1) {
    Eigen::FullPivLU<MatrixType> mFullPivLU(m);
    if (mFullPivLU.isInvertible()) {
      invertible = true;
      result = mFullPivLU.inverse();
    }
  } else {
    m.computeInverseWithCheck(result, invertible);
  }

  if (invertible) {
    return result;
  }

  return std::nullopt;
}

/// Specialization of the exponent limit to be used for safe exponential,
/// depending on the floating point type.
/// See https://godbolt.org/z/z53Er6Mzf for reasoning for the concrete numbers.
template <typename T>
struct ExpSafeLimit {};
template <>
struct ExpSafeLimit<double> {
  constexpr static double value = 500.0;
};
template <>
struct ExpSafeLimit<float> {
  constexpr static float value = 50.0;
};

/// Calculate the exponential function while avoiding FPEs.
///
/// @param val argument for which the exponential function should be evaluated.
///
/// @return 0 in the case of underflow, std::numeric_limits<T>::infinity in the
/// case of overflow, std::exp(val) otherwise
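///
/// A usage sketch for illustration:
/// @code
///   double a = Acts::safeExp(1.0);      // == std::exp(1.0)
///   double b = Acts::safeExp(-1000.0);  // underflow guarded: returns 0
///   double c = Acts::safeExp(1000.0);   // overflow guarded: returns +infinity
/// @endcode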
template <typename T>
constexpr T safeExp(T val) noexcept {
  constexpr T maxExponent = ExpSafeLimit<T>::value;
  constexpr T minExponent = -maxExponent;
  if (val < minExponent) {
    return 0.0;
  }

  if (val > maxExponent) {
    return std::numeric_limits<T>::infinity();
  }

  return std::exp(val);
}

}  // namespace Acts