Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-04-19 09:06:55

0001 #ifndef RIVET_RIVETYODA_HH
0002 #define RIVET_RIVETYODA_HH
0003 
0004 #include "Rivet/Config/RivetCommon.hh"
0005 #include "Rivet/Tools/TypeTraits.hh"
0006 #include "YODA/AnalysisObject.h"
0007 #include "YODA/Counter.h"
0008 #include "YODA/Histo.h"
0009 #include "YODA/Profile.h"
0010 #include "YODA/Estimate0D.h"
0011 #include "YODA/BinnedEstimate.h"
0012 #include "YODA/Scatter.h"
0013 
0014 // Use execinfo for backtrace if available
0015 #ifdef HAVE_EXECINFO_H
0016 #include <execinfo.h>
0017 #endif
0018 
0019 #include <map>
0020 #include <unordered_map>
0021 #include <valarray>
0022 
0023 
namespace YODA {

  /// Shared pointer to a generic N-dimensional binned distribution.
  template<size_t DbnN, typename ... AxisT>
  using BinnedDbnPtr = std::shared_ptr<YODA::BinnedDbn<DbnN, AxisT...>>;

  /// Histogram pointer: distribution dimension equals the number of axes.
  template<typename ... AxisT>
  using BinnedHistoPtr = BinnedDbnPtr<sizeof...(AxisT), AxisT...>;

  /// Profile pointer: distribution carries one extra (sampled) dimension.
  template<typename ... AxisT>
  using BinnedProfilePtr = BinnedDbnPtr<sizeof...(AxisT)+1, AxisT...>;

  /// Shared pointer to a binned-estimate object.
  template<typename ... AxisT>
  using BinnedEstimatePtr = std::shared_ptr<YODA::BinnedEstimate<AxisT...>>;

  /// Shared pointer to an N-dimensional scatter.
  template<size_t N>
  using ScatterNDPtr = std::shared_ptr<YODA::ScatterND<N>>;

  // Convenience aliases for the concrete analysis-object types.
  using AnalysisObjectPtr = std::shared_ptr<YODA::AnalysisObject>;
  using CounterPtr = std::shared_ptr<YODA::Counter>;
  using Estimate0DPtr = std::shared_ptr<YODA::Estimate0D>;
  using Histo1DPtr = BinnedHistoPtr<double>;
  using Histo2DPtr = BinnedHistoPtr<double,double>;
  using Histo3DPtr = BinnedHistoPtr<double,double,double>;
  using Profile1DPtr = BinnedProfilePtr<double>;
  using Profile2DPtr = BinnedProfilePtr<double,double>;
  using Profile3DPtr = BinnedProfilePtr<double,double,double>;
  using Estimate1DPtr = BinnedEstimatePtr<double>;
  using Estimate2DPtr = BinnedEstimatePtr<double,double>;
  using Estimate3DPtr = BinnedEstimatePtr<double,double,double>;
  using Scatter1DPtr = ScatterNDPtr<1>;
  using Scatter2DPtr = ScatterNDPtr<2>;
  using Scatter3DPtr = ScatterNDPtr<3>;

}
0058 
0059 namespace Rivet {
0060 
0061   /// If @a dst is the same subclass as @a src, copy the contents of @a
0062   /// src into @a dst and return true. Otherwise return false.
0063   template <typename T>
0064   bool copyAO(YODA::AnalysisObjectPtr src, YODA::AnalysisObjectPtr dst, const double scale=1.0) {
0065     if (dst->hasAnnotation("Type") && src->type() != dst->type()) {
0066       throw YODA::LogicError("Operation requries types to be the same!");
0067     }
0068     for (const std::string& a : src->annotations()) {
0069       dst->setAnnotation(a, src->annotation(a));
0070     }
0071     shared_ptr<T> dstPtr = std::static_pointer_cast<T>(dst);
0072     *dstPtr = *std::static_pointer_cast<T>(src);
0073     if constexpr (isFillable<T>::value) { dstPtr->scaleW(scale); }
0074     return true;
0075   }
0076 
0077 
0078   /// @brief A polymorphic base type for the AO type handles
0079   struct TypeBaseHandle {
0080 
0081     TypeBaseHandle() = default;
0082 
0083     virtual ~TypeBaseHandle() { }
0084 
0085     virtual bool copyAO(YODA::AnalysisObjectPtr src,
0086                         YODA::AnalysisObjectPtr dst,
0087                         const double scale = 1.0) const = 0;
0088 
0089     virtual bool addAO(YODA::AnalysisObjectPtr src,
0090                        YODA::AnalysisObjectPtr& dst,
0091                        const double scale = 1.0) const = 0;
0092 
0093   };
0094 
0095 
0096 
0097   /// @brief The type-specific handle that can perform
0098   /// type-specific operations for objects of type T
0099   template<typename T>
0100   struct TypeHandle : public TypeBaseHandle {
0101 
0102     bool addAO(YODA::AnalysisObjectPtr src,
0103                YODA::AnalysisObjectPtr& dst,
0104                const double scale = 1.0) const {
0105       if constexpr (isFillable<T>::value) {
0106         std::shared_ptr<T> srcPtr = std::static_pointer_cast<T>(src);
0107         srcPtr->scaleW(scale);
0108         if (dst == nullptr) { dst = src; return true; }
0109         try { *std::static_pointer_cast<T>(dst) += *srcPtr; }
0110         catch (YODA::BinningError&) { return false; }
0111         return true;
0112       }
0113       else if (dst == nullptr) { dst = src; return true; }
0114       return false;
0115     }
0116 
0117     bool copyAO(YODA::AnalysisObjectPtr src,
0118                 YODA::AnalysisObjectPtr dst,
0119                 const double scale = 1.0) const {
0120       return ::Rivet::copyAO<T>(src, dst, scale);
0121     }
0122 
0123   };
0124 
0125 
0126   /// @defgroup AOFills Minimal objects representing AO fills,
0127   /// to be buffered before collapseEventGroup().
0128   ///
0129   /// @note Every object listed here needs a virtual fill method in YODA,
0130   /// otherwise the Tuple fakery won't work.
0131   ///
0132   /// @{
0133 
  /// Typedef for weights.
  using Weight = double;

  /// A single fill is a (FillType, Weight) pair, where FillType is the
  /// fill-coordinate tuple of the analysis-object type T.
  template<typename T>
  using Fill = pair<typename T::FillType, Weight>;

  /// A collection of several Fill objects (one per sub-event fill).
  template<typename T>
  using Fills = vector<Fill<T>>;
0144 
0145 
0146 
0147   /// @brief FillCollectors which are used to temporarily cache
0148   /// unaggregated fills until collapsed by the Multiplexers via
0149   /// a call to collapseEventGroup().
0150   ///
0151   /// The specialisations of this inherit from the YODA analysis object types,
0152   /// and are used as such. The user-facing analysis objects in
0153   /// Analysis::analyze() are FillCollectors on the apparent type (accessed transparently
0154   /// via the dereferencing of the current Multiplexer<T>::active() pointer).
0155   ///
0156   /// @todo Do we really want this inheritance from YODA::AOs??
0157   template<typename T>
0158   class FillCollector;
0159 
0160 
0161   /// FillCollector specialisation for Counter
0162   template<>
0163   class FillCollector<YODA::Counter> : public YODA::Counter {
0164   public:
0165 
0166     using YAO = YODA::Counter;
0167     using Ptr = shared_ptr<FillCollector<YAO>>;
0168     using YAO::operator =;
0169 
0170     FillCollector() : YAO() { }
0171 
0172     /// Constructor
0173     ///
0174     /// The Counter isn't actually needed here:
0175     /// We call the YAO nullary constructor for
0176     /// performance reasons but still require
0177     /// the pointer argument to harmonise the
0178     /// FillCollector constructors.
0179     FillCollector(typename YAO::Ptr yao) : YAO(yao->path()) { }
0180 
0181     /// Overloaded fill method, which stores Fill info
0182     /// until Multiplexer<T>::collapseEventGroup() is called.
0183     ///
0184     /// @todo Do we need to deal with users using fractions directly?
0185     int fill(const double weight=1.0, const double fraction = 1.0) {
0186       (void)fraction; // suppress unused variable warning
0187       _fills.insert(_fills.end(), { YAO::FillType(), weight } );
0188       return 0;
0189     }
0190 
0191     /// Empty the subevent stack (for start of new event group).
0192     void reset() { _fills.clear(); }
0193 
0194     /// Access the fill info subevent stack.
0195     const Fills<YAO>& fills() const { return _fills; }
0196 
0197   private:
0198 
0199     Fills<YAO> _fills;
0200 
0201   };
0202 
0203 
0204   /// FillCollector specialisation for all BinnedDbn-like AOs
0205   template <size_t DbnN, typename... AxisT>
0206   class FillCollector<YODA::BinnedDbn<DbnN, AxisT...>>
0207          : public YODA::BinnedDbn<DbnN, AxisT...> {
0208   public:
0209 
0210     using YAO = YODA::BinnedDbn<DbnN, AxisT...>;
0211     using Ptr = shared_ptr<FillCollector<YAO>>;
0212     using YAO::operator =;
0213 
0214     FillCollector() : YAO() { }
0215 
0216     /// Constructor
0217     ///
0218     /// We call the cheaper constructor based on the
0219     /// binning to avoid copying of the bin content.
0220     /// The underlying binning object is still used
0221     /// in analize() by many routines, e.g. to query
0222     /// numBins() or to loop over bins() with
0223     /// subsequent calls to bin xMid() etc.
0224     FillCollector(typename YAO::Ptr yao) : YAO(yao->binning()) {
0225       YAO::setPath(yao->path());
0226     }
0227 
0228     /// Overloaded fill method, which stores Fill info
0229     /// until Multiplexer<T>::collapseEventGroup() is called.
0230     ///
0231     /// @todo Do we need to deal with users using fractions directly?
0232     int fill(typename YAO::FillType&& fillCoords,
0233              const double weight=1.0, const double fraction=1.0) {
0234       (void)fraction; // suppress unused variable warning
0235       if (YODA::containsNan(fillCoords)) {
0236         _fills.insert(_fills.end(), { std::move(fillCoords), weight } );
0237         return -1;
0238       }
0239       // Could be that DbnN > number of binned axes, so should
0240       // extract subset of bin coordinates to pinpoint bin
0241       typename YAO::BinningT::EdgeTypesTuple binCoords{};
0242       auto extractBinCoords = [&binCoords, &fillCoords](auto I) {
0243         std::get<I>(binCoords) = std::get<I>(fillCoords);
0244       };
0245       MetaUtils::staticFor<sizeof...(AxisT)>(extractBinCoords);
0246       _fills.insert(_fills.end(), { std::move(fillCoords), weight } );
0247       return (int)YAO::_binning.globalIndexAt(binCoords);
0248     }
0249 
0250     /// Empty the subevent stack (for start of new event group).
0251     void reset() noexcept { _fills.clear(); }
0252 
0253     /// Access the fill info subevent stack.
0254     const Fills<YAO>& fills() const { return _fills; }
0255 
0256   private:
0257 
0258     Fills<YAO> _fills;
0259 
0260   };
0261   /// FillCollector specialisation for Histo1D
0262   template <typename AxisT>
0263   class FillCollector<YODA::BinnedDbn<1, AxisT>>
0264          : public YODA::BinnedDbn<1, AxisT> {
0265   public:
0266 
0267     using YAO = YODA::BinnedDbn<1, AxisT>;
0268     using Ptr = shared_ptr<FillCollector<YAO>>;
0269     using YAO::operator =;
0270 
0271     FillCollector() : YAO() { }
0272 
0273     /// Constructor
0274     FillCollector(typename YAO::Ptr yao) : YAO(yao->binning()) {
0275       YAO::setPath(yao->path());
0276     }
0277 
0278     /// Overloaded fill method, which stores Fill info
0279     /// until Multiplexer<T>::collapseEventGroup() is called.
0280     ///
0281     /// @todo Do we need to deal with users using fractions directly?
0282     int fill(typename YAO::FillType&& fillCoords,
0283              const double weight=1.0, const double fraction=1.0) {
0284       (void)fraction; // suppress unused variable warning
0285       if (YODA::containsNan(fillCoords)) {
0286         _fills.insert(_fills.end(), { std::move(fillCoords), weight } );
0287         return -1;
0288       }
0289       // Could be that DbnN > number of binned axes, so should
0290       // extract subset of bin coordinates to pinpoint bin
0291       typename YAO::BinningT::EdgeTypesTuple binCoords{};
0292       auto extractBinCoords = [&binCoords, &fillCoords](auto I) {
0293         std::get<I>(binCoords) = std::get<I>(fillCoords);
0294       };
0295       MetaUtils::staticFor<1>(extractBinCoords);
0296       _fills.insert(_fills.end(), { std::move(fillCoords), weight } );
0297       return (int)YAO::_binning.globalIndexAt(binCoords);
0298     }
0299     //
0300     int fill(const AxisT x, const double weight=1.0, const double fraction=1.0) {
0301       return fill(typename YAO::FillType{x}, weight, fraction);
0302     }
0303 
0304     /// Empty the subevent stack (for start of new event group).
0305     void reset() noexcept { _fills.clear(); }
0306 
0307     /// Access the fill info subevent stack.
0308     const Fills<YAO>& fills() const { return _fills; }
0309 
0310   private:
0311 
0312     Fills<YAO> _fills;
0313 
0314   };
0315   /// FillCollector specialisation for Histo2D
0316   template <typename AxisT1, typename AxisT2>
0317   class FillCollector<YODA::BinnedDbn<2, AxisT1, AxisT2>>
0318          : public YODA::BinnedDbn<2, AxisT1, AxisT2> {
0319   public:
0320 
0321     using YAO = YODA::BinnedDbn<2, AxisT1, AxisT2>;
0322     using Ptr = shared_ptr<FillCollector<YAO>>;
0323     using YAO::operator =;
0324 
0325     FillCollector() : YAO() { }
0326 
0327     /// Constructor
0328     FillCollector(typename YAO::Ptr yao) : YAO(yao->binning()) {
0329       YAO::setPath(yao->path());
0330     }
0331 
0332     /// Overloaded fill method, which stores Fill info
0333     /// until Multiplexer<T>::collapseEventGroup() is called.
0334     ///
0335     /// @todo Do we need to deal with users using fractions directly?
0336     int fill(typename YAO::FillType&& fillCoords,
0337              const double weight=1.0, const double fraction=1.0) {
0338       (void)fraction; // suppress unused variable warning
0339       if (YODA::containsNan(fillCoords)) {
0340         _fills.insert(_fills.end(), { std::move(fillCoords), weight } );
0341         return -1;
0342       }
0343       // Could be that DbnN > number of binned axes, so should
0344       // extract subset of bin coordinates to pinpoint bin
0345       typename YAO::BinningT::EdgeTypesTuple binCoords{};
0346       auto extractBinCoords = [&binCoords, &fillCoords](auto I) {
0347         std::get<I>(binCoords) = std::get<I>(fillCoords);
0348       };
0349       MetaUtils::staticFor<1>(extractBinCoords);
0350       _fills.insert(_fills.end(), { std::move(fillCoords), weight } );
0351       return (int)YAO::_binning.globalIndexAt(binCoords);
0352     }
0353     //
0354     int fill(const AxisT1 x, const AxisT2 y, const double weight=1.0, const double fraction=1.0) {
0355       return fill(typename YAO::FillType{x,y}, weight, fraction);
0356     }
0357 
0358     /// Empty the subevent stack (for start of new event group).
0359     void reset() noexcept { _fills.clear(); }
0360 
0361     /// Access the fill info subevent stack.
0362     const Fills<YAO>& fills() const { return _fills; }
0363 
0364   private:
0365 
0366     Fills<YAO> _fills;
0367 
0368   };
0369   /// FillCollector specialisation for Histo3D
0370   template <typename AxisT1, typename AxisT2, typename AxisT3>
0371   class FillCollector<YODA::BinnedDbn<3, AxisT1, AxisT2, AxisT3>>
0372          : public YODA::BinnedDbn<3, AxisT1, AxisT2, AxisT3> {
0373   public:
0374 
0375     using YAO = YODA::BinnedDbn<3, AxisT1, AxisT2, AxisT3>;
0376     using Ptr = shared_ptr<FillCollector<YAO>>;
0377     using YAO::operator =;
0378 
0379     FillCollector() : YAO() { }
0380 
0381     /// Constructor
0382     FillCollector(typename YAO::Ptr yao) : YAO(yao->binning()) {
0383       YAO::setPath(yao->path());
0384     }
0385 
0386     /// Overloaded fill method, which stores Fill info
0387     /// until Multiplexer<T>::collapseEventGroup() is called.
0388     ///
0389     /// @todo Do we need to deal with users using fractions directly?
0390     int fill(typename YAO::FillType&& fillCoords,
0391              const double weight=1.0, const double fraction=1.0) {
0392       (void)fraction; // suppress unused variable warning
0393       if (YODA::containsNan(fillCoords)) {
0394         _fills.insert(_fills.end(), { std::move(fillCoords), weight } );
0395         return -1;
0396       }
0397       // Could be that DbnN > number of binned axes, so should
0398       // extract subset of bin coordinates to pinpoint bin
0399       typename YAO::BinningT::EdgeTypesTuple binCoords{};
0400       auto extractBinCoords = [&binCoords, &fillCoords](auto I) {
0401         std::get<I>(binCoords) = std::get<I>(fillCoords);
0402       };
0403       MetaUtils::staticFor<1>(extractBinCoords);
0404       _fills.insert(_fills.end(), { std::move(fillCoords), weight } );
0405       return (int)YAO::_binning.globalIndexAt(binCoords);
0406     }
0407     //
0408     int fill(const AxisT1 x, const AxisT2 y, const AxisT3 z, const double weight=1.0, const double fraction=1.0) {
0409       return fill(typename YAO::FillType{x,y,z}, weight, fraction);
0410     }
0411 
0412     /// Empty the subevent stack (for start of new event group).
0413     void reset() noexcept { _fills.clear(); }
0414 
0415     /// Access the fill info subevent stack.
0416     const Fills<YAO>& fills() const { return _fills; }
0417 
0418   private:
0419 
0420     Fills<YAO> _fills;
0421 
0422   };
0423   /// FillCollector specialisation for Profile1D
0424   template <typename AxisT>
0425   class FillCollector<YODA::BinnedDbn<2, AxisT>>
0426          : public YODA::BinnedDbn<2, AxisT> {
0427   public:
0428 
0429     using YAO = YODA::BinnedDbn<2, AxisT>;
0430     using Ptr = shared_ptr<FillCollector<YAO>>;
0431     using YAO::operator =;
0432 
0433     FillCollector() : YAO() { }
0434 
0435     /// Constructor
0436     FillCollector(typename YAO::Ptr yao) : YAO(yao->binning()) {
0437       YAO::setPath(yao->path());
0438     }
0439 
0440     /// Overloaded fill method, which stores Fill info
0441     /// until Multiplexer<T>::collapseEventGroup() is called.
0442     ///
0443     /// @todo Do we need to deal with users using fractions directly?
0444     int fill(typename YAO::FillType&& fillCoords,
0445              const double weight=1.0, const double fraction=1.0) {
0446       (void)fraction; // suppress unused variable warning
0447       if (YODA::containsNan(fillCoords)) {
0448         _fills.insert(_fills.end(), { std::move(fillCoords), weight } );
0449         return -1;
0450       }
0451       // Could be that DbnN > number of binned axes, so should
0452       // extract subset of bin coordinates to pinpoint bin
0453       typename YAO::BinningT::EdgeTypesTuple binCoords{};
0454       auto extractBinCoords = [&binCoords, &fillCoords](auto I) {
0455         std::get<I>(binCoords) = std::get<I>(fillCoords);
0456       };
0457       MetaUtils::staticFor<1>(extractBinCoords);
0458       _fills.insert(_fills.end(), { std::move(fillCoords), weight } );
0459       return (int)YAO::_binning.globalIndexAt(binCoords);
0460     }
0461     //
0462     int fill(const AxisT x, const double y, const double weight=1.0, const double fraction=1.0) {
0463       return fill(typename YAO::FillType{x,y}, weight, fraction);
0464     }
0465 
0466     /// Empty the subevent stack (for start of new event group).
0467     void reset() noexcept { _fills.clear(); }
0468 
0469     /// Access the fill info subevent stack.
0470     const Fills<YAO>& fills() const { return _fills; }
0471 
0472   private:
0473 
0474     Fills<YAO> _fills;
0475 
0476   };
0477   /// FillCollector specialisation for Histo2D
0478   template <typename AxisT1, typename AxisT2>
0479   class FillCollector<YODA::BinnedDbn<3, AxisT1, AxisT2>>
0480          : public YODA::BinnedDbn<3, AxisT1, AxisT2> {
0481   public:
0482 
0483     using YAO = YODA::BinnedDbn<3, AxisT1, AxisT2>;
0484     using Ptr = shared_ptr<FillCollector<YAO>>;
0485     using YAO::operator =;
0486 
0487     FillCollector() : YAO() { }
0488 
0489     /// Constructor
0490     FillCollector(typename YAO::Ptr yao) : YAO(yao->binning()) {
0491       YAO::setPath(yao->path());
0492     }
0493 
0494     /// Overloaded fill method, which stores Fill info
0495     /// until Multiplexer<T>::collapseEventGroup() is called.
0496     ///
0497     /// @todo Do we need to deal with users using fractions directly?
0498     int fill(typename YAO::FillType&& fillCoords,
0499              const double weight=1.0, const double fraction=1.0) {
0500       (void)fraction; // suppress unused variable warning
0501       if (YODA::containsNan(fillCoords)) {
0502         _fills.insert(_fills.end(), { std::move(fillCoords), weight } );
0503         return -1;
0504       }
0505       // Could be that DbnN > number of binned axes, so should
0506       // extract subset of bin coordinates to pinpoint bin
0507       typename YAO::BinningT::EdgeTypesTuple binCoords{};
0508       auto extractBinCoords = [&binCoords, &fillCoords](auto I) {
0509         std::get<I>(binCoords) = std::get<I>(fillCoords);
0510       };
0511       MetaUtils::staticFor<1>(extractBinCoords);
0512       _fills.insert(_fills.end(), { std::move(fillCoords), weight } );
0513       return (int)YAO::_binning.globalIndexAt(binCoords);
0514     }
0515     //
0516     int fill(const AxisT1 x, const AxisT2 y, const double z, const double weight=1.0, const double fraction=1.0) {
0517       return fill(typename YAO::FillType{x,y,z}, weight, fraction);
0518     }
0519 
0520     /// Empty the subevent stack (for start of new event group).
0521     void reset() noexcept { _fills.clear(); }
0522 
0523     /// Access the fill info subevent stack.
0524     const Fills<YAO>& fills() const { return _fills; }
0525 
0526   private:
0527 
0528     Fills<YAO> _fills;
0529 
0530   };
0531   /// FillCollector specialisation for Histo3D
0532   template <typename AxisT1, typename AxisT2, typename AxisT3>
0533   class FillCollector<YODA::BinnedDbn<4, AxisT1, AxisT2, AxisT3>>
0534          : public YODA::BinnedDbn<4, AxisT1, AxisT2, AxisT3> {
0535   public:
0536 
0537     using YAO = YODA::BinnedDbn<4, AxisT1, AxisT2, AxisT3>;
0538     using Ptr = shared_ptr<FillCollector<YAO>>;
0539     using YAO::operator =;
0540 
0541     FillCollector() : YAO() { }
0542 
0543     /// Constructor
0544     FillCollector(typename YAO::Ptr yao) : YAO(yao->binning()) {
0545       YAO::setPath(yao->path());
0546     }
0547 
0548     /// Overloaded fill method, which stores Fill info
0549     /// until Multiplexer<T>::collapseEventGroup() is called.
0550     ///
0551     /// @todo Do we need to deal with users using fractions directly?
0552     int fill(typename YAO::FillType&& fillCoords,
0553              const double weight=1.0, const double fraction=1.0) {
0554       (void)fraction; // suppress unused variable warning
0555       if (YODA::containsNan(fillCoords)) {
0556         _fills.insert(_fills.end(), { std::move(fillCoords), weight } );
0557         return -1;
0558       }
0559       // Could be that DbnN > number of binned axes, so should
0560       // extract subset of bin coordinates to pinpoint bin
0561       typename YAO::BinningT::EdgeTypesTuple binCoords{};
0562       auto extractBinCoords = [&binCoords, &fillCoords](auto I) {
0563         std::get<I>(binCoords) = std::get<I>(fillCoords);
0564       };
0565       MetaUtils::staticFor<1>(extractBinCoords);
0566       _fills.insert(_fills.end(), { std::move(fillCoords), weight } );
0567       return (int)YAO::_binning.globalIndexAt(binCoords);
0568     }
0569     //
0570     int fill(const AxisT1 x, const AxisT2 y, const AxisT3 z, const double zPlus, const double weight=1.0, const double fraction=1.0) {
0571       return fill(typename YAO::FillType{x,y,z,zPlus}, weight, fraction);
0572     }
0573 
0574     /// Empty the subevent stack (for start of new event group).
0575     void reset() noexcept { _fills.clear(); }
0576 
0577     /// Access the fill info subevent stack.
0578     const Fills<YAO>& fills() const { return _fills; }
0579 
0580   private:
0581 
0582     Fills<YAO> _fills;
0583 
0584   };
0585 
0586 
  /// FillCollector specialisation for Estimate
  ///
  /// Estimates are not fillable, so no fill buffering is needed:
  /// this specialisation only harmonises the constructor signature.
  template<>
  class FillCollector<YODA::Estimate0D> : public YODA::Estimate0D {
  public:

    using YAO = YODA::Estimate0D;
    using Ptr = shared_ptr<FillCollector<YAO>>;
    using YAO::operator =;

    FillCollector() : YAO() { }

    /// Constructor
    ///
    /// The Estimate content isn't actually needed here:
    /// we call the cheap path-only YAO constructor for
    /// performance reasons but still require
    /// the pointer argument to harmonise the
    /// FillCollector constructors.
    FillCollector(typename YAO::Ptr yao) : YAO(yao->path()) { }

  };
0608 
  /// FillCollector specialisation for BinnedEstimate
  ///
  /// BinnedEstimates are not fillable, so no fill buffering is needed:
  /// this specialisation only harmonises the constructor signature.
  template<typename ... AxisT>
  class FillCollector<YODA::BinnedEstimate<AxisT...>>
         : public YODA::BinnedEstimate<AxisT...> {
  public:

    using YAO = YODA::BinnedEstimate<AxisT...>;
    using Ptr = shared_ptr<FillCollector<YAO>>;
    using YAO::operator =;

    FillCollector() : YAO() { }

    /// Constructor
    ///
    /// The BinnedEstimate content isn't actually needed here:
    /// we call the cheap path-only YAO constructor for
    /// performance reasons but still require
    /// the pointer argument to harmonise the
    /// FillCollector constructors.
    FillCollector(typename YAO::Ptr yao) : YAO(yao->path()) { }

  };
0631 
  /// FillCollector specialisation for ScatterND
  ///
  /// Scatters are not fillable, so no fill buffering is needed:
  /// this specialisation only harmonises the constructor signature.
  template <size_t N>
  class FillCollector<YODA::ScatterND<N>> : public YODA::ScatterND<N> {
  public:

    using YAO = YODA::ScatterND<N>;
    using Ptr = shared_ptr<FillCollector<YAO>>;
    using YAO::operator =;

    FillCollector() : YAO() { }

    /// Constructor
    ///
    /// The Scatter content isn't actually needed here:
    /// we call the cheap path-only YAO constructor for
    /// performance reasons but still require
    /// the pointer argument to harmonise the
    /// FillCollector constructors.
    FillCollector(typename YAO::Ptr yao) : YAO(yao->path()) { }

  };
0653 
0654   /// @}
0655 
0656   /// Anonymous namespace to limit visibility
0657   namespace {
0658 
0659     using namespace std;
0660 
0661     template<typename... Args>
0662     double distance(const tuple<Args...>& a, const tuple<Args...>& b) {
0663       double rtn = 0;
0664       auto calculateDistance = [&](auto I) {
0665         if constexpr (std::is_floating_point<std::tuple_element_t<I,
0666                                              std::tuple<Args...>>>::value) {
0667           rtn += Rivet::sqr(get<I>(a) - get<I>(b));
0668         }
0669       };
0670       MetaUtils::staticFor<sizeof...(Args)>(calculateDistance);
0671       return rtn;
0672     }
0673 
0674     /// The argument @a evgroup is a vector of sub-events.
0675     /// Each sub-event contains a FillCollection (where each
0676     /// Fill is a pair of fill coordinate and weight fraction).
0677     ///
0678     /// Returns a transposed vector of fills with aligned sub-events,
0679     /// potentially padded with empty fills to ensure equal sizes.
0680     template <typename T>
0681     vector<Fills<T>> applyEmptyFillPaddingAndTranspose(const vector<typename FillCollector<T>::Ptr>& subevents) {
0682 
0683       static Fill<T> EmptyFill = { typename T::FillType{}, 0.0 };
0684 
0685       vector<Fills<T>> matched;
0686       matched.reserve(subevents.size());
0687       // First just copy subevents into vectors and find the longest vector.
0688       size_t maxFillLen = 0; // length of biggest vector
0689       size_t maxFillPos = 0; // index position of biggest vector
0690       for (const auto& subevt : subevents) {
0691         auto&& fills = subevt->fills();
0692         if (fills.size() > maxFillLen) {
0693           maxFillLen = fills.size();
0694           maxFillPos = matched.size();
0695         }
0696         matched.push_back(std::move(fills));
0697       }
0698 
0699       // Now, go through all subevents with missing fills.
0700       const Fills<T>& maxFill = matched[maxFillPos]; // the longest fill
0701       for (auto& fills : matched) {
0702 
0703         if (fills.size() == maxFillLen)  continue;
0704 
0705         /// Add empty-fill padding, such that
0706         /// every element has the same length
0707         while (fills.size() < maxFillLen) {
0708           fills.push_back(EmptyFill);
0709         }
0710 
0711         /// Iterate from the back and shift all fill values
0712         /// backwards by swapping with empty fills so that
0713         /// they better match the full subevent.
0714         for (int i = maxFillLen - 1; i >= 0; --i) {
0715           if (fills[i] == EmptyFill)  continue;
0716           int j = i;
0717           while (j+1 < (int)maxFillLen &&
0718                  fills[j + 1] == EmptyFill &&
0719                  distance(fills[j].first,
0720                           maxFill[j].first) >
0721                  distance(fills[j].first,
0722                           maxFill[j+1].first)) {
0723             swap(fills[j], fills[j+1]);
0724             ++j;
0725           }
0726         }
0727       }
0728       // finally, transpose and we're done
0729       vector<Fills<T>> result(maxFillLen, Fills<T>(matched.size()));
0730       for (size_t i = 0; i < matched.size(); ++i) {
0731         for (size_t j = 0; j < maxFillLen; ++j) {
0732           result.at(j).at(i) = std::move(matched.at(i).at(j));
0733         }
0734       }
0735       return result;
0736     }
0737 
0738     /// Helper struct to extract a YODA::Binning type
0739     /// corresponding to the axis types of the fill tuple
0740     template<typename T>
0741     struct SubwindowType;
0742     //
0743     template<typename... Args>
0744     struct SubwindowType<tuple<Args...>> {
0745       using type = YODA::Binning<std::decay_t<decltype(std::declval<YODA::Axis<Args>>())>...>;
0746     };
0747 
0748     /// Windowing with (optional) smearing of bin edges
0749     ///
0750     /// The logic here follows Appendix A of the Rivet 3 paper
0751     /// but generalised to N dimensions: First, construct windows
0752     /// around the fill coordinates, then create a YODA::Binning
0753     /// object where each subwindow is represented as a bin to
0754     /// allow looping over all subwindows (i.e. bins) in ND.
0755     template<typename YAO>
0756     vector<std::tuple<typename YAO::FillType, valarray<double>, double>>
0757     applyFillWindows(shared_ptr<YAO> ao, const Fills<YAO>& subevents,
0758                      const vector<valarray<double>>& weights, const double fsmear=0.5) {
0759 
0760 
0761       using SubwindowT = typename SubwindowType<typename YAO::FillType>::type;
0762       SubwindowT subwindows;
0763 
0764       const size_t nFills = subevents.size();
0765       vector<vector<double>> edgesLo, edgesHi;
0766       edgesLo.resize(YAO::FillDimension::value);
0767       edgesHi.resize(YAO::FillDimension::value);
0768 
0769       auto constructWindows = [&](auto I) {
0770 
0771         using FillAxisT = typename SubwindowT::template getAxisT<I>;
0772         using isContinuous = typename SubwindowT::template is_CAxis<I>;
0773 
0774         if constexpr(!isContinuous::value) { // discrete axes don't need smearing
0775           // use single-edge axis for discrete coordinates
0776           subwindows.template axis<I>() = FillAxisT({ std::get<I>(subevents[0].first) });
0777           return;
0778         }
0779         else { // continuous axes need windowing
0780           edgesHi[I].resize(nFills);
0781           edgesLo[I].resize(nFills);
0782 
0783           if constexpr(I < YAO::BinningT::Dimension::value) {
0784             // this fill axis is binned: window sizes will depend on bin width
0785             const auto& axis = ao->binning().template axis<I>();
0786             size_t over = 0, under = 0;
0787             const double edgeMax = ao->template max<I>();
0788             const double edgeMin = ao->template min<I>();
0789             const size_t binLast = axis.numBins(); // index of last visible bin
0790 
0791             for (size_t i = 0; i < nFills; ++i) {
0792               const double edge = get<I>(subevents[i].first);
0793               size_t idx = axis.index(edge);
0794               if (edge >= edgeMax) {
0795                 if (edge > edgeMax) ++over;
0796                 idx = binLast; // cut off at highest visible bin
0797               }
0798               else if (edge < edgeMin) {
0799                 ++under;
0800                 idx = 1; // cut off at lowest visible bin
0801               }
0802               // find index of closest neighbouring bin
0803               size_t ibn = idx;
0804               if (edge > axis.mid(idx)) {
0805                 if (idx != binLast) ++ibn;
0806               }
0807               else {
0808                 if (idx != 1) --ibn;
0809               }
0810 
0811               // construct rectangular windows of with = 2*delta
0812               const double ibw = axis.width(idx) < axis.width(ibn)? idx : ibn;
0813               if ( fsmear > 0.0 ) {
0814                 const double delta = 0.5*fsmear*axis.width(ibw);
0815                 edgesHi[I][i] = edge + delta;
0816                 edgesLo[I][i] = edge - delta;
0817               }
0818               else {
0819                 const double delta = 0.5*axis.width(ibw);
0820                 if (edge > edgeMax) {
0821                   edgesHi[I][i] = max(edgeMax + 2*delta, edge + delta);
0822                   edgesLo[I][i] = max(edgeMax, edge - delta);
0823                 }
0824                 else if (edge < edgeMin) {
0825                   edgesHi[I][i] = min(edgeMin, edge + delta);
0826                   edgesLo[I][i] = min(edgeMin - 2*delta, edge - delta);
0827                 }
0828                 else {
0829                   edgesHi[I][i] = axis.max(idx);
0830                   edgesLo[I][i] = axis.min(idx);
0831                 }
0832               }
0833             }
0834             for (size_t i = 0; i < nFills; ++i) {
0835               const double wsize = edgesHi[I][i] - edgesLo[I][i];
0836               if (over == nFills && edgesLo[I][i] < edgeMax && edgesHi[I][i] > edgeMax) {
0837                 edgesHi[I][i] = edgeMax + wsize;
0838                 edgesLo[I][i] = edgeMax;
0839               }
0840               else if (over == 0 && edgesLo[I][i] < edgeMax && edgesHi[I][i] > edgeMax) {
0841                 edgesLo[I][i] = edgeMax - wsize;
0842                 edgesHi[I][i] = edgeMax;
0843               }
0844               else if (under == nFills && edgesLo[I][i] < edgeMin && edgesHi[I][i] > edgeMin) {
0845                 edgesLo[I][i] = edgeMin - wsize;
0846                 edgesHi[I][i] = edgeMin;
0847               }
0848               else if (under == 0 && edgesLo[I][i] < edgeMin && edgesHi[I][i] > edgeMin) {
0849                 edgesHi[I][i] = edgeMin + wsize;
0850                 edgesLo[I][i] = edgeMin;
0851               }
0852             }
0853           } // end of constexpr-check for binned axes
0854           else {
0855             // this fill axis is unbinned (e.g. in Profiles)
0856             for (size_t i = 0; i < nFills; ++i) {
0857               /// @todo What's a good reference for the window size along an unbinned axes? Is 20% of the FP edge a reasonable window size?
0858               /// @note No smearing needed here since there are no bin-edges along this axes
0859               const double edge = get<I>(subevents[i].first);
0860               const double delta = 0.1*fabs(edge);
0861               edgesHi[I][i] = edge + delta;
0862               edgesLo[I][i] = edge - delta;
0863             }
0864           }
0865           // create CAxis with subwindows from the set of window edges
0866           vector<double> windowEdges;
0867           std::copy(edgesLo[I].begin(), edgesLo[I].end(), std::back_inserter(windowEdges));
0868           std::copy(edgesHi[I].begin(), edgesHi[I].end(), std::back_inserter(windowEdges));
0869           std::sort(windowEdges.begin(), windowEdges.end());
0870           windowEdges.erase( std::unique(windowEdges.begin(), windowEdges.end()), windowEdges.end() );
0871           subwindows.template axis<I>() = FillAxisT(windowEdges);
0872         } // end of if constexpr isContinuous
0873       };
0874       // execute for each fill dimension
0875       MetaUtils::staticFor<YAO::FillDimension::value>(constructWindows);
0876 
0877       // Placeholder for the return type: a tuple of {FillType, multi-weights, fill fraction}
0878       vector<std::tuple<typename YAO::FillType, valarray<double>, double>> rtn;
0879 
0880       // Subwindows are given by visible bins of the
0881       // SubwindowT, so skip all under-/overflows
0882       const vector<size_t> overflows = subwindows.calcOverflowBinsIndices();
0883       const auto& itEnd = overflows.cend();
0884       for (size_t i = 0; i < subwindows.numBins(); ++i) {
0885         if (std::find(overflows.cbegin(), itEnd, i) != itEnd)  continue;
0886 
0887         const auto coords = subwindows.edgeTuple(i);
0888         const double subwindowArea = subwindows.dVol(i);
0889         size_t nSubfills = 0;
0890         double windowFrac = 0.;
0891         valarray<double> sumw(0.0, weights[0].size()); // one per multiweight
0892         for (size_t j = 0; j < nFills; ++j) {
0893           bool pass = true;
0894           double windowArea = 1.0;
0895           auto checkSubwindowOverlap = [&](auto I) {
0896            using isContinuous = typename SubwindowT::template is_CAxis<I>;
0897             if constexpr (isContinuous::value) {
0898               const double edge = std::get<I>(coords);
0899               pass &= (edgesLo[I][j] <= edge && edge <= edgesHi[I][j]);
0900               windowArea *= edgesHi[I][j] - edgesLo[I][j];
0901             }
0902           };
0903           MetaUtils::staticFor<YAO::FillDimension::value>(checkSubwindowOverlap);
0904           if (pass) {
0905             windowFrac = subwindowArea/windowArea;
0906             sumw += subevents[j].second * weights[j];
0907             ++nSubfills;
0908           }
0909         }
0910         if (nSubfills) {
0911           const double fillFrac = (double)nSubfills/(double)nFills;
0912           rtn.emplace_back(coords, sumw/fillFrac, fillFrac*windowFrac); // normalise to a single fill
0913         }
0914       }
0915       return rtn;
0916     } // end of applyFillWindows
0917 
0918   } // end of anonymous name space
0919 
0920 
0921   /// @name Multiplexing wrappers
0922   /// @{
0923 
0924   /// @brief Multiplexer base class
0925   ///
0926   /// Abstract interface to a set of YODA AOs corresponding
0927   /// to multiple weight-streams, with subevent handling.
0928   ///
0929   /// @note This abstraction is useful for looping over
0930   /// generic multiplexed AOs e.g. in the AnalysisHandler.
0931   class MultiplexedAO {
0932   public:
0933 
0934     virtual ~MultiplexedAO() { }
0935 
0936     /// The type being represented is a generic AO.
0937     using Inner = YODA::AnalysisObject;
0938 
0939     /// Add a new layer of subevent fill staging.
0940     virtual void newSubEvent() = 0;
0941 
0942     /// Sync the fill proxies to the persistent histogram.
0943     virtual void collapseEventGroup(const vector<std::valarray<double>>& weight, const double nlowfrac=0.0) = 0;
0944 
0945     /// Sync the persistent histograms to the final collection.
0946     virtual void pushToFinal() = 0;
0947 
0948     /// A shared pointer to the active YODA AO.
0949     virtual YODA::AnalysisObjectPtr activeAO() const = 0;
0950 
0951     /// The histogram path, without a variation suffix.
0952     virtual string basePath() const = 0;
0953 
0954     /// Access the active analysis object for function calls.
0955     virtual YODA::AnalysisObject* operator -> () = 0;
0956 
0957     /// Access the active analysis object for const function calls.
0958     virtual YODA::AnalysisObject* operator -> () const = 0;
0959 
0960     /// Access the active analysis object as a reference.
0961     virtual const YODA::AnalysisObject& operator * () const = 0;
0962 
0963     /// Set active object for analyze.
0964     virtual void setActiveWeightIdx(size_t iWeight) = 0;
0965 
0966     /// Set active object for finalize.
0967     virtual void setActiveFinalWeightIdx(size_t iWeight) = 0;
0968 
0969     /// Unset the active-object pointer.
0970     virtual void unsetActiveWeight() = 0;
0971 
0972     /// Set the size of the bootstrap vectors
0973     virtual void initBootstrap() = 0;
0974 
0975     /// Test for equality.
0976     bool operator == (const MultiplexedAO& p) { return (this == &p); }
0977 
0978     /// Test for inequality.
0979     bool operator != (const MultiplexedAO& p) { return (this != &p); }
0980 
0981     const vector<bool>& fillOutcomes() const {  return _fillOutcomes; }
0982 
0983     const vector<double>& fillFractions() const {  return _fillFractions; }
0984 
0985   protected:
0986 
0987     vector<bool> _fillOutcomes;
0988 
0989     vector<double> _fillFractions;
0990 
0991   };
0992 
0993 
0994 
0995   /// @brief Type-specific multiplexed YODA analysis object.
0996   ///
0997   /// Specialisations of this class (to each type of YODA object)
0998   /// are effectively the user-facing types in Rivet analyses,
0999   /// modulo a further wrapping via the MultplexAOPtr (which is
1000   /// a customised std::shared_pointer around the Multiplexer).
1001   ///
1002   /// Multiplexers instantiate one AO for each of the multiweights
1003   /// (x2 for pre- and post-finalize copies). They can expose either
1004   /// FillCollector<T> or T active pointers, for the analyze() and
1005   /// finalize() steps, respectively.
1006   ///
1007   /// @todo Some things are not really well-defined here.
1008   /// For instance: fill() in the finalize() method and
1009   /// integral() in the analyze() method. Should we throw?
1010   template<typename T>
1011   class Multiplexer : public MultiplexedAO {
1012 
1013   public:
1014 
1015     friend class Analysis;
1016     //friend class AnalysisHandler;
1017 
1018     /// Typedef for the YODA type being represented
1019     using Inner = T;
1020     using MultiplexedAO::_fillOutcomes;
1021     using MultiplexedAO::_fillFractions;
1022 
1023     Multiplexer() = default;
1024 
1025     Multiplexer(const vector<string>& weightNames, const T& p) {
1026       _basePath = p.path();
1027       _baseName = p.name();
1028       for (const string& weightname : weightNames) {
1029         _persistent.push_back(make_shared<T>(p));
1030         _final.push_back(make_shared<T>(p));
1031 
1032         typename T::Ptr obj = _persistent.back();
1033         obj->setPath("/RAW" + obj->path());
1034         typename T::Ptr final = _final.back();
1035         if (weightname != "") {
1036           obj->setPath(obj->path() + "[" + weightname + "]");
1037           final->setPath(final->path() + "[" + weightname + "]");
1038         }
1039       }
1040     }
1041 
1042     ~Multiplexer() = default;
1043 
1044     /// Get the current active analysis object
1045     /// (may be either persistent or final, depending on stage)
1046     typename T::Ptr active() const {
1047       if ( !_active ) {
1048         #ifdef HAVE_BACKTRACE
1049         void* buffer[4];
1050         backtrace(buffer, 4);
1051         backtrace_symbols_fd(buffer, 4 , 1);
1052         #endif
1053         assert(false && "No active pointer set. Was this object booked in init()?");
1054       }
1055       return _active;
1056     }
1057 
1058     template <typename U = T>
1059     auto binning() const -> std::enable_if_t<hasBinning<T>::value, const typename U::BinningT&> {
1060       return _persistent.back()->binning();
1061     }
1062 
1063     /// Get the AO path of the object, without variation suffix
1064     string basePath() const { return _basePath; }
1065 
1066     /// Get the AO name of the object, without variation suffix
1067     string baseName() const { return _baseName; }
1068 
1069 
1070     /// Test for object validity.
1071     ///
1072     /// @note Don't use active() here: assert will catch.
1073     explicit operator bool() const { return static_cast<bool>(_active); }
1074 
1075     /// Test for object invalidity.
1076     ///
1077     /// @note Don't use active() here: assert will catch.
1078     bool operator ! () const { return !_active; }
1079 
1080 
1081     /// Forwarding dereference-call operator.
1082     T* operator -> () { return active().get(); }
1083 
1084     /// Forwarding dereference-call operator.
1085     T* operator -> () const { return active().get(); }
1086 
1087     /// Forwarding dereference operator.
1088     T& operator * () { return *active(); }
1089 
1090     /// Forwarding dereference operator.
1091     const T& operator * () const { return *active(); }
1092 
1093 
1094     /// Equality operator
1095     friend bool operator == (const Multiplexer& a, const Multiplexer& b){
1096       if (a._persistent.size() != b._persistent.size()) {
1097         return false;
1098       }
1099       // also test for binning compatibility
1100       for (size_t i = 0; i < a._persistent.size(); ++i) {
1101         if (a._persistent.at(i) != b._persistent.at(i)) {
1102           return false;
1103         }
1104       }
1105       return true;
1106     }
1107 
1108     /// Inequality operator
1109     friend bool operator != (const Multiplexer& a, const Multiplexer& b) {
1110       return !(a == b);
1111     }
1112 
1113     /// Less-than operator
1114     friend bool operator < (const Multiplexer a, const Multiplexer& b) {
1115       if (a._persistent.size() >= b._persistent.size()) {
1116         return false;
1117       }
1118       for (size_t i = 0; i < a._persistent.size(); ++i) {
1119         if (*(a._persistent.at(i)) >= *(b._persistent.at(i))) {
1120           return false;
1121         }
1122       }
1123       return true;
1124     }
1125 
1126 
1127     /// @}
1128 
1129     /// @name Access methods
1130     /// @{
1131 
1132     /// Set the active-object pointer to point at a variation in the persistent set
1133     void setActiveWeightIdx(size_t iWeight) {
1134       _active = _persistent.at(iWeight);
1135     }
1136 
1137     /// Set the active-object pointer to point at a variation in the final set
1138     void setActiveFinalWeightIdx(size_t iWeight) {
1139       _active = _final.at(iWeight);
1140     }
1141 
1142     /// Unset the active-object pointer
1143     void unsetActiveWeight() { _active.reset(); }
1144 
1145     /// Clear the active object pointer
1146     void reset() { active()->reset(); }
1147 
1148 
1149     /// @brief Create a new FillCollector for the next sub-event.
1150     ///
1151     /// Called every sub-event by AnalysisHandler::analyze() before
1152     /// dispatch to Analysis::analyze(). The fill values will be
1153     /// redistributed over variations by collapseEventGroup().
1154     void newSubEvent() {
1155       _evgroup.emplace_back(new FillCollector<T>(_persistent[0]));
1156       _active = _evgroup.back();
1157       assert(_active);
1158     }
1159 
1160     /// Pushes the (possibly collapsed) fill(s) into the persistent objects
1161     void collapseEventGroup(const vector<std::valarray<double>>& weights, const double nlowfrac=0.0) {
1162 
1163       /// @todo If we don't multiplex (Binned)Estimates, perhaps we can get rid of this protection?
1164       if constexpr( isFillable<T>::value ) {
1165 
1166         // Should have as many subevent fills as subevent weights
1167         assert( _evgroup.size() == weights.size() );
1168 
1169         if (_evgroup.size() == 1) { // Have we had subevents at all?
1170           // ensure vector length matches size of multiplexed AO
1171           if (_fillOutcomes.empty())  initBootstrap();
1172           // reset fill positions
1173           std::fill(_fillOutcomes.begin(), _fillOutcomes.end(), false);
1174           std::fill(_fillFractions.begin(), _fillFractions.end(), 0.0);
1175 
1176           // Simple replay of all collected fills:
1177           // each fill is inserted into every persistent AO
1178           for (auto f : _evgroup[0]->fills()) {
1179             int pos = -1;
1180             double frac = 0.0;
1181             for (size_t m = 0; m < _persistent.size(); ++m) { //< m is the variation index
1182               if (!m)  frac = f.second;
1183               pos = _persistent[m]->fill( std::move(f.first), std::move(f.second) * weights[0][m] );
1184             }
1185             if (pos >= 0) {
1186               _fillOutcomes[pos] = true;
1187               _fillFractions[pos] += frac;
1188             }
1189           }
1190         }
1191         else {
1192           collapseSubevents(weights, nlowfrac);
1193         }
1194       }
1195 
1196       _evgroup.clear();
1197       _active.reset();
1198     }
1199 
1200     /// Collapse the set of FillCollectors (i.e. _evgroup)
1201     /// into combined fills of the persistent objects,
1202     /// using fractional fills if there are subevents
1203     void collapseSubevents(const vector<std::valarray<double>>& weights, const double nlowfrac) {
1204       if constexpr( isFillable<T>::value ) {
1205         if constexpr (!std::is_same<T, YODA::Counter>::value ) { // binned objects
1206           /// @note The number of fills could be different between sub-events,
1207           /// in which case we will first add a padding of "empty" fills.
1208           /// We also take the transpose of subevents vs fills.
1209           ///
1210           /// @todo Do we really need the transposing??
1211           for (const Fills<T>& subEvents : applyEmptyFillPaddingAndTranspose<T>(_evgroup)) {
1212             // construct fill windows with fractional fills
1213             for (const auto& f : applyFillWindows(_persistent[0], subEvents, weights, nlowfrac)) {
1214               for ( size_t m = 0; m < _persistent.size(); ++m ) { // for each multiweight
1215                 _persistent[m]->fill( typename T::FillType(get<0>(f)), get<1>(f)[m], get<2>(f) );
1216               }
1217             } // end of loop over fill windows
1218           } // end of loop over subevents
1219         }
1220         else {
1221           for (size_t m = 0; m < _persistent.size(); ++m) { //< m is the variation index
1222             vector<double> sumfw{0.0}; // final fill weights (one per fill)
1223             for (size_t n = 0; n < _evgroup.size(); ++n) { //< n is the correlated sub-event index
1224               const auto& fills = _evgroup[n]->fills();
1225               // resize if this subevent has an
1226               // even larger number of fills
1227               if (fills.size() > sumfw.size()) {
1228                 sumfw.resize(fills.size(), 0.0);
1229               }
1230               size_t fi = 0;
1231               for (const auto& f : fills) { // collapse sub-events and aggregate final fill weights
1232                 sumfw[fi++] += f.second * weights[n][m]; // f.second is optional user-supplied scaling
1233               }
1234             }
1235             for (double fw : sumfw) { // fill persistent Counters
1236               _persistent[m]->fill(std::move(fw));
1237             }
1238           }
1239         }
1240       }
1241     }
1242 
1243     /// Copy all variations from the "live" persistent set
1244     /// to the final collection used by Analysis::finalize()
1245     void pushToFinal() {
1246       for ( size_t m = 0; m < _persistent.size(); ++m ) { //< variation weight index
1247         _final.at(m)->clearAnnotations(); // in case this is a repeat call
1248         copyAO<T>(_persistent.at(m), _final.at(m));
1249         // Remove the /RAW prefix, if there is one, from the final copy
1250         if ( _final[m]->path().substr(0,4) == "/RAW" )
1251           _final[m]->setPath(_final[m]->path().substr(4));
1252       }
1253     }
1254 
1255     /// Get the set of persistent (i.e. after whole event groups)
1256     /// live objects, as used by Analysis::analyze().
1257     const vector<typename T::Ptr>& persistent() const {
1258       return _persistent;
1259     }
1260 
1261     /// Direct access to the persistent object in weight stream @a iWeight
1262     typename T::Ptr persistent(const size_t iWeight) { return _persistent.at(iWeight); }
1263 
1264     /// Get the set of final analysis objects, as used
1265     /// by Analysis::finalize() and written out
1266     const vector<typename T::Ptr>& final() const {
1267       return _final;
1268     }
1269 
1270     /// Direct access to the finalized object in weight stream @a iWeight
1271     typename T::Ptr final(const size_t iWeight) { return _final.at(iWeight); }
1272 
1273     /// Get the currently active analysis object
1274     YODA::AnalysisObjectPtr activeAO() const { return _active; }
1275 
1276     /// @brief Helper method to resize aux vectors to AO size
1277     void initBootstrap() {
1278       if constexpr( isFillable<T>::value ) {
1279         size_t nPos = 1; // Counter only has a single fill position
1280         if constexpr (!std::is_same<T, YODA::Counter>::value ) { // binned objects
1281           nPos = _persistent.back()->numBins(true, true);
1282         }
1283         _fillOutcomes.resize(nPos);
1284         _fillFractions.resize(nPos);
1285       }
1286     }
1287 
1288     /// @}
1289 
1290   private:
1291 
1292     /// @name Data members
1293     /// @{
1294 
1295     /// M of these, one for each weight
1296     vector<typename T::Ptr> _persistent;
1297 
1298     /// The copy of M-entry _persistent that will be passed to finalize().
1299     vector<typename T::Ptr> _final;
1300 
1301     /// A vector of FillCollectors, one for each subevent
1302     vector<typename FillCollector<T>::Ptr> _evgroup;
1303 
1304     /// The currently active FillCollector (if in analyze)
1305     /// or AO (if in finalize).
1306     typename T::Ptr _active;
1307 
1308     /// The base AO path of this object,
1309     /// without weight-variation suffix.
1310     string _basePath;
1311 
1312     /// The base AO name, without
1313     /// any weight variation suffix.
1314     string _baseName;
1315 
1316     /// @}
1317 
1318   };
1319 
1320 
1321   /// Customised shared pointer of multiplexed AOs,
1322   /// dispatching through two layers of indirection.
1323   ///
1324   /// The customisation is needed in order to dispatch
1325   /// -> and * operators all the way down to the inner
1326   /// YODA analysis objects.
1327   ///
1328   /// @todo Provide remaining functionality that shared_ptr has (not needed right now).
1329   template <typename T>
1330   class MultiplexPtr {
1331 
1332   public:
1333 
1334     using value_type = T;
1335 
1336     MultiplexPtr() = default;
1337 
1338     MultiplexPtr(decltype(nullptr)) : _p(nullptr) { }
1339 
1340     /// Convenience constructor, pass through to the Multiplexer constructor
1341     MultiplexPtr(const vector<string>& weightNames, const typename T::Inner& p)
1342       : _p( make_shared<T>(weightNames, p) ) { }
1343 
1344     // Ensure a shared_ptr<T> can be instantiated from a shared_ptr<U>
1345     template <typename U, typename = decltype(shared_ptr<T>(shared_ptr<U>{}))>
1346     MultiplexPtr(const shared_ptr<U>& p) : _p(p) { }
1347 
1348     // Ensure a shared_ptr<T> can be instantiated from a shared_ptr<U>
1349     template <typename U, typename = decltype(shared_ptr<T>(shared_ptr<U>{}))>
1350     MultiplexPtr(const MultiplexPtr<U>& p) : _p(p.get()) { }
1351 
1352     /// Goes right through to the active Multiplexer<YODA> object's members
1353     T& operator -> () {
1354       if (_p == nullptr) {
1355         throw Error("Dereferencing null AnalysisObject pointer. Is there an unbooked histogram variable?");
1356       }
1357       return *_p;
1358     }
1359 
1360     template <typename U = T>
1361     auto binning() const -> std::enable_if_t<hasBinning<typename T::Inner>::value, const typename U::Inner::BinningT&> {
1362       if (_p == nullptr) {
1363         throw Error("Dereferencing null AnalysisObject pointer. Is there an unbooked histogram variable?");
1364       }
1365       return _p->binning();
1366     }
1367 
1368 
1369     /// Goes right through to the active Multiplexer<YODA> object's members
1370     const T& operator -> () const {
1371       if (_p == nullptr) {
1372         throw Error("Dereferencing null AnalysisObject pointer. Is there an unbooked histogram variable?");
1373       }
1374       return *_p;
1375     }
1376 
1377     /// The active YODA object
1378     typename T::Inner& operator * ()             { return **_p; }
1379     const typename T::Inner& operator * () const { return **_p; }
1380 
1381     /// Object validity check.
1382     explicit operator bool() const { return _p && bool(*_p); }
1383 
1384     /// Object invalidity check.
1385     bool operator ! () const { return !_p || !(*_p);   }
1386 
1387     /// Object validity check.
1388     bool operator == (const MultiplexPtr& other) const {
1389       return _p == other._p;
1390     }
1391 
1392     /// Object invalidity check.
1393     bool operator != (const MultiplexPtr& other) const {
1394       return _p != other._p;
1395     }
1396 
1397     /// Less-than for ptr ordering.
1398     bool operator < (const MultiplexPtr& other) const {
1399       return _p < other._p;
1400     }
1401 
1402     /// Greater-than for ptr ordering.
1403     bool operator > (const MultiplexPtr other) const {
1404       return _p > other._p;
1405     }
1406 
1407     /// Less-equals for ptr ordering.
1408     bool operator <= (const MultiplexPtr& other) const {
1409       return _p <= other._p;
1410     }
1411 
1412     /// Greater-equals for ptr ordering.
1413     bool operator >= (const MultiplexPtr& other) const {
1414       return _p >= other._p;
1415     }
1416 
1417     /// Get the internal shared ptr.
1418     shared_ptr<T> get() const { return _p; }
1419 
1420   private:
1421 
1422     /// The type being wrapped.
1423     shared_ptr<T> _p;
1424 
1425   };
1426 
1427   /// @}
1428 
1429 
1430   /// @name  User-facing analysis object Multiplexers
1431   ///
1432   /// @note Every object listed here needs a virtual fill() method in YODA,
1433   /// otherwise the FillCollector fakery won't work.
1434   ///
1435   /// @{
1436 
1437   using MultiplexAOPtr = MultiplexPtr<MultiplexedAO>;
1438 
1439   template<size_t DbnN, typename... AxisT>
1440   using BinnedDbnPtr = MultiplexPtr<Multiplexer<YODA::BinnedDbn<DbnN, AxisT...>>>;
1441 
1442   template<typename... AxisT>
1443   using BinnedHistoPtr = BinnedDbnPtr<sizeof...(AxisT), AxisT...>;
1444 
1445   template<typename... AxisT>
1446   using BinnedProfilePtr = BinnedDbnPtr<sizeof...(AxisT)+1, AxisT...>;
1447 
1448   template<typename... AxisT>
1449   using BinnedEstimatePtr = MultiplexPtr<Multiplexer<YODA::BinnedEstimate<AxisT...>>>;
1450 
1451   template<size_t N>
1452   using ScatterNDPtr  = MultiplexPtr<Multiplexer<YODA::ScatterND<N>>>;
1453 
1454   using CounterPtr    = MultiplexPtr<Multiplexer<YODA::Counter>>;
1455   using Estimate0DPtr = MultiplexPtr<Multiplexer<YODA::Estimate0D>>;
1456   using Histo1DPtr    = BinnedHistoPtr<double>;
1457   using Histo2DPtr    = BinnedHistoPtr<double,double>;
1458   using Histo3DPtr    = BinnedHistoPtr<double,double,double>;
1459   using Profile1DPtr  = BinnedProfilePtr<double>;
1460   using Profile2DPtr  = BinnedProfilePtr<double,double>;
1461   using Profile3DPtr  = BinnedProfilePtr<double,double,double>;
1462   using Estimate1DPtr = BinnedEstimatePtr<double>;
1463   using Estimate2DPtr = BinnedEstimatePtr<double,double>;
1464   using Estimate3DPtr = BinnedEstimatePtr<double,double,double>;
1465   using Scatter1DPtr  = ScatterNDPtr<1>;
1466   using Scatter2DPtr  = ScatterNDPtr<2>;
1467   using Scatter3DPtr  = ScatterNDPtr<3>;
1468 
1469   using YODA::Counter;
1470   using YODA::Estimate0D;
1471   using YODA::Histo1D;
1472   using YODA::Histo2D;
1473   using YODA::Histo3D;
1474   using YODA::Profile1D;
1475   using YODA::Profile2D;
1476   using YODA::Profile3D;
1477   using YODA::Estimate1D;
1478   using YODA::Estimate2D;
1479   using YODA::Estimate3D;
1480   using YODA::Scatter1D;
1481   using YODA::Scatter2D;
1482   using YODA::Scatter3D;
1483   using YODA::Point1D;
1484   using YODA::Point2D;
1485   using YODA::Point3D;
1486 
1487   ///@}
1488 
1489 
1490   /// @defgroup aomanip Analysis object manipulation functions
1491   /// @{
1492 
1493   inline bool isTmpPath(const std::string& path, const bool tmp_only = false) {
1494     if (tmp_only)  return path.find("/TMP/") != string::npos;
1495     return path.find("/TMP/") != string::npos || path.find("/_") != string::npos;
1496   }
1497 
1498   /// Function to get a map of all the refdata in a paper with the
1499   /// given @a papername.
1500   map<string, YODA::AnalysisObjectPtr> getRefData(const string& papername);
1501 
1502   /// @todo Also provide a Scatter3D getRefData() version?
1503 
1504   /// Get the file system path to the reference file for this paper.
1505   string getDatafilePath(const string& papername);
1506 
1507 
  /// Traits class to access the type of the AnalysisObject in the reference files.
  ///
  /// Each specialisation maps a fillable/measured YODA type onto the type
  /// used to store its reference data (exposed as the nested @c RefT alias).
  /// The primary template is intentionally empty so that unmapped types
  /// fail to compile rather than silently picking a default.
  /// @todo MIGRATE TO ESTIMATES
  template<typename T>
  struct ReferenceTraits { };

  /// Counter reference data is a single scalar estimate.
  template<>
  struct ReferenceTraits<YODA::Counter> {
    using RefT = YODA::Estimate0D;
  };

  /// Estimate0D maps onto itself.
  template<>
  struct ReferenceTraits<YODA::Estimate0D> {
    using RefT = YODA::Estimate0D;
  };

  /// BinnedEstimate maps onto itself (same axis configuration).
  template<typename... AxisT>
  struct ReferenceTraits<YODA::BinnedEstimate<AxisT...>> {
    using RefT = YODA::BinnedEstimate<AxisT...>;
  };

  /// Binned distributions (histos/profiles) have scatter reference data,
  /// with one extra dimension for the measured value axis.
  template<size_t DbnN, typename... AxisT>
  struct ReferenceTraits<YODA::BinnedDbn<DbnN, AxisT...>> {
    using RefT = YODA::ScatterND<sizeof...(AxisT)+1>;
  };

  /// Scatters map onto scatters of the same dimension.
  template<size_t N>
  struct ReferenceTraits<YODA::ScatterND<N>> {
    using RefT = YODA::ScatterND<N>;
  };
1537 
1538   /// Check if two analysis objects have the same binning or, if not
1539   /// binned, are in other ways compatible.
1540   template <typename TPtr>
1541   inline bool bookingCompatible(TPtr a, TPtr b) {
1542     return *a == *b;
1543   }
  //
  /// Counters carry no binning: any two are booking-compatible.
  inline bool bookingCompatible(CounterPtr, CounterPtr) {
    return true;
  }
  //
  /// Plain YODA Counters carry no binning: any two are booking-compatible.
  inline bool bookingCompatible(YODA::CounterPtr, YODA::CounterPtr) {
    return true;
  }
  //
  /// Scalar estimates carry no binning: any two are booking-compatible.
  inline bool bookingCompatible(Estimate0DPtr, Estimate0DPtr) {
    return true;
  }
  //
  /// Plain YODA scalar estimates carry no binning: any two are booking-compatible.
  inline bool bookingCompatible(YODA::Estimate0DPtr, YODA::Estimate0DPtr) {
    return true;
  }
  //
  /// Scatters are compatible if they hold the same number of points.
  template<size_t N>
  inline bool bookingCompatible(ScatterNDPtr<N> a, ScatterNDPtr<N> b) {
    return a->numPoints() == b->numPoints();
  }
  //
  /// Plain YODA scatters are compatible if they hold the same number of points.
  template<size_t N>
  inline bool bookingCompatible(YODA::ScatterNDPtr<N> a, YODA::ScatterNDPtr<N> b) {
    return a->numPoints() == b->numPoints();
  }
1570 
1571   inline bool beamInfoCompatible(YODA::AnalysisObjectPtr a, YODA::AnalysisObjectPtr b) {
1572     YODA::BinnedEstimatePtr<string> beamsA = std::dynamic_pointer_cast<YODA::BinnedEstimate<string>>(a);
1573     YODA::BinnedEstimatePtr<string> beamsB = std::dynamic_pointer_cast<YODA::BinnedEstimate<string>>(b);
1574     return  beamsA && beamsB && (*beamsA == *beamsB) && beamsA->numBins() == 2 &&
1575             fuzzyEquals(beamsA->bin(1).val(), beamsB->bin(1).val()) &&
1576             fuzzyEquals(beamsA->bin(2).val(), beamsB->bin(2).val());
1577   }
1578 
1579   /// @}
1580 
1581 
1582 
  /// @brief Class representing a YODA analysis-object path with all its components.
  ///
  /// Decomposes a full path string (e.g. "/RAW/ANALYSIS:OPT=VAL[weight]/name")
  /// into analysis name, options, object name, weight name and the
  /// RAW/TMP/REF flags. Parsing happens in the constructor; always check
  /// valid() before trusting the accessors.
  class AOPath {
  public:

    /// Constructor: parses @a fullpath; sets the valid() flag accordingly.
    AOPath(string fullpath)
      : _valid(false), _path(fullpath), _raw(false), _tmp(false), _ref(false) {
      _valid = init(fullpath);
    }

    /// The full path.
    string path() const { return _path; }

    /// The analysis name.
    string analysis() const { return _analysis; }

    /// The analysis name with options.
    string analysisWithOptions() const { return _analysis + _optionstring; }

    /// The base name of the analysis object.
    string name() const { return _name; }

    /// The weight name.
    string weight() const { return _weight; }

    /// The weight component of the path, bracketed as it appears in the
    /// path string, or empty if there is no weight name.
    string weightComponent() const {
      if (_weight == "")  return _weight;
      return "[" + _weight + "]";
    }

    /// Is this a RAW (filling) object?
    bool isRaw() const { return _raw; }

    /// Is this a temporary (filling) object?
    bool isTmp() const { return _tmp; }

    /// Is this a reference object?
    bool isRef() const { return _ref; }

    /// The string describing the options passed to the analysis.
    string optionString() const { return _optionstring; }

    /// Are there options passed to the analysis?
    bool hasOptions() const { return !_options.empty(); }

    /// Remove the given option so it is not passed on to the analysis.
    void removeOption(string opt) { _options.erase(opt); fixOptionString(); }

    /// Set the given option (to @a val) to be passed to the analysis.
    void setOption(string opt, string val) { _options[opt] = val; fixOptionString(); }

    /// Was the given option passed to the analysis?
    bool hasOption(string opt) const { return _options.find(opt) != _options.end(); }

    /// Get the value of the given option, or "" if it is not set.
    string getOption(string opt) const {
      auto it = _options.find(opt);
      if ( it != _options.end() ) return it->second;
      return "";
    }

    /// Rebuild the cached option string after option changes.
    void fixOptionString();

    /// Create the full path string from the stored components;
    /// setPath() additionally stores it as the current path.
    string mkPath() const;
    string setPath() { return _path = mkPath(); }

    /// Print out information about this path (for debugging).
    void debug() const;

    /// Ordering operator (by full path string), so AOPath can be used
    /// as a key in ordered containers.
    bool operator<(const AOPath & other) const {
      return _path < other._path;
    }

    /// Check if the path parsed successfully.
    bool valid() const { return _valid; };
    bool operator!() const { return !valid(); }

  private:

    /// Internal functions for disassembling a path name
    /// (defined out of line elsewhere in Rivet).
    bool init(string fullpath);
    bool chopweight(string & fullpath);
    bool chopoptions(string & anal);

    bool _valid;           ///< Did the path parse successfully?
    string _path;          ///< The full path string.
    string _analysis;      ///< The analysis name component.
    string _optionstring;  ///< The options, rendered as a single string.
    string _name;          ///< The object's base name.
    string _weight;        ///< The weight name (empty if none).
    bool _raw;             ///< RAW (filling) object flag.
    bool _tmp;             ///< Temporary (filling) object flag.
    bool _ref;             ///< Reference object flag.
    map<string,string> _options;  ///< Parsed option key/value pairs.

  };
1683 
1684 }
1685 
1686 #endif