Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-01-18 10:11:00

0001 // @(#)root/tmva $Id$
0002 // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
0003 
0004 /**********************************************************************************
0005  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
0006  * Package: TMVA                                                                  *
0007  * Class  : MethodCFMlpANN_utils                                                  *
0008  *                                             *
0009  *                                                                                *
0010  * Reference for the original FORTRAN version "mlpl3.F":                          *
0011  *      Authors  : J. Proriol and contributions from ALEPH-Clermont-Fd            *
0012  *                 Team members                                                   *
0013  *      Copyright: Laboratoire Physique Corpusculaire                             *
0014  *                 Universite de Blaise Pascal, IN2P3/CNRS                        *
0015  * Description:                                                                   *
0016  *      Utility routine, obtained via f2c from original mlpl3.F FORTRAN routine   *
0017  *                                                                                *
0018  * Authors (alphabetical):                                                        *
0019  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
0020  *      Xavier Prudent  <prudent@lapp.in2p3.fr>  - LAPP, France                   *
0021  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
0022  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
0023  *                                                                                *
0024  * Copyright (c) 2005:                                                            *
0025  *      CERN, Switzerland                                                         *
0026  *      U. of Victoria, Canada                                                    *
0027  *      MPI-K Heidelberg, Germany                                                 *
0028  *      LAPP, Annecy, France                                                      *
0029  *                                                                                *
0030  * Redistribution and use in source and binary forms, with or without             *
0031  * modification, are permitted according to the terms listed in LICENSE           *
0032  * (see tmva/doc/LICENSE)                                          *
0033  **********************************************************************************/
0034 
0035 #ifndef ROOT_TMVA_MethodCFMlpANN_Utils
0036 #define ROOT_TMVA_MethodCFMlpANN_Utils
0037 
0038 #include "TMVA/MethodCFMlpANN_def.h"
0039 #include "TMVA/MsgLogger.h"
0040 
0041 #include "Rtypes.h"
0042 
0043 #include <cstdlib>
0044 //////////////////////////////////////////////////////////////////////////
0045 //                                                                      //
0046 // MethodCFMlpANN_Utils                                                 //
0047 //                                                                      //
0048 // Implementation of Clermond-Ferrand artificial neural network         //
0049 //                                                                      //
0050 //////////////////////////////////////////////////////////////////////////
0051 
0052 namespace TMVA {
0053 
0054    class MethodCFMlpANN_Utils {
0055 
0056    public:
0057 
0058       MethodCFMlpANN_Utils();
0059       virtual ~MethodCFMlpANN_Utils();
0060 
0061    protected:
0062 
0063       void Train_nn( Double_t *tin2, Double_t *tout2, Int_t *ntrain,
0064                      Int_t *ntest, Int_t *nvar2, Int_t *nlayer,
0065                      Int_t *nodes, Int_t *ncycle );
0066 
0067       void Entree_new( Int_t *, char *, Int_t *ntrain, Int_t *ntest,
0068                        Int_t *numlayer, Int_t *nodes, Int_t *numcycle,
0069                        Int_t );
0070 
0071       virtual Int_t DataInterface( Double_t*, Double_t*, Int_t*, Int_t*, Int_t*, Int_t*,
0072                                    Double_t*, Int_t*, Int_t* ) = 0;
0073 
0074       Double_t Fdecroi(Int_t *i__);
0075       Double_t Sen3a(void);
0076 
0077       void  Wini      ();
0078       void  En_avant  (Int_t *ievent);
0079       void  En_avant2 (Int_t *ievent);
0080       void  En_arriere(Int_t *ievent);
0081       void  Leclearn  (Int_t *ktest, Double_t *tout2, Double_t *tin2);
0082       void  Out       (Int_t *iii, Int_t *maxcycle);
0083       void  Cout      (Int_t *, Double_t *xxx);
0084       void  Innit     (char *det, Double_t *tout2, Double_t *tin2, Int_t );
0085       void  TestNN    ();
0086       void  Inl       ();
0087       void  GraphNN   (Int_t *ilearn, Double_t *, Double_t *, char *, Int_t);
0088       void  Foncf     (Int_t *i__, Double_t *u, Double_t *f);
0089       void  Cout2     (Int_t * /*i1*/, Double_t *yyy);
0090       void  Lecev2    (Int_t *ktest, Double_t *tout2, Double_t *tin2);
0091       void  Arret     (const char* mot );
0092       void  CollectVar(Int_t *nvar, Int_t *class__, Double_t *xpg);
0093 
0094    protected:
0095 
0096       Int_t             fg_100;          // constant
0097       Int_t             fg_0;            // constant
0098       static const Int_t       fg_max_nVar_;    // static maximum number of input variables
0099       static const Int_t       fg_max_nNodes_;  // maximum number of nodes per variable
0100       Int_t             fg_999;          // constant
0101       static const char* const fg_MethodName;   // method name for print
0102 
0103       Double_t W_ref(const Double_t wNN[], Int_t a_1, Int_t a_2, Int_t a_3) const {
0104          return wNN [(a_3*max_nNodes_ + a_2)*max_nLayers_ + a_1 - 187];
0105       }
0106       Double_t& W_ref(Double_t wNN[], Int_t a_1, Int_t a_2, Int_t a_3) {
0107          return wNN [((a_3)*max_nNodes_ + (a_2))*max_nLayers_ + a_1 - 187];
0108       }
0109 
0110       Double_t Ww_ref(const Double_t wwNN[], Int_t a_1,Int_t a_2) const {
0111          return wwNN[(a_2)*max_nLayers_ + a_1 - 7];
0112       }
0113       Double_t& Ww_ref(Double_t wwNN[], Int_t a_1,Int_t a_2) {
0114          return wwNN[(a_2)*max_nLayers_ + a_1 - 7];
0115       }
0116 
0117       // ANN training parameters
0118       struct {
0119          Double_t epsmin, epsmax, eeps, eta;
0120          Int_t layerm, lclass, nevl, nblearn, nunilec, nunisor, nunishort, nunap;
0121          Int_t nvar, itest, ndiv, ichoi, ndivis, nevt;
0122       } fParam_1;
0123 
0124       // ANN training results
0125       struct {
0126          Double_t xmax[max_nVar_], xmin[max_nVar_];
0127          Int_t nclass[max_Events_], mclass[max_Events_], iclass;
0128       } fVarn_1;
0129 
0130       // dynamic data table
0131       class VARn2 {
0132       public:
0133          VARn2() : fNevt(0), fNvar(0) {
0134             fxx = nullptr;
0135          }
0136          ~VARn2() {
0137             Delete();
0138          }
0139          void Create( Int_t nevt, Int_t nvar ) {
0140             fNevt = nevt+1; fNvar = nvar+1; // fortran array style 1...N
0141             fxx = new Double_t*[fNevt];
0142             for (Int_t i=0; i<fNevt; i++) fxx[i] = new Double_t[fNvar];
0143          }
0144          Double_t operator=( Double_t val ) { return val; }
0145          Double_t &operator()( Int_t ievt, Int_t ivar ) const {
0146             if (fxx && ievt < fNevt && ivar < fNvar) return fxx[ievt][ivar];
0147             else {
0148                printf( "*** ERROR in varn3_(): fxx is zero pointer ==> abort ***\n") ;
0149                std::exit(1);
0150                return fxx[0][0];
0151             }
0152          }
0153          void Delete( void ) {
0154             if (fxx) for (Int_t i=0; i<fNevt; i++) if (fxx[i]) delete [] fxx[i];
0155             delete[] fxx;
0156             fxx=nullptr;
0157          }
0158 
0159          Double_t** fxx;
0160          Int_t fNevt;
0161          Int_t fNvar;
0162       } fVarn2_1, fVarn3_1;
0163 
0164       // ANN weights
0165       struct {
0166          Double_t x[max_nLayers_*max_nNodes_];
0167          Double_t y[max_nLayers_*max_nNodes_];
0168          Double_t o[max_nNodes_];
0169          Double_t w[max_nLayers_*max_nNodes_*max_nNodes_];
0170          Double_t ww[max_nLayers_*max_nNodes_];
0171          Double_t cut[max_nNodes_];
0172          Double_t deltaww[max_nLayers_*max_nNodes_];
0173          Int_t neuron[max_nLayers_];
0174       } fNeur_1;
0175 
0176       // ANN weights
0177       struct {
0178          Double_t coef[max_nNodes_], temp[max_nLayers_], demin, demax;
0179          Double_t del[max_nLayers_*max_nNodes_];
0180          Double_t delw[max_nLayers_*max_nNodes_*max_nNodes_];
0181          Double_t delta[max_nLayers_*max_nNodes_*max_nNodes_];
0182          Double_t delww[max_nLayers_*max_nNodes_];
0183          Int_t idde;
0184       } fDel_1;
0185 
0186       // flags and stuff (don't ask me...)
0187       struct {
0188          Double_t ancout, tolcou;
0189          Int_t ieps;
0190       } fCost_1;
0191 
0192       void SetLogger(MsgLogger *l) { fLogger = l; }
0193 
0194    private:
0195       MsgLogger * fLogger;
0196       MsgLogger& ULog()  { if (fLogger) return *fLogger; return *(fLogger = new MsgLogger("CFMLP_Utils")); } // avoiding control reaches end of non-void function warning
0197 
0198    public:
0199 
0200       ClassDef(MethodCFMlpANN_Utils,0);  // Implementation of Clermond-Ferrand artificial neural network
0201    };
0202 
0203 } // namespace TMVA
0204 
0205 #endif