Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-01-18 10:11:00

0001 // @(#)root/tmva $Id$
0002 // Author: Andreas Hoecker, Joerg Stelzer, Helge Voss, Kai Voss
0003 
0004 /**********************************************************************************
0005  * Project: TMVA - a Root-integrated toolkit for multivariate data analysis       *
0006  * Package: TMVA                                                                  *
0007  * Class  : MethodCFMlpANN                                                        *
0008  *                                                                                *
0009  *                                                                                *
0010  * Description:                                                                   *
0011  *      Interface for Clermont-Ferrand artificial neural network.                 *
0012  *      The ANN code has been translated from FORTRAN77 (f2c);                    *
0013  *      see files: MethodCFMlpANN_f2c_mlpl3.cpp                                   *
0014  *                 MethodCFMlpANN_f2c_datacc.cpp                                  *
0015  *                                                                                *
0016  *      --------------------------------------------------------------------      *
0017  *      Reference for the original FORTRAN version:                               *
0018  *           Authors  : J. Proriol and contributions from ALEPH-Clermont-Fd       *
0019  *                      Team members. Contact : gaypas@afal11.cern.ch             *
0020  *                                                                                *
0021  *           Copyright: Laboratoire Physique Corpusculaire                        *
0022  *                      Universite de Blaise Pascal, IN2P3/CNRS                   *
0023  *      --------------------------------------------------------------------      *
0024  *                                                                                *
0025  * Usage: options are given through Factory:                                      *
0026  *            factory->BookMethod( "MethodCFMlpANN", OptionsString );             *
0027  *                                                                                *
0028  *        where:                                                                  *
0029  *            TString OptionsString = "n_training_cycles:n_hidden_layers"         *
0030  *                                                                                *
0031  *        default is:  n_training_cycles = 5000, n_layers = 4                     *
0032  *        note that the number of hidden layers in the NN is                      *
0033  *                                                                                *
0034  *            n_hidden_layers = n_layers - 2                                      *
0035  *                                                                                *
0036  *        since there is one input and one output layer. The number of            *
0037  *        nodes (neurons) is predefined to be                                     *
0038  *                                                                                *
0039  *           n_nodes[i] = nvars + 1 - i (where i=1..n_layers)                     *
0040  *                                                                                *
0041  *        with nvars being the number of variables used in the NN.                *
0042  *        Hence, the default case is: n_neurons(layer 1 (input)) : nvars          *
0043  *                                    n_neurons(layer 2 (hidden)): nvars-1        *
0044  *                                    n_neurons(layer 3 (hidden)): nvars-1        *
0045  *                                    n_neurons(layer 4 (out))   : 2              *
0046  *                                                                                *
0047  *        This artificial neural network usually needs a relatively large         *
0048  *        number of cycles to converge (8000 and more). Overtraining can          *
0049  *        be efficiently tested by comparing the signal and background            *
0050  *        output of the NN for the events that were used for training and         *
0051  *        an independent data sample (with equal properties). If the separation   *
0052  *        performance is significantly better for the training sample, the        *
0053  *        NN interprets statistical effects, and is hence overtrained. In         *
0054  *        this case, the number of cycles should be reduced, or the size          *
0055  *        of the training sample increased.                                       *
0056  *                                                                                *
0057  * Authors (alphabetical):                                                        *
0058  *      Andreas Hoecker <Andreas.Hocker@cern.ch> - CERN, Switzerland              *
0059  *      Xavier Prudent  <prudent@lapp.in2p3.fr>  - LAPP, France                   *
0060  *      Helge Voss      <Helge.Voss@cern.ch>     - MPI-K Heidelberg, Germany      *
0061  *      Kai Voss        <Kai.Voss@cern.ch>       - U. of Victoria, Canada         *
0062  *                                                                                *
0063  * Copyright (c) 2005:                                                            *
0064  *      CERN, Switzerland                                                         *
0065  *      U. of Victoria, Canada                                                    *
0066  *      MPI-K Heidelberg, Germany                                                 *
0067  *      LAPP, Annecy, France                                                      *
0068  *                                                                                *
0069  * Redistribution and use in source and binary forms, with or without             *
0070  * modification, are permitted according to the terms listed in LICENSE           *
0071  * (see tmva/doc/LICENSE)                                                         *
0072  *                                                                                *
0073  **********************************************************************************/
0074 
0075 #ifndef ROOT_TMVA_MethodCFMlpANN
0076 #define ROOT_TMVA_MethodCFMlpANN
0077 
0078 //////////////////////////////////////////////////////////////////////////
0079 //                                                                      //
0080 // MethodCFMlpANN                                                       //
0081 //                                                                      //
0082 // Interface for Clermont-Ferrand artificial neural network             //
0083 //                                                                      //
0084 //////////////////////////////////////////////////////////////////////////
0085 
0086 #include <iosfwd>
0087 #include <vector>
0088 
0089 #include "TMVA/MethodBase.h"
0090 #include "TMVA/MethodCFMlpANN_Utils.h"
0091 #include "TMatrixF.h"
0092 
0093 namespace TMVA {
0094 
0095    class MethodCFMlpANN : public MethodBase, MethodCFMlpANN_Utils { // public TMVA method interface; f2c ANN core inherited privately
0096 
0097    public:
0098 
0099       MethodCFMlpANN( const TString& jobName,
0100                       const TString& methodTitle,
0101                       DataSetInfo& theData,
0102                       const TString& theOption = "3000:N-1:N-2"); // standard constructor; default: 3000 cycles, hidden layers of N-1 and N-2 nodes
0103 
0104       MethodCFMlpANN( DataSetInfo& theData,
0105                       const TString& theWeightFile); // constructor used when instantiating the method from an existing weight file
0106 
0107       virtual ~MethodCFMlpANN( void ); // destructor
0108 
0109       virtual Bool_t HasAnalysisType( Types::EAnalysisType type, UInt_t numberClasses, UInt_t /*numberTargets*/ ); // query: does this method support the given analysis type / class count?
0110 
0111       // training method
0112       void Train( void );
0113 
0114       using MethodBase::ReadWeightsFromStream; // keep base-class overloads visible despite the istream override below
0115 
0116       // write weights to file
0117       void AddWeightsXMLTo( void* parent ) const;
0118 
0119       // read weights from file
0120       void ReadWeightsFromStream( std::istream& istr );
0121       void ReadWeightsFromXML( void* wghtnode );
0122       // calculate the MVA value
0123       Double_t GetMvaValue( Double_t* err = nullptr, Double_t* errUpper = nullptr );
0124 
0125       // data accessors for external functions (the f2c-translated ANN code)
0126       Double_t GetData ( Int_t isel, Int_t ivar ) const { return (*fData)(isel, ivar); } // value of variable ivar for event isel
0127       Int_t    GetClass( Int_t ivar             ) const { return (*fClass)[ivar]; } // NOTE(review): indexes fClass (per-event classes) despite the name "ivar"
0128 
0129 
0130       // ranking of input variables: not implemented for this method, always returns nullptr
0131       const Ranking* CreateRanking() { return nullptr; }
0132 
0133    protected:
0134 
0135       // make ROOT-independent C++ class for classifier response (classifier-specific implementation)
0136       void MakeClassSpecific( std::ostream&, const TString& ) const;
0137 
0138       // header and auxiliary classes
0139       void MakeClassSpecificHeader( std::ostream&, const TString& = "" ) const;
0140 
0141       // get help message text
0142       void GetHelpMessage() const;
0143 
0144       Int_t DataInterface( Double_t*, Double_t*, Int_t*, Int_t*, Int_t*, Int_t*,
0145                            Double_t*, Int_t*, Int_t* ); // data-exchange hook, presumably invoked by the f2c core -- TODO(review): confirm contract in MethodCFMlpANN_Utils
0146 
0147    private:
0148 
0149       void PrintWeights( std::ostream & o ) const; // print the network weights to stream o
0150 
0151       // the option handling methods
0152       void DeclareOptions();
0153       void ProcessOptions();
0154 
0155       // LUTs
0156       TMatrixF       *fData;     // (event, variable) matrix of input data, read via GetData()
0157       std::vector<Int_t> *fClass;    // the event class (1=signal, 2=background)
0158 
0159       Int_t         fNlayers;   // number of layers (including input and output layers)
0160       Int_t         fNcycles;   // number of training cycles
0161       Int_t*        fNodes;     // number of nodes per layer (raw array -- TODO(review): confirm ownership/deletion)
0162 
0163       // additional member variables for the independent NN::Evaluation phase
0164       Double_t**    fYNN;       // weights
0165       TString       fLayerSpec; // the hidden layer specification string
0166       Int_t MethodCFMlpANN_nsel; // selection/event counter for the f2c interface -- TODO(review): confirm semantics
0167 
0168       // auxiliary member functions
0169       Double_t EvalANN( std::vector<Double_t>&, Bool_t& isOK ); // evaluate network on one variable vector; isOK flags validity -- TODO(review): confirm in impl
0170       void     NN_ava ( Double_t* );
0171       Double_t NN_fonc( Int_t, Double_t ) const;
0172 
0173       // default initialisation
0174       void Init( void );
0175 
0176       ClassDef(MethodCFMlpANN,0); // Interface for Clermont-Ferrand artificial neural network
0177    };
0178 
0179 } // namespace TMVA
0180 
0181 #endif