Back to home page

EIC code displayed by LXR

 
 

    


File indexing completed on 2025-01-30 10:23:01

0001 #ifndef TMVA_SOFIE_ROPERATOR_Reduce
0002 #define TMVA_SOFIE_ROPERATOR_Reduce
0003 
0004 #include "TMVA/SOFIE_common.hxx"
0005 #include "TMVA/ROperator.hxx"
0006 #include "TMVA/RModel.hxx"
0007 
0008 #include <memory>
0009 #include <sstream>
0010 #include <algorithm>
0011 #include <stdexcept>
0012 #include <vector>
0013 #include <cassert>
0014 
0015 namespace TMVA{
0016 namespace Experimental{
0017 namespace SOFIE{
0018 
// Reduction kinds supported by ROperator_Reduce; InvalidReduceOp marks an
// unrecognized/unsupported reduce operator.
enum EReduceOpMode { ReduceMean, ReduceSum, ReduceSumsquare, ReduceProd, InvalidReduceOp };
0020 
0021 template <typename T, EReduceOpMode Op>
0022 class ROperator_Reduce final : public ROperator
0023 {
0024 private:
0025     /* Attributes*/
0026     int fkeepdims = 1; //default value
0027     int fAttrAxes;
0028     EReduceOpMode fReduceOpMode;
0029     std::string fNX;
0030     std::string fNY;
0031     std::vector<size_t> fShapeX;
0032     std::vector<size_t> fShapeY;
0033 
0034 
0035 public:
0036 
0037    std::string Name() {
0038       if (fReduceOpMode == ReduceMean)  return "ReduceMean";
0039       else if (fReduceOpMode == ReduceSumsquare )  return "ReduceSumsquare";
0040       else if (fReduceOpMode == ReduceProd ) return "ReduceProd";
0041       else if (fReduceOpMode == ReduceSum) return "ReduceSum";
0042       return "Invalid";
0043    }
0044 
0045    ROperator_Reduce(){}
0046    ROperator_Reduce(int keepdims,int attrAxes,std::string nameX, std::string nameY):
0047    fkeepdims(keepdims), fAttrAxes(attrAxes), fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY)) {
0048       fReduceOpMode = Op;
0049    }
0050 
0051    // type of output given input
0052    std::vector<ETensorType> TypeInference(std::vector<ETensorType> input){
0053       return input;
0054    }
0055 
0056    // shape of output tensors given input tensors
0057    std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input){
0058       auto ret = input; //suggest copy to compiler
0059       ret[0][fAttrAxes] = 1;
0060       return ret;
0061    }
0062     void Initialize(RModel& model){
0063 
0064         fUseSession = model.UseSession();
0065 
0066         if (model.CheckIfTensorAlreadyExist(fNX) == false){   //input must be a graph input, or already initialized intermediate tensor
0067             throw std::runtime_error("TMVA SOFIE Reduce Op Input Tensor " + fNX + " is not found in model");
0068         }
0069         fShapeX = model.GetTensorShape(fNX);
0070          // find shape of Y and add it in the list of intermediate tensors
0071          fShapeY = ShapeInference({fShapeX})[0];
0072          model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
0073 
0074     }
0075 
0076     std::string Generate(std::string OpName){
0077       OpName = "op_" + OpName;
0078       if (fShapeX.empty() || fShapeY.empty()) {
0079          throw std::runtime_error("TMVA SOFIE Reduce Op called to Generate without being initialized first");
0080       }
0081 
0082       size_t outputLength = TMVA::Experimental::SOFIE::ConvertShapeToLength(fShapeY);
0083 
0084       auto inputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeX);
0085       auto outputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeY);
0086 
0087       // write here according to size of shape
0088       // in generation code can be done automatically
0089       // i0 =  i / s0 ; i1 = (i % s0) / s1 ; i2 = ( (i % s0) % s1 ) / s2 and so on
0090       // and we have for the inverse
0091       // i = i0 * s0 + i1 * s1 + i2 * s2 + i3 * s3 ....
0092 
0093       // don't need to divide by last stride s[n-1] since it is 1 by definition
0094 
0095       std::stringstream out;
0096       out << "\n//----  operator " << Name() << "  " << OpName << "\n";
0097       out << SP << "for (size_t i = 0; i < " << outputLength << "; i++) {\n";
0098 
0099       size_t dim = fShapeX.size();   // this is the input dimension (e.g. 2, 3 or 4 or more)
0100 
0101       // here we find output indices
0102       out << SP << SP << "size_t idx_0 = i / " << outputStrides[0] << ";\n" ;
0103       out << SP << SP << "size_t itmp = i;\n";
0104       for (size_t k = 1; k < dim; k++) {
0105          out << SP << SP << "itmp = itmp % " << outputStrides[k-1] << ";\n" ;
0106          if (k < dim-1)
0107             out << SP << SP << "size_t idx_" << k << " = itmp / " << outputStrides[k] << ";\n" ;
0108          else
0109            // to avoid division by 1 which is outputStrides[dim-1]
0110            out << SP << SP << "size_t idx_" << k << " = itmp;\n";
0111       }
0112 
0113       // compute reduction
0114 
0115       if(fReduceOpMode == ReduceProd)
0116          out << SP << SP << "float sum = 1;\n";
0117       else 
0118          out << SP << SP << "float sum = 0;\n";
0119       
0120       out << SP << SP << "for (size_t k = 0; k < " << fShapeX[fAttrAxes] <<"; k++) { \n";
0121       out << SP << SP << SP << "idx_" << fAttrAxes << " = k;\n";
0122        // compute input index j
0123       out << SP << SP << SP << "size_t l = ";
0124       for(int n = dim-1; n >=0; n--) {
0125          if (n == int(dim-1))
0126             out << "idx_" << n;
0127          else
0128             out << " + " << "idx_" << n << " * " << inputStrides[n];
0129       }
0130       out << ";\n";
0131 
0132       if(fReduceOpMode == ReduceMean){
0133          out << SP << SP << SP << "sum += tensor_" << fNX << "[l];\n";
0134          out << SP << SP << "}\n";
0135          out << SP << SP << "float reduceResult = sum/static_cast<float>(" << fShapeX[fAttrAxes] << ");\n";
0136       }
0137       else if(fReduceOpMode == ReduceSum){
0138          out << SP << SP << SP << "sum += tensor_" << fNX << "[l];\n";
0139          out << SP << SP << "}\n";
0140          out << SP << SP << "float reduceResult = sum;\n";
0141       }
0142       else if(fReduceOpMode == ReduceSumsquare){
0143          out << SP << SP << SP << "sum += tensor_" << fNX << "[l] * tensor_" << fNX << "[l];\n";
0144          out << SP << SP << "}\n";
0145          out << SP << SP << "float reduceResult = sum;\n";
0146       }
0147       else if(fReduceOpMode == ReduceProd){
0148          out << SP << SP << SP << "sum *= tensor_" << fNX << "[l];\n";
0149          out << SP << SP << "}\n";
0150          out << SP << SP << "float reduceResult = sum;\n";
0151       }
0152 
0153       out << SP << SP << "tensor_" << fNY << "[i] = reduceResult;\n";
0154       out << SP << "}\n";
0155       return out.str();
0156    }
0157 
0158 };
0159 
0160 }//SOFIE
0161 }//Experimental
0162 }//TMVA
0163 
0164 
0165 #endif //TMVA_SOFIE_ROPERATOR_Reduce
0166