#ifndef TMVA_SOFIE_ROPERATOR_Reduce
#define TMVA_SOFIE_ROPERATOR_Reduce

#include "TMVA/SOFIE_common.hxx"
#include "TMVA/ROperator.hxx"
#include "TMVA/RModel.hxx"

#include <memory>
#include <sstream>
#include <algorithm>
#include <stdexcept>
#include <vector>
#include <cassert>

namespace TMVA{
namespace Experimental{
namespace SOFIE{

enum EReduceOpMode { ReduceMean, ReduceSum, ReduceSumSquare, ReduceProd, InvalidReduceOp };

template <typename T, EReduceOpMode Op>
class ROperator_Reduce final : public ROperator
{
private:
    /* Attributes */
    bool fInputDimShape = false;
    int fkeepdims = 1; // default value
    std::vector<int64_t> fAttrAxes;
    EReduceOpMode fReduceOpMode;
    std::string fNX;
    std::string fNAxes;
    std::string fNY;
    std::vector<Dim> fShapeX;
    std::vector<Dim> fShapeY;
    std::vector<Dim> fShapeYNotPruned; // needed for fkeepdims == 0


public:

   std::string Name() {
      if (fReduceOpMode == ReduceMean)  return "ReduceMean";
      else if (fReduceOpMode == ReduceSumSquare)  return "ReduceSumSquare";
      else if (fReduceOpMode == ReduceProd) return "ReduceProd";
      else if (fReduceOpMode == ReduceSum) return "ReduceSum";
      return "Invalid";
   }

   ROperator_Reduce(){}
   ROperator_Reduce(int keepdims, std::vector<int64_t> attrAxes, std::string nameX, std::string nameAxes, std::string nameY):
   fkeepdims(keepdims), fAttrAxes(attrAxes), fNX(UTILITY::Clean_name(nameX)), fNAxes(UTILITY::Clean_name(nameAxes)), fNY(UTILITY::Clean_name(nameY)) {
      fReduceOpMode = Op;

      fInputTensorNames = { fNX };
      if (!fNAxes.empty()) {
         fInputTensorNames.emplace_back(fNAxes);
      }

      fOutputTensorNames = { fNY };
   }
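
   // Usage sketch (hypothetical tensor names, for illustration only): a
   // ReduceMean over axis 1 that keeps the reduced dimension could be built as
   //    ROperator_Reduce<float, ReduceMean> op(1 /*keepdims*/, {1}, "X", "", "Y");
   // where the empty axes-tensor name means the axes attribute {1} is used.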

   // shape of the output tensor given the input shape
   std::vector<Dim> DoShapeInference(const std::vector<Dim> & input) {
      auto ret = input; // take a copy of the input shape
      auto & outputShape = ret;
      for (size_t j = 0; j < fAttrAxes.size(); j++) {
         if (fAttrAxes[j] < 0) fAttrAxes[j] += outputShape.size();
         if (fAttrAxes[j] < 0 || (size_t) fAttrAxes[j] >= outputShape.size())
            throw std::runtime_error("TMVA SOFIE Reduce Op - invalid axes values " + std::to_string(fAttrAxes[j]));
         // set the reduced dimensions to 1
         outputShape[fAttrAxes[j]] = Dim{1};
      }
      fShapeYNotPruned = outputShape;
      // when pruning the reduced dimensions we need to sort the axes attributes
      if (fkeepdims == 0) {
         auto ax = fAttrAxes;
         std::sort(ax.begin(), ax.end());
         for (size_t j = 0; j < ax.size(); j++) {
            // erase the reduced dimensions, but always keep at least one
            if (outputShape.size() > 1) {
               outputShape.erase(outputShape.begin() + ax[j]);
               for (size_t k = j+1; k < ax.size(); k++)
                  ax[k] -= 1; // decrease by one since we have removed a value
            }
         }
      }
      return ret;
   }
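
   // Worked example (illustrative values): for an input shape {2, 3, 4} with
   // fAttrAxes = {1}, the reduced axis is set to 1 giving {2, 1, 4}; with
   // fkeepdims == 0 that axis is then erased, giving {2, 4}. Reducing over all
   // axes with fkeepdims == 0 leaves the single dimension {1}, since at least
   // one dimension is always kept.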
   void Initialize(RModel& model) override {

      fUseSession = model.UseSession();

      if (!model.CheckIfTensorAlreadyExist(fNX)) {
         // input must be a graph input or an already initialized intermediate tensor
         throw std::runtime_error("TMVA SOFIE Reduce Op Input Tensor " + fNX + " is not found in model");
      }
      fShapeX = model.GetDimTensorShape(fNX);
      if (model.IsDynamicTensor(fNX))
         fInputDimShape = true;
      // check if a tensor with the axes is provided
      if (!fNAxes.empty()) {
         auto ax_shptr = model.GetInitializedTensorData(fNAxes);
         auto ax_ptr = static_cast<int64_t *>(ax_shptr.get());
         auto ax_shape = model.GetTensorShape(fNAxes);
         size_t ax_length = ConvertShapeToLength(ax_shape);
         fAttrAxes = std::vector<int64_t>(ax_ptr, ax_ptr + ax_length);
      } else if (fAttrAxes.empty()) {
         // if no axes are passed, assume a full reduction
         fAttrAxes.resize(fShapeX.size());
         for (size_t i = 0; i < fAttrAxes.size(); i++)
            fAttrAxes[i] = i;
      }
      // find the shape of Y and add it to the list of intermediate tensors
      fShapeY = DoShapeInference(fShapeX);
      model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
      if (model.Verbose()){
         std::cout << Name() << " : " << fNX << " -> " << fNY << " shape " << ConvertShapeToString(fShapeY) << std::endl;
      }
      model.AddNeededStdLib("algorithm");
   }

   std::string Generate(std::string opName) override {
      opName = "op_" + opName;
      if (fShapeX.empty() || fShapeY.empty()) {
         throw std::runtime_error("TMVA SOFIE Reduce Op called to Generate without being initialized first");
      }

      auto inputLength = TMVA::Experimental::SOFIE::ConvertDimShapeToLength(fShapeX);
      auto outputLength = TMVA::Experimental::SOFIE::ConvertDimShapeToLength(fShapeY);

      auto inputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeX);
      // output strides (computed from the not-pruned shape)
      auto outputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeYNotPruned);

      // the generated code decomposes a flat index i into multi-dimensional indices:
      //   i0 = i / stride0 % shape0;  i1 = i / stride1 % shape1;  and so on,
      // and the inverse mapping is
      //   i = i0 * s0 + i1 * s1 + i2 * s2 + i3 * s3 + ...
      // there is no need to divide by the last stride s[n-1] since it is 1 by definition
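      //
      // Worked example (illustrative values): for shape {2, 3, 4} the strides
      // are {12, 4, 1}, so the flat index i = 17 decomposes as
      //   i0 = 17 / 12 % 2 = 1,  i1 = 17 / 4 % 3 = 1,  i2 = 17 % 4 = 1,
      // and indeed 1 * 12 + 1 * 4 + 1 * 1 = 17.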

      std::stringstream out;
      out << "\n//----  operator " << Name() << "  " << opName << "\n";
      // check whether the reduced axes are the first or the last ones; in these cases a faster implementation can be used
      enum EReduceDim {kFirst, kLast, kMiddle};
      EReduceDim reduceDims = kLast;
      int kmin = fShapeX.size() - fAttrAxes.size();
      for (int k = fShapeX.size() - 1; k >= kmin; k--) {
         // if k is not a reduced axis, the reduced axes are not the last ones
         if (std::find(fAttrAxes.begin(), fAttrAxes.end(), k) == fAttrAxes.end()) {
            reduceDims = kMiddle;
            break;
         }
      }
      if (reduceDims == kMiddle) {
         reduceDims = kFirst;
         // check if the reduced axes are at the beginning
         for (size_t k = 0; k < fAttrAxes.size(); k++) {
            // if k is not a reduced axis, the reduced axes are not the first ones
            if (std::find(fAttrAxes.begin(), fAttrAxes.end(), k) == fAttrAxes.end()) {
               reduceDims = kMiddle;
               break;
            }
         }
      }
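      // Illustrative classification (example values, not from the source): for
      // a rank-3 input, axes {2} or {1, 2} give kLast, axes {0} or {0, 1} give
      // kFirst, and axes {1} give kMiddle.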
      std::string reducedLength;
      if (fInputDimShape) {
         reducedLength = "reducedLength_" + opName;
         out << SP << "size_t " << reducedLength << " = " << inputLength << " / " << outputLength << ";\n";
      } else {
         int rLength = std::stoi(inputLength) / std::stoi(outputLength);
         reducedLength = std::to_string(rLength);
      }
      if (reduceDims == kLast) {
         //std::cout << "reduction for operator " << opName << " is last" << std::endl;
         // faster implementation using a single flat loop:
         // loop over the output elements, reset each one, then accumulate over the reduced dimension

         // loop on output dimensions
         out << SP << "for (size_t i = 0; i < " << outputLength << "; i++) {\n";
         // loop on reduced dimensions
         std::string startingValue = (fReduceOpMode == ReduceProd) ? "1" : "0";
         out << SP << SP << "tensor_" << fNY << "[i] = " << startingValue << ";\n";
         out << SP << SP << "for (size_t j = 0; j < " << reducedLength << "; j++) {\n";

         if (fReduceOpMode == ReduceProd)
            out << SP << SP << SP << "tensor_" << fNY << "[i] *= tensor_" << fNX << "[i * " << reducedLength << " + j];\n";
         else if (fReduceOpMode == ReduceSum || fReduceOpMode == ReduceMean)
            out << SP << SP << SP << "tensor_" << fNY << "[i] += tensor_" << fNX << "[i * " << reducedLength << " + j];\n";
         else if (fReduceOpMode == ReduceSumSquare)
            out << SP << SP << SP << "tensor_" << fNY << "[i] += tensor_" << fNX << "[i * " << reducedLength << " + j] * tensor_"
                                   << fNX << "[i * " << reducedLength << " + j];\n";
         out << SP << SP << "}\n"; // end j loop
         if (fReduceOpMode == ReduceMean)
            out << SP << SP << "tensor_" << fNY << "[i] /= static_cast<float>(" << reducedLength << ");\n";

         out << SP << "}\n"; // end i loop
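
         // Example of the generated code (illustrative, for ReduceSum with
         // outputLength = 6 and reducedLength = 4):
         //    for (size_t i = 0; i < 6; i++) {
         //       tensor_Y[i] = 0;
         //       for (size_t j = 0; j < 4; j++) {
         //          tensor_Y[i] += tensor_X[i * 4 + j];
         //       }
         //    }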
      } else if (reduceDims == kFirst) {
         //std::cout << "reduction for operator " << opName << " is first" << std::endl;
         // case where the reduction is at the beginning
         // reset the output tensor
         if (fReduceOpMode == ReduceProd)
            out << SP << "std::fill(tensor_" << fNY << ", tensor_" << fNY << " + " << outputLength << ", 1);\n";
         else
            out << SP << "std::fill(tensor_" << fNY << ", tensor_" << fNY << " + " << outputLength << ", 0);\n";

         out << SP << "for (size_t i = 0; i < " << reducedLength << "; i++) {\n";
         out << SP << SP << "for (size_t j = 0; j < " << outputLength << "; j++) {\n";

         if (fReduceOpMode == ReduceProd)
            out << SP << SP << SP << "tensor_" << fNY << "[j] *= tensor_" << fNX << "[i * " << outputLength << " + j];\n";
         else if (fReduceOpMode == ReduceSum || fReduceOpMode == ReduceMean)
            out << SP << SP << SP << "tensor_" << fNY << "[j] += tensor_" << fNX << "[i * " << outputLength << " + j];\n";
         else if (fReduceOpMode == ReduceSumSquare)
            out << SP << SP << SP << "tensor_" << fNY << "[j] += tensor_" << fNX << "[i * " << outputLength << " + j] * tensor_"
                                   << fNX << "[i * " << outputLength << " + j];\n";
         out << SP << SP << "}\n"; // end j loop
         out << SP << "}\n"; // end i loop
         if (fReduceOpMode == ReduceMean) {
            out << SP << "for (size_t j = 0; j < " << outputLength << "; j++) {\n";
            out << SP << SP << "tensor_" << fNY << "[j] /= static_cast<float>(" << reducedLength << ");\n";
            out << SP << "}\n"; // end j loop
         }
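
         // Example of the generated code (illustrative, for ReduceSum with
         // reducedLength = 4 and outputLength = 6):
         //    std::fill(tensor_Y, tensor_Y + 6, 0);
         //    for (size_t i = 0; i < 4; i++) {
         //       for (size_t j = 0; j < 6; j++) {
         //          tensor_Y[j] += tensor_X[i * 6 + j];
         //       }
         //    }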
      }
      else
      { // standard case
         //std::cout << "reduction for operator " << opName << " is middle" << std::endl;
         // reset the output tensor
         if (fReduceOpMode == ReduceProd)
            out << SP << "std::fill(tensor_" << fNY << ", tensor_" << fNY << " + " << outputLength << ", 1);\n";
         else
            out << SP << "std::fill(tensor_" << fNY << ", tensor_" << fNY << " + " << outputLength << ", 0);\n";

         out << SP << "for (size_t i = 0; i < " << inputLength << "; i++) {\n";

         size_t dim = fShapeX.size(); // input rank (e.g. 2, 3, 4 or more)

         // here we find the output index
         out << SP << SP << "size_t outputIndex = 0;\n";
         for (size_t k = 0; k < dim; k++) {
            if (std::find(fAttrAxes.begin(), fAttrAxes.end(), k) == fAttrAxes.end()) {
               // do this only for the non-reduced axes
               out << SP << SP << "size_t i_" << k << " = i / " << inputStrides[k] << " % " << fShapeX[k] << ";\n";
               out << SP << SP << "outputIndex += i_" << k << " * " << outputStrides[k] << ";\n";
            }
         }
         // now compute the reduction
         out << SP << SP << "// compute the reduction\n";
         if (fReduceOpMode == ReduceProd)
            out << SP << SP << "tensor_" << fNY << "[outputIndex] *= tensor_" << fNX << "[i];\n";
         else if (fReduceOpMode == ReduceSum || fReduceOpMode == ReduceMean)
            out << SP << SP << "tensor_" << fNY << "[outputIndex] += tensor_" << fNX << "[i];\n";
         else if (fReduceOpMode == ReduceSumSquare) {
            out << SP << SP << "tensor_" << fNY << "[outputIndex] += tensor_" << fNX << "[i] * tensor_" << fNX
                << "[i];\n";
         }
         out << SP << "}\n"; // end loop on input elements
         // normalize for the reduced mean
         if (fReduceOpMode == ReduceMean) {
            out << SP << "for (size_t i = 0; i < " << outputLength << "; i++) {\n";
            out << SP << SP << "tensor_" << fNY << "[i] /= static_cast<float>(" << reducedLength << ");\n";
            out << SP << "}\n";
         }
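
         // Example of the generated index computation (illustrative, for input
         // shape {2, 3, 4} and fAttrAxes = {1}: inputStrides = {12, 4, 1},
         // fShapeYNotPruned = {2, 1, 4}, outputStrides = {4, 4, 1}):
         //    size_t outputIndex = 0;
         //    size_t i_0 = i / 12 % 2;
         //    outputIndex += i_0 * 4;
         //    size_t i_2 = i / 1 % 4;
         //    outputIndex += i_2 * 1;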
      }

      return out.str();
   }

};

}//SOFIE
}//Experimental
}//TMVA


#endif //TMVA_SOFIE_ROPERATOR_Reduce