Warning: file /include/root/TMVA/ROperator_Reduce.hxx was not indexed,
or was modified since the last indexation; cross-reference links may therefore be missing, inaccurate, or erroneous.
0001 #ifndef TMVA_SOFIE_ROPERATOR_Reduce
0002 #define TMVA_SOFIE_ROPERATOR_Reduce
0003
0004 #include "TMVA/SOFIE_common.hxx"
0005 #include "TMVA/ROperator.hxx"
0006 #include "TMVA/RModel.hxx"
0007
0008 #include <memory>
0009 #include <sstream>
0010 #include <algorithm>
0011 #include <stdexcept>
0012 #include <vector>
0013 #include <cassert>
0014
0015 namespace TMVA{
0016 namespace Experimental{
0017 namespace SOFIE{
0018
// Reduction modes supported by ROperator_Reduce (selected via the Op template parameter);
// InvalidReduceOp marks an unrecognized/unsupported ONNX reduce operator.
enum EReduceOpMode { ReduceMean, ReduceSum, ReduceSumSquare, ReduceProd, InvalidReduceOp };
0020
0021 template <typename T, EReduceOpMode Op>
0022 class ROperator_Reduce final : public ROperator
0023 {
0024 private:
0025
0026 bool fInputDimShape = false;
0027 int fkeepdims = 1;
0028 std::vector<int64_t> fAttrAxes;
0029 EReduceOpMode fReduceOpMode;
0030 std::string fNX;
0031 std::string fNAxes;
0032 std::string fNY;
0033 std::vector<Dim> fShapeX;
0034 std::vector<Dim> fShapeY;
0035 std::vector<Dim> fShapeYNotPruned;
0036
0037
0038 public:
0039
0040 std::string Name() {
0041 if (fReduceOpMode == ReduceMean) return "ReduceMean";
0042 else if (fReduceOpMode == ReduceSumSquare ) return "ReduceSumSquare";
0043 else if (fReduceOpMode == ReduceProd ) return "ReduceProd";
0044 else if (fReduceOpMode == ReduceSum) return "ReduceSum";
0045 return "Invalid";
0046 }
0047
0048 ROperator_Reduce(){}
0049 ROperator_Reduce(int keepdims, std::vector<int64_t> attrAxes, std::string nameX, std::string nameAxes, std::string nameY):
0050 fkeepdims(keepdims), fAttrAxes(attrAxes), fNX(UTILITY::Clean_name(nameX)), fNAxes(UTILITY::Clean_name(nameAxes)), fNY(UTILITY::Clean_name(nameY)) {
0051 fReduceOpMode = Op;
0052
0053 fInputTensorNames = { fNX };
0054 if(!fNAxes.empty()){
0055 fInputTensorNames.emplace_back(fNAxes);
0056 }
0057
0058 fOutputTensorNames = { fNY };
0059 }
0060
0061
0062 std::vector<Dim> DoShapeInference(const std::vector<Dim> & input) {
0063 auto ret = input;
0064 auto & outputShape = ret;
0065 for (size_t j = 0; j < fAttrAxes.size(); j++) {
0066 if (fAttrAxes[j] < 0) fAttrAxes[j] += outputShape.size();
0067 if (fAttrAxes[j] < 0 || (size_t) fAttrAxes[j] >= outputShape.size() )
0068 throw std::runtime_error("TMVA SOFIE Reduce Op - invalid axes values " + std::to_string(fAttrAxes[j]));
0069
0070 outputShape[fAttrAxes[j]] = Dim{1};
0071 }
0072 fShapeYNotPruned = outputShape;
0073
0074 if (fkeepdims == 0) {
0075 auto ax = fAttrAxes;
0076 std::sort(ax.begin(), ax.end());
0077 for (size_t j = 0; j < ax.size(); j++) {
0078
0079 if (outputShape.size() > 1) {
0080 outputShape.erase(outputShape.begin() + ax[j]);
0081 for (size_t k = j+1; k < ax.size(); k++)
0082 ax[k] -= 1;
0083 }
0084 }
0085 }
0086 return ret;
0087 }
0088 void Initialize(RModel& model) override {
0089
0090 fUseSession = model.UseSession();
0091
0092 if (!model.CheckIfTensorAlreadyExist(fNX)) {
0093
0094 throw std::runtime_error("TMVA SOFIE Reduce Op Input Tensor " + fNX + " is not found in model");
0095 }
0096 fShapeX = model.GetDimTensorShape(fNX);
0097 if (model.IsDynamicTensor(fNX))
0098 fInputDimShape = true;
0099
0100 if (!fNAxes.empty()) {
0101 auto ax_shptr = model.GetInitializedTensorData(fNAxes);
0102 auto ax_ptr = static_cast<int64_t *>(ax_shptr.get());
0103 auto ax_shape = model.GetTensorShape(fNAxes);
0104 size_t ax_length = ConvertShapeToLength(ax_shape);
0105 fAttrAxes = std::vector<int64_t>(ax_ptr, ax_ptr+ax_length);
0106 } else if (fAttrAxes.empty()) {
0107
0108 fAttrAxes.resize(fShapeX.size());
0109 for (size_t i = 0; i < fAttrAxes.size(); i++)
0110 fAttrAxes[i] = i;
0111 }
0112
0113 fShapeY = DoShapeInference(fShapeX);
0114 model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
0115 if (model.Verbose()){
0116 std::cout << Name() << " : " << fNX << " -> " << fNY << " shape " << ConvertShapeToString(fShapeY) << std::endl;
0117 }
0118 model.AddNeededStdLib("algorithm");
0119 }
0120
0121 std::string Generate(std::string opName) override {
0122 opName = "op_" + opName;
0123 if (fShapeX.empty() || fShapeY.empty()) {
0124 throw std::runtime_error("TMVA SOFIE Reduce Op called to Generate without being initialized first");
0125 }
0126
0127 auto inputLength = TMVA::Experimental::SOFIE::ConvertDimShapeToLength(fShapeX);
0128 auto outputLength = TMVA::Experimental::SOFIE::ConvertDimShapeToLength(fShapeY);
0129
0130 auto inputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeX);
0131
0132 auto outputStrides = TMVA::Experimental::SOFIE::UTILITY::ComputeStrideFromShape(fShapeYNotPruned);
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142 std::stringstream out;
0143 out << "\n//---- operator " << Name() << " " << opName << "\n";
0144
0145 enum EReduceDim {kFirst, kLast, kMiddle};
0146 EReduceDim reduceDims = kLast;
0147 int kmin = fShapeX.size()-fAttrAxes.size();
0148 for (int k = fShapeX.size()-1; k >= kmin; k--) {
0149
0150 if (std::find(fAttrAxes.begin(), fAttrAxes.end(), k) == fAttrAxes.end()) {
0151 reduceDims = kMiddle;
0152 break;
0153 }
0154 }
0155 if (reduceDims == kMiddle) {
0156 reduceDims = kFirst;
0157
0158 for (size_t k = 0; k < fAttrAxes.size(); k++) {
0159
0160 if (std::find(fAttrAxes.begin(), fAttrAxes.end(), k) == fAttrAxes.end()) {
0161 reduceDims = kMiddle;
0162 break;
0163 }
0164 }
0165 }
0166 std::string reducedLength;
0167 if (fInputDimShape) {
0168 reducedLength = "reducedLength_" + opName;
0169 out << SP << "size_t " << reducedLength << " = " << inputLength << " / " << outputLength << ";\n";
0170 } else {
0171 int rLength = std::stoi(inputLength) / std::stoi(outputLength);
0172 reducedLength = std::to_string(rLength);
0173 }
0174 if (reduceDims == kLast) {
0175
0176
0177
0178
0179
0180
0181 out << SP << "for (size_t i = 0; i < " << outputLength << "; i++) {\n";
0182
0183 std::string startingValue = (fReduceOpMode == ReduceProd) ? "1" : "0";
0184 out << SP << SP << "tensor_" << fNY << "[i] = " << startingValue << ";\n";
0185 out << SP << SP << "for (size_t j = 0; j < " << reducedLength << "; j++) {\n";
0186
0187 if (fReduceOpMode == ReduceProd)
0188 out << SP << SP << SP << "tensor_" << fNY << "[i] *= tensor_" << fNX << "[i * " << reducedLength << " + j];\n";
0189 else if (fReduceOpMode == ReduceSum || fReduceOpMode == ReduceMean)
0190 out << SP << SP << SP << "tensor_" << fNY << "[i] += tensor_" << fNX << "[i * " << reducedLength << " + j];\n";
0191 else if(fReduceOpMode == ReduceSumSquare)
0192 out << SP << SP << SP << "tensor_" << fNY << "[i] += tensor_" << fNX << "[i * " << reducedLength << " + j] * tensor_"
0193 << fNX << "[i * " << reducedLength << " + j];\n";
0194 out << SP << SP << "}\n";
0195 if(fReduceOpMode == ReduceMean)
0196 out << SP << SP << "tensor_" << fNY << "[i] /= static_cast<float>(" << reducedLength << ");\n";
0197
0198 out << SP << "}\n";
0199 } else if (reduceDims == kFirst) {
0200
0201
0202
0203 if (fReduceOpMode == ReduceProd)
0204 out << SP << "std::fill(tensor_" << fNY <<", tensor_"<< fNY <<" + "<< outputLength << ", 1);\n";
0205 else
0206 out << SP << "std::fill(tensor_" << fNY <<", tensor_"<< fNY <<" + "<< outputLength << ", 0);\n";
0207
0208 out << SP << "for (size_t i = 0; i < " << reducedLength << "; i++) {\n";
0209 out << SP << SP << "for (size_t j = 0; j < " << outputLength << "; j++) {\n";
0210
0211 if (fReduceOpMode == ReduceProd)
0212 out << SP << SP << SP << "tensor_" << fNY << "[j] *= tensor_" << fNX << "[i * " << outputLength << " + j];\n";
0213 else if (fReduceOpMode == ReduceSum || fReduceOpMode == ReduceMean)
0214 out << SP << SP << SP << "tensor_" << fNY << "[j] += tensor_" << fNX << "[i * " << outputLength << " + j];\n";
0215 else if(fReduceOpMode == ReduceSumSquare)
0216 out << SP << SP << SP << "tensor_" << fNY << "[j] += tensor_" << fNX << "[i * " << outputLength << " + j] * tensor_"
0217 << fNX << "[i * " << outputLength << " + j];\n";
0218 out << SP << SP << "}\n";
0219 out << SP << "}\n";
0220 if(fReduceOpMode == ReduceMean) {
0221 out << SP << "for (size_t j = 0; i < " << outputLength << "; j++) {\n";
0222 out << SP << SP << "tensor_" << fNY << "[j] /= static_cast<float>(" << reducedLength << ");\n";
0223 out << SP << "}\n";
0224 }
0225 }
0226 else
0227 {
0228
0229
0230 if (fReduceOpMode == ReduceProd)
0231 out << SP << "std::fill(tensor_" << fNY <<", tensor_"<< fNY <<" + "<< outputLength << ", 1);\n";
0232 else
0233 out << SP << "std::fill(tensor_" << fNY <<", tensor_"<< fNY <<" + "<< outputLength << ",0);\n";
0234
0235 out << SP << "for (size_t i = 0; i < " << inputLength << "; i++) {\n";
0236
0237 size_t dim = fShapeX.size();
0238
0239
0240 out << SP << SP << "size_t outputIndex = 0;\n";
0241 for (size_t k = 0; k < dim; k++) {
0242 if (std::find(fAttrAxes.begin(), fAttrAxes.end(), k) == fAttrAxes.end()) {
0243
0244 out << SP << SP << "size_t i_" << k << " = i / " << inputStrides[k] << " % " << fShapeX[k] << ";\n";
0245 out << SP << SP << "outputIndex += i_" << k << " * " << outputStrides[k] << ";\n";
0246 }
0247 }
0248
0249 out << SP << SP << "// compute reduction....\n";
0250 if (fReduceOpMode == ReduceProd)
0251 out << SP << SP << "tensor_" << fNY << "[outputIndex] *= tensor_" << fNX << "[i];\n";
0252 else if (fReduceOpMode == ReduceSum || fReduceOpMode == ReduceMean)
0253 out << SP << SP << "tensor_" << fNY << "[outputIndex] += tensor_" << fNX << "[i];\n";
0254 else if (fReduceOpMode == ReduceSumSquare) {
0255 out << SP << SP << "tensor_" << fNY << "[outputIndex] += tensor_" << fNX << "[i] * tensor_" << fNX
0256 << "[i];\n";
0257 }
0258 out << SP << "}\n";
0259
0260 if (fReduceOpMode == ReduceMean) {
0261 out << SP << "for (size_t i = 0; i < " << outputLength << "; i++) {\n";
0262 out << SP << SP << "tensor_" << fNY << "[i] /= static_cast<float>(" << reducedLength << ");\n";
0263 out << SP << "}\n";
0264 }
0265 }
0266
0267 return out.str();
0268 }
0269
0270 };
0271
0272 }
0273 }
0274 }
0275
0276
0277 #endif
0278