Warning: file /include/root/TMVA/ROperator_Pool.hxx was not indexed, or was modified since the last indexation; cross-reference links may therefore be missing, inaccurate, or erroneous.
0001 #ifndef TMVA_SOFIE_ROPERATOR_POOL
0002 #define TMVA_SOFIE_ROPERATOR_POOL
0003
0004 #include "TMVA/SOFIE_common.hxx"
0005 #include "TMVA/ROperator.hxx"
0006 #include "TMVA/RModel.hxx"
0007
#include <algorithm>
#include <cassert>
#include <iostream>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <vector>
0014
0015 namespace TMVA {
0016 namespace Experimental {
0017 namespace SOFIE {
0018
// Parsed attributes of the ONNX pooling operators (MaxPool / AveragePool /
// GlobalAveragePool). Member names mirror the attribute names of the ONNX
// operator specification; defaults match the ONNX defaults.
struct RAttributes_Pool {

   std::string auto_pad = "NOTSET";   // padding strategy: NOTSET, SAME_UPPER, SAME_LOWER or VALID
   int ceil_mode = 0;                 // use ceil instead of floor for the output extent (0 = floor)
   int count_include_pad = 0;         // AveragePool only: count padded cells in the divisor
   int storage_order = 0;             // MaxPool only — stored here but not used by this operator
   std::vector<size_t> dilations;     // kernel dilation per spatial axis (empty => 1)
   std::vector<size_t> kernel_shape;  // pooling window extent per spatial axis
   std::vector<size_t> pads;          // paddings, ONNX layout {x1_begin, x2_begin, ..., x1_end, x2_end, ...}
   std::vector<size_t> strides;       // window stride per spatial axis (empty => 1)
};
0030
/// Kind of pooling operation performed by an ROperator_Pool instance.
enum PoolOpMode { InvalidPool, MaxPool, AveragePool, GlobalAveragePool };
0032
0033 template<typename T>
0034 class ROperator_Pool final : public ROperator
0035 {
0036
0037 private:
0038
0039 PoolOpMode fPoolMode;
0040
0041 size_t fAttrCeilMode;
0042 size_t fAttrCountIncludePad;
0043 size_t fAttrStorageOrder;
0044 std::string fAttrAutopad;
0045 std::vector<size_t> fAttrDilations;
0046 std::vector<size_t> fAttrKernelShape;
0047 std::vector<size_t> fAttrPads;
0048 std::vector<size_t> fAttrStrides;
0049
0050 std::string fNX;
0051 std::string fNY;
0052
0053 std::vector<size_t> fShapeX;
0054 std::vector<size_t> fShapeY;
0055
0056 std::string fType;
0057
0058 size_t fDim;
0059 bool fUseSession = false;
0060
0061 public:
0062
0063 std::string Name() {
0064 if (fPoolMode == AveragePool) return "AveragePool";
0065 if (fPoolMode == MaxPool) return "MaxPool";
0066 return "Invalid";
0067 }
0068
0069 ROperator_Pool() {}
0070
0071 ROperator_Pool(PoolOpMode mode, RAttributes_Pool attr, std::string nameX, std::string nameY)
0072 : fPoolMode(mode), fAttrCeilMode(attr.ceil_mode), fAttrCountIncludePad(attr.count_include_pad),
0073 fAttrStorageOrder(attr.storage_order), fAttrAutopad(attr.auto_pad),
0074 fAttrDilations(attr.dilations), fAttrKernelShape(attr.kernel_shape), fAttrPads(attr.pads), fAttrStrides(attr.strides),
0075 fNX(UTILITY::Clean_name(nameX)), fNY(UTILITY::Clean_name(nameY))
0076 {
0077 if(std::is_same<T, float>::value) {
0078 fType = "float";
0079 } else {
0080 throw
0081 std::runtime_error("TMVA SOFIE Encountered unsupported type parsing a Pool operator");
0082 }
0083 fInputTensorNames = { fNX };
0084 fOutputTensorNames = { fNY };
0085 }
0086
0087
0088 std::vector<ETensorType> TypeInference(std::vector<ETensorType> input) override {
0089
0090 return input;
0091 }
0092
0093
0094 std::vector<std::vector<size_t>> ShapeInference(std::vector<std::vector<size_t>> input) override {
0095
0096
0097
0098 if (input.size() != 1 ) {
0099 throw std::runtime_error("TMVA SOFIE" + Name() + "Op Shape inference need 1 input tensor");
0100 }
0101 if (input[0].size() < 3) {
0102 throw std::runtime_error("TMVA SOFIE" + Name() + "Op Shape inference only accept tensor with at least 3 dimensions");
0103 }
0104
0105 if (input[0].size() < 3 || input[0].size() > 5) {
0106 throw std::runtime_error("TMVA SOFIE" + Name() + "Op : tensors with dimension " + std::to_string(input[0].size()) + " are not yet supported");
0107 }
0108
0109 if (input[0].size() -2 != fDim) {
0110 throw
0111 std::runtime_error("TMVA SOFIE Pool Op Shape inference - invalid inputs ");
0112 }
0113
0114 size_t k1 = ((fAttrKernelShape.empty())? input[0][2] : fAttrKernelShape[0]);
0115 size_t k2 = (fDim > 1) ? ((fAttrKernelShape.empty()) ? input[0][3] : fAttrKernelShape[1]) : 1;
0116 size_t k3 = (fDim > 2) ? ((fAttrKernelShape.empty()) ? input[0][4] : fAttrKernelShape[2]) : 1;
0117
0118
0119 size_t i1 = (fDim > 1) ? ((fDim > 2) ? 3 : 2) : 1;
0120 size_t i2 = (fDim > 2) ? 4 : 3;
0121 size_t i3 = 5;
0122
0123 if (fAttrDilations.empty()) {
0124 fAttrDilations = {1, 1, 1};
0125 }
0126 fAttrDilations.resize(3);
0127 if (fDim < 3) {
0128 fAttrDilations.resize(3, 1);
0129 }
0130
0131 fAttrKernelShape = {k1 + (fAttrDilations[0] - 1) * (k1 - 1),
0132 k2 + (fAttrDilations[1] - 1) * (k2 - 1),
0133 k3 + (fAttrDilations[2] - 1) * (k3 - 1)};
0134
0135 if (fAttrAutopad == "NOTSET") {
0136
0137 if (fAttrPads.empty()) {
0138 fAttrPads = {0, 0, 0, 0, 0, 0};
0139 }
0140 } else if (fAttrAutopad == "SAME_UPPER" || fAttrAutopad == "SAME_LOWER") {
0141 if (fDim == 1)
0142 fAttrPads = {fAttrKernelShape[0] / 2, fAttrKernelShape[0] / 2};
0143 else if (fDim == 2)
0144 fAttrPads = {fAttrKernelShape[0] / 2, fAttrKernelShape[1] / 2, fAttrKernelShape[0] / 2, fAttrKernelShape[1] / 2};
0145 else if (fDim == 3)
0146 fAttrPads = {fAttrKernelShape[0] / 2, fAttrKernelShape[1] / 2, fAttrKernelShape[2] / 2,
0147 fAttrKernelShape[0] / 2, fAttrKernelShape[1] / 2, fAttrKernelShape[2] / 2};
0148
0149
0150 if (fAttrKernelShape[0] % 2 == 1) {
0151 (fAttrAutopad == "SAME_UPPER") ? fAttrPads[0]++ : fAttrPads[i1]++;
0152 }
0153 if (fDim > 1 && fAttrKernelShape[1] % 2 == 1) {
0154 (fAttrAutopad == "SAME_UPPER") ? fAttrPads[1]++ : fAttrPads[i2]++;
0155 }
0156 if (fDim > 2 && fAttrKernelShape[2] % 2 == 1) {
0157 (fAttrAutopad == "SAME_UPPER") ? fAttrPads[2]++ : fAttrPads[i3]++;
0158 }
0159 } else if (fAttrAutopad != "VALID") {
0160 throw
0161 std::runtime_error("TMVA SOFIE" + Name() + "Op invalid Autopad value : " + fAttrAutopad);
0162 }
0163
0164 if (fDim < 3) fAttrPads.resize(6, 0);
0165
0166 if (fAttrStrides.empty()) {
0167 fAttrStrides = {1, 1, 1};
0168 }
0169
0170 if (fDim < 3)
0171 fAttrStrides.resize(3, 1);
0172
0173 size_t input1 = input[0][2];
0174 size_t input2 = (fDim > 1) ? input[0][3] : 1;
0175 size_t input3 = (fDim > 2) ? input[0][4] : 1;
0176
0177 size_t pad1 = fAttrPads[0] + fAttrPads[i1];
0178 size_t output1 = (input1 + pad1 - fAttrKernelShape[0]) / fAttrStrides[0] + 1;
0179
0180 size_t batch_size = input[0][0];
0181 size_t output_channels = input[0][1];
0182
0183 std::vector<std::vector<size_t>> ret({{ batch_size, output_channels, output1 }});
0184
0185 if (fDim == 1)
0186 return ret;
0187
0188 size_t pad2 = fAttrPads[1] + fAttrPads[i2];
0189 size_t output2 = (input2 + pad2 - fAttrKernelShape[1]) / fAttrStrides[1] + 1;
0190
0191 ret[0].push_back(output2);
0192 if (fDim == 2)
0193 return ret;
0194
0195 size_t pad3 = fAttrPads[2] + fAttrPads[i3];
0196 size_t output3 = (input3 + pad3 - fAttrKernelShape[2] ) / fAttrStrides[2] + 1;
0197
0198
0199 ret[0].push_back(output3);
0200 return ret;
0201 }
0202
0203 void Initialize(RModel& model) override {
0204
0205 fUseSession = model.UseSession();
0206
0207 if (!model.CheckIfTensorAlreadyExist(fNX)) {
0208 throw
0209 std::runtime_error("TMVA SOFIE Pool op Input Tensor " + fNX + " is not found in model");
0210 }
0211 fShapeX = model.GetTensorShape(fNX);
0212 if (fShapeX.size() < 3 || fShapeX.size() > 5) {
0213 std::cout << fNX << " : " << ConvertShapeToString(fShapeX) << std::endl;
0214 throw
0215 std::runtime_error("TMVA SOFIE Pool Op input data tensor" + fNX + " is not of 3,4 or 5 dimensions");
0216 }
0217 fDim = fShapeX.size() - 2;
0218
0219 if (fPoolMode == GlobalAveragePool) {
0220 fPoolMode = AveragePool;
0221 fAttrKernelShape.resize(3);
0222 fAttrKernelShape[0] = fShapeX[2];
0223 if (fDim > 1)
0224 fAttrKernelShape[1] = fShapeX[3];
0225 if (fDim > 2)
0226 fAttrKernelShape[2] = fShapeX[4];
0227 fAttrAutopad = "VALID";
0228 fAttrPads = {0, 0, 0, 0, 0, 0 };
0229 assert(fAttrStrides.empty());
0230 }
0231
0232 fShapeY = ShapeInference({fShapeX})[0];
0233 model.AddIntermediateTensor(fNY, model.GetTensorType(fNX), fShapeY);
0234
0235
0236 if (fPoolMode == MaxPool) model.AddNeededStdLib("cmath");
0237
0238 }
0239
0240 std::string GenerateInitCode() override {
0241 std::stringstream out;
0242 return out.str();
0243 }
0244
0245
0246 virtual std::string GenerateSessionMembersCode(std::string opName) override {
0247 opName = "op_" + opName;
0248 std::stringstream out;
0249
0250 if(fDim == 1){
0251 out << "std::vector<" << fType << "> fVec_" << opName << "_xpad = std::vector<" << fType << ">("
0252 << fShapeX[1] * (fShapeX[2] + fAttrPads[0] + fAttrPads[2]) << ");\n";
0253 }
0254 else if(fDim == 2){
0255 out << "std::vector<" << fType << "> fVec_" << opName << "_xpad = std::vector<" << fType << ">("
0256 << fShapeX[1] * (fShapeX[2] + fAttrPads[0] + fAttrPads[2]) * (fShapeX[3] + fAttrPads[1] + fAttrPads[3])
0257 << ");\n";
0258 }
0259 else{
0260 out << "std::vector<" << fType << "> fVec_" << opName << "_xpad = std::vector<" << fType << ">("
0261 << fShapeX[1] * (fShapeX[2] + fAttrPads[0] + fAttrPads[2]) * (fShapeX[3] + fAttrPads[1] + fAttrPads[3]) *
0262 (fShapeX[4] + fAttrPads[2] + fAttrPads[4]) << ");\n";
0263 }
0264
0265 return out.str();
0266 }
0267
0268 std::string Generate(std::string OpName) override {
0269 OpName = "op_" + OpName;
0270
0271 if (fShapeX.empty() || fShapeY.empty()) {
0272 throw std::runtime_error("TMVA SOFIE Pool Op called to Generate without being initialized first");
0273 }
0274
0275 std::stringstream out;
0276
0277 out << "\n//---- operator " << Name() << " " << OpName << "\n";
0278 out << "{\n";
0279
0280 assert(fShapeX[0] == fShapeY[0]);
0281 assert(fShapeX[1] == fShapeY[1]);
0282 assert(fAttrPads.size() == 6);
0283 assert(fAttrKernelShape.size() == 3);
0284
0285 int hmin = - fAttrPads[0];
0286 int hmax = fShapeX[2] + fAttrPads[1] - fAttrKernelShape[0] +1;
0287 int wmin,wmax,dmin,dmax;
0288
0289 if(fDim >= 2){
0290 wmin = - fAttrPads[2];
0291 wmax = fShapeX[3] + fAttrPads[3] - fAttrKernelShape[1] +1;
0292 }
0293 else{
0294 wmin=1;
0295 wmax=1;
0296 }
0297 if(fDim == 3){
0298 dmin = - fAttrPads[4];
0299 dmax = fShapeX[4] + fAttrPads[5] - fAttrKernelShape[2] +1;
0300 }
0301 else{
0302 dmin=1;
0303 dmax=1;
0304 }
0305 out << SP << "constexpr int hsize = " << fShapeX[2] << ";\n";
0306 out << SP << "constexpr int hmin = " << hmin << ";\n";
0307 out << SP << "constexpr int hmax = " << hmax << ";\n";
0308 out << SP << "constexpr int kh = " << fAttrKernelShape[0] << ";\n";
0309 if (fDim > 1) {
0310 size_t wsize = fShapeX[3];
0311 out << SP << "constexpr int wsize = " << wsize << ";\n";
0312 out << SP << "constexpr int wmin = " << wmin << ";\n";
0313 out << SP << "constexpr int wmax = " << wmax << ";\n";
0314 out << SP << "constexpr int kw = " << fAttrKernelShape[1] << ";\n";
0315 if (fDim > 2) {
0316 size_t dsize = fShapeX[4];
0317 out << SP << "constexpr int dsize = " << dsize << ";\n";
0318 out << SP << "constexpr int dwsize = " << dsize*wsize << ";\n";
0319 out << SP << "constexpr int dmin = " << dmin << ";\n";
0320 out << SP << "constexpr int dmax = " << dmax << ";\n";
0321 out << SP << "constexpr int kd = " << fAttrKernelShape[2] << ";\n";
0322 }
0323 }
0324
0325
0326 bool doPadding = false;
0327 for ( auto & e : fAttrPads)
0328 doPadding |= (e > 0);
0329
0330
0331 if(fDim==1){
0332
0333 out << SP << "size_t outIndex = 0;\n";
0334 out << SP << "for (size_t n = 0; n < " << fShapeX[0]*fShapeX[1] << "; n++) {\n";
0335 out << SP << SP << "size_t inputOffset = n*" << fShapeX[2] << ";\n";
0336 out << SP << SP << "for (int i = hmin; i < hmax; i+=" << fAttrStrides[0] << ") {\n";
0337
0338 if (fPoolMode == MaxPool)
0339 out << SP << SP << SP << SP << "float value = -INFINITY;\n";
0340 else if (fPoolMode == AveragePool) {
0341 out << SP << SP << SP << SP << "float value = 0;\n";
0342 if (fAttrCountIncludePad == 0 && doPadding)
0343 out << SP << SP << SP << SP << "int nsum = 0;\n";
0344 else
0345 out << SP << SP << SP << SP << "constexpr int nsum = kh;\n";
0346 }
0347
0348 out << SP << SP << SP << SP << "for (int l = i; l < i + kh; l++) {\n";
0349 out << SP << SP << SP << SP << SP << "if (l < 0 || l >= hsize) continue;\n";
0350 out << SP << SP << SP << SP << SP << SP << "int index = inputOffset + l;\n";
0351 if (fPoolMode == MaxPool) {
0352 out << SP << SP << SP << SP << SP << SP << "auto xval = tensor_" << fNX << "[index];\n";
0353 out << SP << SP << SP << SP << SP << SP << "if (xval > value) value = xval;\n";
0354 }
0355 else if (fPoolMode == AveragePool) {
0356
0357 out << SP << SP << SP << SP << SP << SP << "value += tensor_" << fNX << "[index];\n";
0358 if (fAttrCountIncludePad == 0 && doPadding)
0359
0360 out << SP << SP << SP << SP << SP << SP << "nsum++;\n";
0361 }
0362 out << SP << SP << SP << SP << SP << "}\n";
0363 if (fPoolMode == AveragePool) {
0364
0365 out << SP << SP << SP << SP << "value /= float(nsum);\n";
0366 }
0367
0368 out << SP << SP << SP << SP << "tensor_" << fNY << "[outIndex++] = value;\n";
0369
0370 out << SP << SP << "}\n";
0371 out << SP << "}\n";
0372 }
0373 else if(fDim==2){
0374
0375 out << SP << "size_t outIndex = 0;\n";
0376 out << SP << "for (size_t n = 0; n < " << fShapeX[0]*fShapeX[1] << "; n++) {\n";
0377 out << SP << SP << "size_t inputOffset = n*" << fShapeX[2]*fShapeX[3] << ";\n";
0378 out << SP << SP << "for (int i = hmin; i < hmax; i+=" << fAttrStrides[0] << ") {\n";
0379 out << SP << SP << SP << "for (int j = wmin; j < wmax; j+=" << fAttrStrides[1] << ") {\n";
0380
0381 if (fPoolMode == MaxPool)
0382 out << SP << SP << SP << SP << "float value = -INFINITY;\n";
0383 else if (fPoolMode == AveragePool) {
0384 out << SP << SP << SP << SP << "float value = 0;\n";
0385 if (fAttrCountIncludePad == 0 && doPadding)
0386 out << SP << SP << SP << SP << "int nsum = 0;\n";
0387 else
0388 out << SP << SP << SP << SP << "constexpr int nsum = kw*kh;\n";
0389 }
0390
0391 out << SP << SP << SP << SP << "for (int l = i; l < i + kh; l++) {\n";
0392 out << SP << SP << SP << SP << SP << "if (l < 0 || l >= hsize) continue;\n";
0393
0394 out << SP << SP << SP << SP << SP << "for (int m = j; m < j + kw; m++) {\n";
0395 out << SP << SP << SP << SP << SP << SP << "if (m < 0 || m >= wsize) continue;\n";
0396 out << SP << SP << SP << SP << SP << SP << SP << "int index = inputOffset + l*wsize + m;\n";
0397 if (fPoolMode == MaxPool) {
0398 out << SP << SP << SP << SP << SP << SP << SP << "auto xval = tensor_" << fNX << "[index];\n";
0399 out << SP << SP << SP << SP << SP << SP << SP << "if (xval > value) value = xval;\n";
0400 }
0401 else if (fPoolMode == AveragePool) {
0402
0403 out << SP << SP << SP << SP << SP << SP << SP << "value += tensor_" << fNX << "[index];\n";
0404 if (fAttrCountIncludePad == 0 && doPadding)
0405
0406 out << SP << SP << SP << SP << SP << SP << SP << "nsum++;\n";
0407 }
0408 out << SP << SP << SP << SP << SP << SP << "}\n";
0409 out << SP << SP << SP << SP << SP << "}\n";
0410 if (fPoolMode == AveragePool) {
0411
0412 out << SP << SP << SP << SP << "value /= float(nsum);\n";
0413 }
0414 out << SP << SP << SP << SP << "tensor_" << fNY << "[outIndex++] = value;\n";
0415 out << SP << SP << SP << "}\n";
0416 out << SP << SP << "}\n";
0417 out << SP << "}\n";
0418 }
0419 else if(fDim==3){
0420
0421 out << SP << "size_t outIndex = 0;\n";
0422 out << SP << "for (size_t n = 0; n < " << fShapeX[0]*fShapeX[1] << "; n++) {\n";
0423 out << SP << SP << "size_t inputOffset = n*" << fShapeX[2]*fShapeX[3]*fShapeX[4] << ";\n";
0424 out << SP << SP << "for (int i = hmin; i < hmax; i+=" << fAttrStrides[0] << ") {\n";
0425 out << SP << SP << SP << "for (int j = wmin; j < wmax; j+=" << fAttrStrides[1] << ") {\n";
0426 out << SP << SP << SP << SP << "for (int k = dmin; k < dmax; k+=" << fAttrStrides[2] << ") {\n";
0427
0428 if (fPoolMode == MaxPool)
0429 out << SP << SP << SP << SP << "float value = -INFINITY;\n";
0430 else if (fPoolMode == AveragePool) {
0431 out << SP << SP << SP << SP << "float value = 0;\n";
0432 if (fAttrCountIncludePad == 0 && doPadding)
0433 out << SP << SP << SP << SP << "int nsum = 0;\n";
0434 else
0435 out << SP << SP << SP << SP << "constexpr int nsum = kw*kh*kd;\n";
0436 }
0437
0438 out << SP << SP << SP << SP << "for (int l = i; l < i + kh; l++) {\n";
0439 out << SP << SP << SP << SP << SP << "if (l < 0 || l >= hsize) continue;\n";
0440
0441 out << SP << SP << SP << SP << SP << "for (int m = j; m < j + kw; m++) {\n";
0442 out << SP << SP << SP << SP << SP << SP << "if (m < 0 || m >= wsize) continue;\n";
0443
0444 out << SP << SP << SP << SP << SP << SP << "for (int p = k; p < k + kd; p++) {\n";
0445 out << SP << SP << SP << SP << SP << SP << SP << "if (p < 0 || p >= dsize) continue;\n";
0446 out << SP << SP << SP << SP << SP << SP << SP << SP << "int index = inputOffset + l*dwsize + m*dsize + p;\n";
0447
0448 if (fPoolMode == MaxPool) {
0449 out << SP << SP << SP << SP << SP << SP << SP << SP << "auto xval = tensor_" << fNX << "[index];\n";
0450 out << SP << SP << SP << SP << SP << SP << SP << SP << "if (xval > value) value = xval;\n";
0451 }
0452 else if (fPoolMode == AveragePool) {
0453
0454 out << SP << SP << SP << SP << SP << SP << SP << SP << "value += tensor_" << fNX << "[index];\n";
0455 if (fAttrCountIncludePad == 0 && doPadding)
0456
0457 out << SP << SP << SP << SP << SP << SP << SP << SP << "nsum++;\n";
0458 }
0459 out << SP << SP << SP << SP << SP << SP << "}\n";
0460 out << SP << SP << SP << SP << SP << "}\n";
0461 out << SP << SP << SP << SP << "}\n";
0462 if (fPoolMode == AveragePool) {
0463
0464 out << SP << SP << SP << SP << "value /= float(nsum);\n";
0465 }
0466
0467 out << SP << SP << SP << SP << "tensor_" << fNY << "[outIndex++] = value;\n";
0468 out << SP << SP << SP << SP << "}\n" ;
0469 out << SP << SP << SP << "}\n";
0470 out << SP << SP << "}\n";
0471 out << SP << "}\n";
0472 }
0473
0474 out << SP << "}\n";
0475
0476
0477 return out.str();
0478 }
0479 };
0480
0481 }
0482 }
0483 }
0484
0485
0486 #endif