#ifndef TMVA_DNN_LSTM_LAYER
#define TMVA_DNN_LSTM_LAYER

#include <cassert>
#include <cmath>
#include <iostream>
#include <vector>

#include "TMatrix.h"
#include "TMVA/DNN/Functions.h"

namespace TMVA
{
namespace DNN
{
namespace RNN
{
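
/** \class TBasicLSTMLayer
    Generic long short-term memory (LSTM) recurrent layer.

    The layer keeps a hidden state of size \p stateSize and a cell state of the
    same size, updated over \p timeSteps time steps through an input gate, a
    forget gate, an output gate and a candidate value. The gate activation
    function \p f1 defaults to a sigmoid, the candidate/cell activation \p f2 to
    a hyperbolic tangent. With \p returnSequence the layer outputs the hidden
    state of every time step, otherwise only that of the last one; with
    \p rememberState the hidden and cell states are not reset between forward
    passes.
*/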
template<typename Architecture_t>
class TBasicLSTMLayer : public VGeneralLayer<Architecture_t>
{

public:

   using Matrix_t = typename Architecture_t::Matrix_t;
   using Scalar_t = typename Architecture_t::Scalar_t;
   using Tensor_t = typename Architecture_t::Tensor_t;

   using LayerDescriptor_t = typename Architecture_t::RecurrentDescriptor_t;
   using WeightsDescriptor_t = typename Architecture_t::FilterDescriptor_t;
   using TensorDescriptor_t = typename Architecture_t::TensorDescriptor_t;
   using HelperDescriptor_t = typename Architecture_t::DropoutDescriptor_t;

   using RNNWorkspace_t = typename Architecture_t::RNNWorkspace_t;
   using RNNDescriptors_t = typename Architecture_t::RNNDescriptors_t;

private:

   size_t fStateSize;              ///< Hidden state size
   size_t fCellSize;               ///< Cell state size (same as the hidden state size)
   size_t fTimeSteps;              ///< Number of time steps

   bool fRememberState;            ///< Keep the hidden and cell states between forward passes
   bool fReturnSequence = false;   ///< Return the full output sequence instead of only the last time step

   DNN::EActivationFunction fF1;   ///< Activation function of the gates (default: sigmoid)
   DNN::EActivationFunction fF2;   ///< Activation function of the candidate value and cell output (default: tanh)

   Matrix_t fInputValue;           ///< Current input gate values
   Matrix_t fCandidateValue;       ///< Current candidate values
   Matrix_t fForgetValue;          ///< Current forget gate values
   Matrix_t fOutputValue;          ///< Current output gate values
   Matrix_t fState;                ///< Hidden state h_t
   Matrix_t fCell;                 ///< Cell state C_t

   Matrix_t &fWeightsInputGate;        ///< Input gate weights, applied to the input x_t
   Matrix_t &fWeightsInputGateState;   ///< Input gate weights, applied to the previous hidden state
   Matrix_t &fInputGateBias;           ///< Input gate bias

   Matrix_t &fWeightsForgetGate;       ///< Forget gate weights, applied to the input x_t
   Matrix_t &fWeightsForgetGateState;  ///< Forget gate weights, applied to the previous hidden state
   Matrix_t &fForgetGateBias;          ///< Forget gate bias

   Matrix_t &fWeightsCandidate;        ///< Candidate value weights, applied to the input x_t
   Matrix_t &fWeightsCandidateState;   ///< Candidate value weights, applied to the previous hidden state
   Matrix_t &fCandidateBias;           ///< Candidate value bias

   Matrix_t &fWeightsOutputGate;       ///< Output gate weights, applied to the input x_t
   Matrix_t &fWeightsOutputGateState;  ///< Output gate weights, applied to the previous hidden state
   Matrix_t &fOutputGateBias;          ///< Output gate bias

   std::vector<Matrix_t> input_gate_value;       ///< Input gate values for each time step
   std::vector<Matrix_t> forget_gate_value;      ///< Forget gate values for each time step
   std::vector<Matrix_t> candidate_gate_value;   ///< Candidate values for each time step
   std::vector<Matrix_t> output_gate_value;      ///< Output gate values for each time step
   std::vector<Matrix_t> cell_value;             ///< Cell state values for each time step
   std::vector<Matrix_t> fDerivativesInput;      ///< Activation derivatives of the input gate for each time step
   std::vector<Matrix_t> fDerivativesForget;     ///< Activation derivatives of the forget gate for each time step
   std::vector<Matrix_t> fDerivativesCandidate;  ///< Activation derivatives of the candidate value for each time step
   std::vector<Matrix_t> fDerivativesOutput;     ///< Activation derivatives of the output gate for each time step

   Matrix_t &fWeightsInputGradients;           ///< Gradients w.r.t. the input gate - input weights
   Matrix_t &fWeightsInputStateGradients;      ///< Gradients w.r.t. the input gate - previous state weights
   Matrix_t &fInputBiasGradients;              ///< Gradients w.r.t. the input gate - bias
   Matrix_t &fWeightsForgetGradients;          ///< Gradients w.r.t. the forget gate - input weights
   Matrix_t &fWeightsForgetStateGradients;     ///< Gradients w.r.t. the forget gate - previous state weights
   Matrix_t &fForgetBiasGradients;             ///< Gradients w.r.t. the forget gate - bias
   Matrix_t &fWeightsCandidateGradients;       ///< Gradients w.r.t. the candidate value - input weights
   Matrix_t &fWeightsCandidateStateGradients;  ///< Gradients w.r.t. the candidate value - previous state weights
   Matrix_t &fCandidateBiasGradients;          ///< Gradients w.r.t. the candidate value - bias
   Matrix_t &fWeightsOutputGradients;          ///< Gradients w.r.t. the output gate - input weights
   Matrix_t &fWeightsOutputStateGradients;     ///< Gradients w.r.t. the output gate - previous state weights
   Matrix_t &fOutputBiasGradients;             ///< Gradients w.r.t. the output gate - bias

   Tensor_t fWeightsTensor;           ///< Tensor holding all layer weights (used by the cuDNN backend)
   Tensor_t fWeightGradientsTensor;   ///< Tensor holding all weight gradients (used by the cuDNN backend)

   Tensor_t fX;    ///< Cached input tensor in time-major order
   Tensor_t fY;    ///< Cached output of all time steps
   Tensor_t fDx;   ///< Gradient w.r.t. the layer input
   Tensor_t fDy;   ///< Gradient w.r.t. the layer output

   TDescriptors *fDescriptors = nullptr;   ///< Backend RNN descriptors (e.g. cuDNN)
   TWorkspace *fWorkspace = nullptr;       ///< Backend RNN workspace (e.g. cuDNN)

public:

   /*! Constructor */
   TBasicLSTMLayer(size_t batchSize, size_t stateSize, size_t inputSize, size_t timeSteps, bool rememberState = false,
                   bool returnSequence = false,
                   DNN::EActivationFunction f1 = DNN::EActivationFunction::kSigmoid,
                   DNN::EActivationFunction f2 = DNN::EActivationFunction::kTanh, bool training = true,
                   DNN::EInitialization fA = DNN::EInitialization::kZero);

   /*! Copy constructor */
   TBasicLSTMLayer(const TBasicLSTMLayer &);

   /*! Initialize the weights and the backend descriptors and workspace. */
   virtual void Initialize();

   /*! Initialize the hidden state and the cell state. */
   void InitState(DNN::EInitialization m = DNN::EInitialization::kZero);

   /*! Compute the hidden state and cell state for every time step of the input
    *  and store the result in the layer output. */
   void Forward(Tensor_t &input, bool isTraining = true);

   /*! Forward pass of a single LSTM cell: combine the gate values into the new
    *  cell state and hidden state. */
   void CellForward(Matrix_t &inputGateValues, const Matrix_t &forgetGateValues,
                    const Matrix_t &candidateValues, const Matrix_t &outputGateValues);

   /*! Backpropagate the error through the full sequence; must be called after
    *  a corresponding call to Forward. */
   void Backward(Tensor_t &gradients_backward,
                 const Tensor_t &activations_backward);

   /*! Update the weights and biases, given the learning rate. */
   void Update(const Scalar_t learningRate);

   /*! Backward pass of a single time step (LSTM cell). */
   Matrix_t & CellBackward(Matrix_t & state_gradients_backward,
                           Matrix_t & cell_gradients_backward,
                           const Matrix_t & precStateActivations, const Matrix_t & precCellActivations,
                           const Matrix_t & input_gate, const Matrix_t & forget_gate,
                           const Matrix_t & candidate_gate, const Matrix_t & output_gate,
                           const Matrix_t & input, Matrix_t & input_gradient,
                           Matrix_t &di, Matrix_t &df, Matrix_t &dc, Matrix_t &dout, size_t t);

   /*! Compute the input gate values and their activation derivatives. */
   void InputGate(const Matrix_t &input, Matrix_t &di);

   /*! Compute the forget gate values and their activation derivatives. */
   void ForgetGate(const Matrix_t &input, Matrix_t &df);

   /*! Compute the candidate values and their activation derivatives. */
   void CandidateValue(const Matrix_t &input, Matrix_t &dc);

   /*! Compute the output gate values and their activation derivatives. */
   void OutputGate(const Matrix_t &input, Matrix_t &dout);

   /*! Print information about the layer. */
   void Print() const;

   /*! Write the layer configuration and weights to an XML node. */
   void AddWeightsXMLTo(void *parent);

   /*! Read the layer weights from an XML node. */
   void ReadWeightsFromXML(void *parent);

   size_t GetInputSize() const { return this->GetInputWidth(); }
   size_t GetTimeSteps() const { return fTimeSteps; }
   size_t GetStateSize() const { return fStateSize; }
   size_t GetCellSize() const { return fCellSize; }

   inline bool DoesRememberState() const { return fRememberState; }
   inline bool DoesReturnSequence() const { return fReturnSequence; }

   inline DNN::EActivationFunction GetActivationFunctionF1() const { return fF1; }
   inline DNN::EActivationFunction GetActivationFunctionF2() const { return fF2; }

   const Matrix_t & GetInputGateValue() const { return fInputValue; }
   Matrix_t & GetInputGateValue() { return fInputValue; }
   const Matrix_t & GetCandidateValue() const { return fCandidateValue; }
   Matrix_t & GetCandidateValue() { return fCandidateValue; }
   const Matrix_t & GetForgetGateValue() const { return fForgetValue; }
   Matrix_t & GetForgetGateValue() { return fForgetValue; }
   const Matrix_t & GetOutputGateValue() const { return fOutputValue; }
   Matrix_t & GetOutputGateValue() { return fOutputValue; }

   const Matrix_t & GetState() const { return fState; }
   Matrix_t & GetState() { return fState; }
   const Matrix_t & GetCell() const { return fCell; }
   Matrix_t & GetCell() { return fCell; }

   const Matrix_t & GetWeightsInputGate() const { return fWeightsInputGate; }
   Matrix_t & GetWeightsInputGate() { return fWeightsInputGate; }
   const Matrix_t & GetWeightsCandidate() const { return fWeightsCandidate; }
   Matrix_t & GetWeightsCandidate() { return fWeightsCandidate; }
   const Matrix_t & GetWeightsForgetGate() const { return fWeightsForgetGate; }
   Matrix_t & GetWeightsForgetGate() { return fWeightsForgetGate; }
   const Matrix_t & GetWeightsOutputGate() const { return fWeightsOutputGate; }
   Matrix_t & GetWeightsOutputGate() { return fWeightsOutputGate; }
   const Matrix_t & GetWeightsInputGateState() const { return fWeightsInputGateState; }
   Matrix_t & GetWeightsInputGateState() { return fWeightsInputGateState; }
   const Matrix_t & GetWeightsForgetGateState() const { return fWeightsForgetGateState; }
   Matrix_t & GetWeightsForgetGateState() { return fWeightsForgetGateState; }
   const Matrix_t & GetWeightsCandidateState() const { return fWeightsCandidateState; }
   Matrix_t & GetWeightsCandidateState() { return fWeightsCandidateState; }
   const Matrix_t & GetWeightsOutputGateState() const { return fWeightsOutputGateState; }
   Matrix_t & GetWeightsOutputGateState() { return fWeightsOutputGateState; }

   const std::vector<Matrix_t> & GetDerivativesInput() const { return fDerivativesInput; }
   std::vector<Matrix_t> & GetDerivativesInput() { return fDerivativesInput; }
   const Matrix_t & GetInputDerivativesAt(size_t i) const { return fDerivativesInput[i]; }
   Matrix_t & GetInputDerivativesAt(size_t i) { return fDerivativesInput[i]; }
   const std::vector<Matrix_t> & GetDerivativesForget() const { return fDerivativesForget; }
   std::vector<Matrix_t> & GetDerivativesForget() { return fDerivativesForget; }
   const Matrix_t & GetForgetDerivativesAt(size_t i) const { return fDerivativesForget[i]; }
   Matrix_t & GetForgetDerivativesAt(size_t i) { return fDerivativesForget[i]; }
   const std::vector<Matrix_t> & GetDerivativesCandidate() const { return fDerivativesCandidate; }
   std::vector<Matrix_t> & GetDerivativesCandidate() { return fDerivativesCandidate; }
   const Matrix_t & GetCandidateDerivativesAt(size_t i) const { return fDerivativesCandidate[i]; }
   Matrix_t & GetCandidateDerivativesAt(size_t i) { return fDerivativesCandidate[i]; }
   const std::vector<Matrix_t> & GetDerivativesOutput() const { return fDerivativesOutput; }
   std::vector<Matrix_t> & GetDerivativesOutput() { return fDerivativesOutput; }
   const Matrix_t & GetOutputDerivativesAt(size_t i) const { return fDerivativesOutput[i]; }
   Matrix_t & GetOutputDerivativesAt(size_t i) { return fDerivativesOutput[i]; }

   const std::vector<Matrix_t> & GetInputGateTensor() const { return input_gate_value; }
   std::vector<Matrix_t> & GetInputGateTensor() { return input_gate_value; }
   const Matrix_t & GetInputGateTensorAt(size_t i) const { return input_gate_value[i]; }
   Matrix_t & GetInputGateTensorAt(size_t i) { return input_gate_value[i]; }
   const std::vector<Matrix_t> & GetForgetGateTensor() const { return forget_gate_value; }
   std::vector<Matrix_t> & GetForgetGateTensor() { return forget_gate_value; }
   const Matrix_t & GetForgetGateTensorAt(size_t i) const { return forget_gate_value[i]; }
   Matrix_t & GetForgetGateTensorAt(size_t i) { return forget_gate_value[i]; }
   const std::vector<Matrix_t> & GetCandidateGateTensor() const { return candidate_gate_value; }
   std::vector<Matrix_t> & GetCandidateGateTensor() { return candidate_gate_value; }
   const Matrix_t & GetCandidateGateTensorAt(size_t i) const { return candidate_gate_value[i]; }
   Matrix_t & GetCandidateGateTensorAt(size_t i) { return candidate_gate_value[i]; }
   const std::vector<Matrix_t> & GetOutputGateTensor() const { return output_gate_value; }
   std::vector<Matrix_t> & GetOutputGateTensor() { return output_gate_value; }
   const Matrix_t & GetOutputGateTensorAt(size_t i) const { return output_gate_value[i]; }
   Matrix_t & GetOutputGateTensorAt(size_t i) { return output_gate_value[i]; }
   const std::vector<Matrix_t> & GetCellTensor() const { return cell_value; }
   std::vector<Matrix_t> & GetCellTensor() { return cell_value; }
   const Matrix_t & GetCellTensorAt(size_t i) const { return cell_value[i]; }
   Matrix_t & GetCellTensorAt(size_t i) { return cell_value[i]; }

   const Matrix_t & GetInputGateBias() const { return fInputGateBias; }
   Matrix_t & GetInputGateBias() { return fInputGateBias; }
   const Matrix_t & GetForgetGateBias() const { return fForgetGateBias; }
   Matrix_t & GetForgetGateBias() { return fForgetGateBias; }
   const Matrix_t & GetCandidateBias() const { return fCandidateBias; }
   Matrix_t & GetCandidateBias() { return fCandidateBias; }
   const Matrix_t & GetOutputGateBias() const { return fOutputGateBias; }
   Matrix_t & GetOutputGateBias() { return fOutputGateBias; }
   const Matrix_t & GetWeightsInputGradients() const { return fWeightsInputGradients; }
   Matrix_t & GetWeightsInputGradients() { return fWeightsInputGradients; }
   const Matrix_t & GetWeightsInputStateGradients() const { return fWeightsInputStateGradients; }
   Matrix_t & GetWeightsInputStateGradients() { return fWeightsInputStateGradients; }
   const Matrix_t & GetInputBiasGradients() const { return fInputBiasGradients; }
   Matrix_t & GetInputBiasGradients() { return fInputBiasGradients; }
   const Matrix_t & GetWeightsForgetGradients() const { return fWeightsForgetGradients; }
   Matrix_t & GetWeightsForgetGradients() { return fWeightsForgetGradients; }
   const Matrix_t & GetWeightsForgetStateGradients() const { return fWeightsForgetStateGradients; }
   Matrix_t & GetWeightsForgetStateGradients() { return fWeightsForgetStateGradients; }
   const Matrix_t & GetForgetBiasGradients() const { return fForgetBiasGradients; }
   Matrix_t & GetForgetBiasGradients() { return fForgetBiasGradients; }
   const Matrix_t & GetWeightsCandidateGradients() const { return fWeightsCandidateGradients; }
   Matrix_t & GetWeightsCandidateGradients() { return fWeightsCandidateGradients; }
   const Matrix_t & GetWeightsCandidateStateGradients() const { return fWeightsCandidateStateGradients; }
   Matrix_t & GetWeightsCandidateStateGradients() { return fWeightsCandidateStateGradients; }
   const Matrix_t & GetCandidateBiasGradients() const { return fCandidateBiasGradients; }
   Matrix_t & GetCandidateBiasGradients() { return fCandidateBiasGradients; }
   const Matrix_t & GetWeightsOutputGradients() const { return fWeightsOutputGradients; }
   Matrix_t & GetWeightsOutputGradients() { return fWeightsOutputGradients; }
   const Matrix_t & GetWeightsOutputStateGradients() const { return fWeightsOutputStateGradients; }
   Matrix_t & GetWeightsOutputStateGradients() { return fWeightsOutputStateGradients; }
   const Matrix_t & GetOutputBiasGradients() const { return fOutputBiasGradients; }
   Matrix_t & GetOutputBiasGradients() { return fOutputBiasGradients; }

   Tensor_t &GetWeightsTensor() { return fWeightsTensor; }
   const Tensor_t &GetWeightsTensor() const { return fWeightsTensor; }
   Tensor_t &GetWeightGradientsTensor() { return fWeightGradientsTensor; }
   const Tensor_t &GetWeightGradientsTensor() const { return fWeightGradientsTensor; }

   Tensor_t &GetX() { return fX; }
   Tensor_t &GetY() { return fY; }
   Tensor_t &GetDX() { return fDx; }
   Tensor_t &GetDY() { return fDy; }
};

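//______________________________________________________________________________
//
// Constructor: allocates the gate, state and cell matrices, binds the
// weight/bias references to the storage managed by VGeneralLayer, and creates
// the per-time-step caches used in the backward pass.
//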
template <typename Architecture_t>
TBasicLSTMLayer<Architecture_t>::TBasicLSTMLayer(size_t batchSize, size_t stateSize, size_t inputSize, size_t timeSteps,
                                                 bool rememberState, bool returnSequence, DNN::EActivationFunction f1,
                                                 DNN::EActivationFunction f2, bool /* training */,
                                                 DNN::EInitialization fA)
   : VGeneralLayer<Architecture_t>(
        batchSize, 1, timeSteps, inputSize, 1, (returnSequence) ? timeSteps : 1, stateSize, 8,
        {stateSize, stateSize, stateSize, stateSize, stateSize, stateSize, stateSize, stateSize},
        {inputSize, inputSize, inputSize, inputSize, stateSize, stateSize, stateSize, stateSize}, 4,
        {stateSize, stateSize, stateSize, stateSize}, {1, 1, 1, 1}, batchSize, (returnSequence) ? timeSteps : 1,
        stateSize, fA),
     fStateSize(stateSize), fCellSize(stateSize), fTimeSteps(timeSteps), fRememberState(rememberState),
     fReturnSequence(returnSequence), fF1(f1), fF2(f2), fInputValue(batchSize, stateSize),
     fCandidateValue(batchSize, stateSize), fForgetValue(batchSize, stateSize), fOutputValue(batchSize, stateSize),
     fState(batchSize, stateSize), fCell(batchSize, stateSize), fWeightsInputGate(this->GetWeightsAt(0)),
     fWeightsInputGateState(this->GetWeightsAt(4)), fInputGateBias(this->GetBiasesAt(0)),
     fWeightsForgetGate(this->GetWeightsAt(1)), fWeightsForgetGateState(this->GetWeightsAt(5)),
     fForgetGateBias(this->GetBiasesAt(1)), fWeightsCandidate(this->GetWeightsAt(2)),
     fWeightsCandidateState(this->GetWeightsAt(6)), fCandidateBias(this->GetBiasesAt(2)),
     fWeightsOutputGate(this->GetWeightsAt(3)), fWeightsOutputGateState(this->GetWeightsAt(7)),
     fOutputGateBias(this->GetBiasesAt(3)), fWeightsInputGradients(this->GetWeightGradientsAt(0)),
     fWeightsInputStateGradients(this->GetWeightGradientsAt(4)), fInputBiasGradients(this->GetBiasGradientsAt(0)),
     fWeightsForgetGradients(this->GetWeightGradientsAt(1)),
     fWeightsForgetStateGradients(this->GetWeightGradientsAt(5)), fForgetBiasGradients(this->GetBiasGradientsAt(1)),
     fWeightsCandidateGradients(this->GetWeightGradientsAt(2)),
     fWeightsCandidateStateGradients(this->GetWeightGradientsAt(6)),
     fCandidateBiasGradients(this->GetBiasGradientsAt(2)), fWeightsOutputGradients(this->GetWeightGradientsAt(3)),
     fWeightsOutputStateGradients(this->GetWeightGradientsAt(7)), fOutputBiasGradients(this->GetBiasGradientsAt(3))
{
   // Per-time-step caches for the gate values, cell values and activation derivatives
   for (size_t i = 0; i < timeSteps; ++i) {
      fDerivativesInput.emplace_back(batchSize, stateSize);
      fDerivativesForget.emplace_back(batchSize, stateSize);
      fDerivativesCandidate.emplace_back(batchSize, stateSize);
      fDerivativesOutput.emplace_back(batchSize, stateSize);
      input_gate_value.emplace_back(batchSize, stateSize);
      forget_gate_value.emplace_back(batchSize, stateSize);
      candidate_gate_value.emplace_back(batchSize, stateSize);
      output_gate_value.emplace_back(batchSize, stateSize);
      cell_value.emplace_back(batchSize, stateSize);
   }
   Architecture_t::InitializeLSTMTensors(this);
}

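//______________________________________________________________________________
//
// Copy constructor: in addition to the base-class copy, copies the cached
// state, cell and gate values of every time step.
//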
template <typename Architecture_t>
TBasicLSTMLayer<Architecture_t>::TBasicLSTMLayer(const TBasicLSTMLayer &layer)
   : VGeneralLayer<Architecture_t>(layer),
     fStateSize(layer.fStateSize),
     fCellSize(layer.fCellSize),
     fTimeSteps(layer.fTimeSteps),
     fRememberState(layer.fRememberState),
     fReturnSequence(layer.fReturnSequence),
     fF1(layer.GetActivationFunctionF1()),
     fF2(layer.GetActivationFunctionF2()),
     fInputValue(layer.GetBatchSize(), layer.GetStateSize()),
     fCandidateValue(layer.GetBatchSize(), layer.GetStateSize()),
     fForgetValue(layer.GetBatchSize(), layer.GetStateSize()),
     fOutputValue(layer.GetBatchSize(), layer.GetStateSize()),
     fState(layer.GetBatchSize(), layer.GetStateSize()),
     fCell(layer.GetBatchSize(), layer.GetCellSize()),
     fWeightsInputGate(this->GetWeightsAt(0)),
     fWeightsInputGateState(this->GetWeightsAt(4)),
     fInputGateBias(this->GetBiasesAt(0)),
     fWeightsForgetGate(this->GetWeightsAt(1)),
     fWeightsForgetGateState(this->GetWeightsAt(5)),
     fForgetGateBias(this->GetBiasesAt(1)),
     fWeightsCandidate(this->GetWeightsAt(2)),
     fWeightsCandidateState(this->GetWeightsAt(6)),
     fCandidateBias(this->GetBiasesAt(2)),
     fWeightsOutputGate(this->GetWeightsAt(3)),
     fWeightsOutputGateState(this->GetWeightsAt(7)),
     fOutputGateBias(this->GetBiasesAt(3)),
     fWeightsInputGradients(this->GetWeightGradientsAt(0)),
     fWeightsInputStateGradients(this->GetWeightGradientsAt(4)),
     fInputBiasGradients(this->GetBiasGradientsAt(0)),
     fWeightsForgetGradients(this->GetWeightGradientsAt(1)),
     fWeightsForgetStateGradients(this->GetWeightGradientsAt(5)),
     fForgetBiasGradients(this->GetBiasGradientsAt(1)),
     fWeightsCandidateGradients(this->GetWeightGradientsAt(2)),
     fWeightsCandidateStateGradients(this->GetWeightGradientsAt(6)),
     fCandidateBiasGradients(this->GetBiasGradientsAt(2)),
     fWeightsOutputGradients(this->GetWeightGradientsAt(3)),
     fWeightsOutputStateGradients(this->GetWeightGradientsAt(7)),
     fOutputBiasGradients(this->GetBiasGradientsAt(3))
{
   // Copy the per-time-step caches of gate values, cell values and derivatives
   for (size_t i = 0; i < fTimeSteps; ++i) {
      fDerivativesInput.emplace_back(layer.GetBatchSize(), layer.GetStateSize());
      Architecture_t::Copy(fDerivativesInput[i], layer.GetInputDerivativesAt(i));

      fDerivativesForget.emplace_back(layer.GetBatchSize(), layer.GetStateSize());
      Architecture_t::Copy(fDerivativesForget[i], layer.GetForgetDerivativesAt(i));

      fDerivativesCandidate.emplace_back(layer.GetBatchSize(), layer.GetStateSize());
      Architecture_t::Copy(fDerivativesCandidate[i], layer.GetCandidateDerivativesAt(i));

      fDerivativesOutput.emplace_back(layer.GetBatchSize(), layer.GetStateSize());
      Architecture_t::Copy(fDerivativesOutput[i], layer.GetOutputDerivativesAt(i));

      input_gate_value.emplace_back(layer.GetBatchSize(), layer.GetStateSize());
      Architecture_t::Copy(input_gate_value[i], layer.GetInputGateTensorAt(i));

      forget_gate_value.emplace_back(layer.GetBatchSize(), layer.GetStateSize());
      Architecture_t::Copy(forget_gate_value[i], layer.GetForgetGateTensorAt(i));

      candidate_gate_value.emplace_back(layer.GetBatchSize(), layer.GetStateSize());
      Architecture_t::Copy(candidate_gate_value[i], layer.GetCandidateGateTensorAt(i));

      output_gate_value.emplace_back(layer.GetBatchSize(), layer.GetStateSize());
      Architecture_t::Copy(output_gate_value[i], layer.GetOutputGateTensorAt(i));

      cell_value.emplace_back(layer.GetBatchSize(), layer.GetStateSize());
      Architecture_t::Copy(cell_value[i], layer.GetCellTensorAt(i));
   }

   // Copy the current hidden state and cell state
   Architecture_t::Copy(fState, layer.GetState());
   Architecture_t::Copy(fCell, layer.GetCell());

   // Copy the current gate values
   Architecture_t::Copy(fInputValue, layer.GetInputGateValue());
   Architecture_t::Copy(fCandidateValue, layer.GetCandidateValue());
   Architecture_t::Copy(fForgetValue, layer.GetForgetGateValue());
   Architecture_t::Copy(fOutputValue, layer.GetOutputGateValue());

   Architecture_t::InitializeLSTMTensors(this);
}

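//______________________________________________________________________________
//
// Initialize the weights (via the base class) and create the backend
// descriptors and workspace.
//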
template <typename Architecture_t>
void TBasicLSTMLayer<Architecture_t>::Initialize()
{
   VGeneralLayer<Architecture_t>::Initialize();

   Architecture_t::InitializeLSTMDescriptors(fDescriptors, this);
   Architecture_t::InitializeLSTMWorkspace(fWorkspace, fDescriptors, this);
}

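//______________________________________________________________________________
/// Input gate: i_t = f1(x_t . W_i^T + h_{t-1} . W_hi^T + b_i), where f1 is the
/// gate activation function (sigmoid by default). The derivative of the
/// activation, needed for backpropagation, is stored in \p di.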
template <typename Architecture_t>
auto inline TBasicLSTMLayer<Architecture_t>::InputGate(const Matrix_t &input, Matrix_t &di)
-> void
{
   const DNN::EActivationFunction fInp = this->GetActivationFunctionF1();
   Matrix_t tmpState(fInputValue.GetNrows(), fInputValue.GetNcols());
   Architecture_t::MultiplyTranspose(tmpState, fState, fWeightsInputGateState);
   Architecture_t::MultiplyTranspose(fInputValue, input, fWeightsInputGate);
   Architecture_t::ScaleAdd(fInputValue, tmpState);
   Architecture_t::AddRowWise(fInputValue, fInputGateBias);
   DNN::evaluateDerivativeMatrix<Architecture_t>(di, fInp, fInputValue);
   DNN::evaluateMatrix<Architecture_t>(fInputValue, fInp);
}

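//______________________________________________________________________________
/// Forget gate: f_t = f1(x_t . W_f^T + h_{t-1} . W_hf^T + b_f). The derivative
/// of the activation is stored in \p df.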
template <typename Architecture_t>
auto inline TBasicLSTMLayer<Architecture_t>::ForgetGate(const Matrix_t &input, Matrix_t &df)
-> void
{
   const DNN::EActivationFunction fFor = this->GetActivationFunctionF1();
   Matrix_t tmpState(fForgetValue.GetNrows(), fForgetValue.GetNcols());
   Architecture_t::MultiplyTranspose(tmpState, fState, fWeightsForgetGateState);
   Architecture_t::MultiplyTranspose(fForgetValue, input, fWeightsForgetGate);
   Architecture_t::ScaleAdd(fForgetValue, tmpState);
   Architecture_t::AddRowWise(fForgetValue, fForgetGateBias);
   DNN::evaluateDerivativeMatrix<Architecture_t>(df, fFor, fForgetValue);
   DNN::evaluateMatrix<Architecture_t>(fForgetValue, fFor);
}

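//______________________________________________________________________________
/// Candidate value: c~_t = f2(x_t . W_c^T + h_{t-1} . W_hc^T + b_c), where f2
/// is the candidate/cell activation function (tanh by default). The derivative
/// of the activation is stored in \p dc.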
template <typename Architecture_t>
auto inline TBasicLSTMLayer<Architecture_t>::CandidateValue(const Matrix_t &input, Matrix_t &dc)
-> void
{
   const DNN::EActivationFunction fCan = this->GetActivationFunctionF2();
   Matrix_t tmpState(fCandidateValue.GetNrows(), fCandidateValue.GetNcols());
   Architecture_t::MultiplyTranspose(tmpState, fState, fWeightsCandidateState);
   Architecture_t::MultiplyTranspose(fCandidateValue, input, fWeightsCandidate);
   Architecture_t::ScaleAdd(fCandidateValue, tmpState);
   Architecture_t::AddRowWise(fCandidateValue, fCandidateBias);
   DNN::evaluateDerivativeMatrix<Architecture_t>(dc, fCan, fCandidateValue);
   DNN::evaluateMatrix<Architecture_t>(fCandidateValue, fCan);
}

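//______________________________________________________________________________
/// Output gate: o_t = f1(x_t . W_o^T + h_{t-1} . W_ho^T + b_o). The derivative
/// of the activation is stored in \p dout.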
template <typename Architecture_t>
auto inline TBasicLSTMLayer<Architecture_t>::OutputGate(const Matrix_t &input, Matrix_t &dout)
-> void
{
   const DNN::EActivationFunction fOut = this->GetActivationFunctionF1();
   Matrix_t tmpState(fOutputValue.GetNrows(), fOutputValue.GetNcols());
   Architecture_t::MultiplyTranspose(tmpState, fState, fWeightsOutputGateState);
   Architecture_t::MultiplyTranspose(fOutputValue, input, fWeightsOutputGate);
   Architecture_t::ScaleAdd(fOutputValue, tmpState);
   Architecture_t::AddRowWise(fOutputValue, fOutputGateBias);
   DNN::evaluateDerivativeMatrix<Architecture_t>(dout, fOut, fOutputValue);
   DNN::evaluateMatrix<Architecture_t>(fOutputValue, fOut);
}

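//______________________________________________________________________________
//
// Forward pass: evaluates the four gates and the cell update for every time
// step and fills the layer output. If the architecture provides a cuDNN
// implementation, the whole sequence is delegated to Architecture_t::RNNForward.
//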
template <typename Architecture_t>
auto inline TBasicLSTMLayer<Architecture_t>::Forward(Tensor_t &input, bool isTraining )
-> void
{

   if (Architecture_t::IsCudnn()) {

      // The cuDNN backend processes the full sequence at once
      assert(input.GetStrides()[1] == this->GetInputSize());

      Tensor_t &x = this->fX;
      Tensor_t &y = this->fY;
      Architecture_t::Rearrange(x, input);   // reshuffle the input to time-major order

      const auto &weights = this->GetWeightsTensor();

      // Initial and final hidden/cell states (updated in place)
      auto &hx = this->fState;
      auto &cx = this->fCell;
      auto &hy = this->fState;
      auto &cy = this->fCell;

      auto & rnnDesc = static_cast<RNNDescriptors_t &>(*fDescriptors);
      auto & rnnWork = static_cast<RNNWorkspace_t &>(*fWorkspace);

      Architecture_t::RNNForward(x, hx, cx, weights, y, hy, cy, rnnDesc, rnnWork, isTraining);

      if (fReturnSequence) {
         Architecture_t::Rearrange(this->GetOutput(), y);
      } else {
         // Keep only the output of the last time step
         Tensor_t tmp = (y.At(y.GetShape()[0] - 1)).Reshape({y.GetShape()[1], 1, y.GetShape()[2]});
         Architecture_t::Copy(this->GetOutput(), tmp);
      }

      return;
   }

   // Standard (non-cuDNN) implementation: iterate explicitly over the time steps

   // Reshuffle the input to time-major order (T x B x inputSize)
   Tensor_t arrInput( fTimeSteps, this->GetBatchSize(), this->GetInputWidth());
   Architecture_t::Rearrange(arrInput, input);

   Tensor_t arrOutput ( fTimeSteps, this->GetBatchSize(), fStateSize);

   if (!this->fRememberState) {
      InitState(DNN::EInitialization::kZero);
   }

   // Evaluate the gates and the cell update for every time step
   for (size_t t = 0; t < fTimeSteps; ++t) {
      Matrix_t arrInputMt = arrInput[t];
      InputGate(arrInputMt, fDerivativesInput[t]);
      ForgetGate(arrInputMt, fDerivativesForget[t]);
      CandidateValue(arrInputMt, fDerivativesCandidate[t]);
      OutputGate(arrInputMt, fDerivativesOutput[t]);

      // Cache the gate values of this time step for the backward pass
      Architecture_t::Copy(this->GetInputGateTensorAt(t), fInputValue);
      Architecture_t::Copy(this->GetForgetGateTensorAt(t), fForgetValue);
      Architecture_t::Copy(this->GetCandidateGateTensorAt(t), fCandidateValue);
      Architecture_t::Copy(this->GetOutputGateTensorAt(t), fOutputValue);

      CellForward(fInputValue, fForgetValue, fCandidateValue, fOutputValue);
      Matrix_t arrOutputMt = arrOutput[t];
      Architecture_t::Copy(arrOutputMt, fState);
      Architecture_t::Copy(this->GetCellTensorAt(t), fCell);
   }

   if (fReturnSequence)
      Architecture_t::Rearrange(this->GetOutput(), arrOutput);
   else {
      // Keep only the hidden state of the last time step
      Tensor_t tmp = arrOutput.At(fTimeSteps - 1);

      tmp = tmp.Reshape( {tmp.GetShape()[0], tmp.GetShape()[1], 1});
      assert(tmp.GetSize() == this->GetOutput().GetSize());
      assert( tmp.GetShape()[0] == this->GetOutput().GetShape()[2]);
      Architecture_t::Rearrange(this->GetOutput(), tmp);

      // Cache the full sequence of hidden states for the backward pass
      fY = arrOutput;
   }
}

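//______________________________________________________________________________
/// Cell update for one time step:
///   C_t = f_t * C_{t-1} + i_t * c~_t   (element-wise products)
///   h_t = o_t * f2(C_t)
/// Note that \p inputGateValues is overwritten with i_t * c~_t in the process.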
template <typename Architecture_t>
auto inline TBasicLSTMLayer<Architecture_t>::CellForward(Matrix_t &inputGateValues, const Matrix_t &forgetGateValues,
                                                         const Matrix_t &candidateValues, const Matrix_t &outputGateValues)
-> void
{
   // New cell state: C_t = forget * C_{t-1} + input * candidate
   Architecture_t::Hadamard(fCell, forgetGateValues);
   Architecture_t::Hadamard(inputGateValues, candidateValues);
   Architecture_t::ScaleAdd(fCell, inputGateValues);

   Matrix_t cache(fCell.GetNrows(), fCell.GetNcols());
   Architecture_t::Copy(cache, fCell);

   // New hidden state: h_t = output * f2(C_t)
   const DNN::EActivationFunction fAT = this->GetActivationFunctionF2();
   DNN::evaluateMatrix<Architecture_t>(cache, fAT);

   Architecture_t::Copy(fState, cache);
   Architecture_t::Hadamard(fState, outputGateValues);
}

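//______________________________________________________________________________
//
// Backward pass: propagates the gradients backwards through the unrolled
// sequence and accumulates the weight and bias gradients of all four gates.
// With the cuDNN backend the work is delegated to Architecture_t::RNNBackward.
//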
template <typename Architecture_t>
auto inline TBasicLSTMLayer<Architecture_t>::Backward(Tensor_t &gradients_backward,
                                                      const Tensor_t &activations_backward)
-> void
{

   if (Architecture_t::IsCudnn()) {

      Tensor_t &x = this->fX;
      Tensor_t &y = this->fY;
      Tensor_t &dx = this->fDx;
      Tensor_t &dy = this->fDy;

      // activations_backward is the input of the forward pass
      assert(activations_backward.GetStrides()[1] == this->GetInputSize());

      Architecture_t::Rearrange(x, activations_backward);

      if (!fReturnSequence) {
         // Only the last time step receives a non-zero activation gradient
         Architecture_t::InitializeZero(dy);

         Tensor_t tmp2 = dy.At(dy.GetShape()[0] - 1).Reshape({dy.GetShape()[1], 1, dy.GetShape()[2]});

         Architecture_t::Copy(tmp2, this->GetActivationGradients());
      } else {
         Architecture_t::Rearrange(y, this->GetOutput());
         Architecture_t::Rearrange(dy, this->GetActivationGradients());
      }

      const auto &weights = this->GetWeightsTensor();
      auto &weightGradients = this->GetWeightGradientsTensor();

      // Reset the weight gradients before the backward pass
      Architecture_t::InitializeZero(weightGradients);

      // Initial hidden and cell state of the layer
      auto &hx = this->GetState();
      auto &cx = this->GetCell();

      // Reuse the state and cell matrices for the state/cell gradient arguments
      auto &dhy = hx;
      auto &dcy = cx;
      auto &dhx = hx;
      auto &dcx = cx;

      auto & rnnDesc = static_cast<RNNDescriptors_t &>(*fDescriptors);
      auto & rnnWork = static_cast<RNNWorkspace_t &>(*fWorkspace);

      Architecture_t::RNNBackward(x, hx, cx, y, dy, dhy, dcy, weights, dx, dhx, dcx, weightGradients, rnnDesc, rnnWork);

      if (gradients_backward.GetSize() != 0)
         Architecture_t::Rearrange(gradients_backward, dx);

      return;
   }

   // Standard (non-cuDNN) implementation

   // Gradient w.r.t. the hidden state, accumulated backwards in time
   Matrix_t state_gradients_backward(this->GetBatchSize(), fStateSize);
   DNN::initialize<Architecture_t>(state_gradients_backward, DNN::EInitialization::kZero);

   // Gradient w.r.t. the cell state, accumulated backwards in time
   Matrix_t cell_gradients_backward(this->GetBatchSize(), fStateSize);
   DNN::initialize<Architecture_t>(cell_gradients_backward, DNN::EInitialization::kZero);

   // If gradients_backward is an empty tensor (e.g. for the first layer),
   // the input gradients do not need to be written out
   bool dummy = false;
   if (gradients_backward.GetSize() == 0 || gradients_backward[0].GetNrows() == 0 || gradients_backward[0].GetNcols() == 0) {
      dummy = true;
   }

   Tensor_t arr_gradients_backward ( fTimeSteps, this->GetBatchSize(), this->GetInputSize());

   // Activations of the previous layer, reshuffled to time-major order
   Tensor_t arr_activations_backward ( fTimeSteps, this->GetBatchSize(), this->GetInputSize());
   Architecture_t::Rearrange(arr_activations_backward, activations_backward);

   // Hidden state output of every time step
   Tensor_t arr_output ( fTimeSteps, this->GetBatchSize(), fStateSize);

   Matrix_t initState(this->GetBatchSize(), fCellSize);
   DNN::initialize<Architecture_t>(initState, DNN::EInitialization::kZero);

   // Activation gradients for every time step
   Tensor_t arr_actgradients(fTimeSteps, this->GetBatchSize(), fStateSize);

   if (fReturnSequence) {
      Architecture_t::Rearrange(arr_output, this->GetOutput());
      Architecture_t::Rearrange(arr_actgradients, this->GetActivationGradients());
   } else {
      // Only the last time step receives a non-zero activation gradient
      arr_output = fY;
      Architecture_t::InitializeZero(arr_actgradients);

      Tensor_t tmp_grad = arr_actgradients.At(fTimeSteps - 1).Reshape( {this->GetBatchSize(), fStateSize, 1});
      assert(tmp_grad.GetSize() == this->GetActivationGradients().GetSize());
      assert(tmp_grad.GetShape()[0] == this->GetActivationGradients().GetShape()[2]);

      Architecture_t::Rearrange(tmp_grad, this->GetActivationGradients());
   }

   // Reset the accumulated weight and bias gradients of all gates

   fWeightsInputGradients.Zero();
   fWeightsInputStateGradients.Zero();
   fInputBiasGradients.Zero();

   fWeightsForgetGradients.Zero();
   fWeightsForgetStateGradients.Zero();
   fForgetBiasGradients.Zero();

   fWeightsCandidateGradients.Zero();
   fWeightsCandidateStateGradients.Zero();
   fCandidateBiasGradients.Zero();

   fWeightsOutputGradients.Zero();
   fWeightsOutputStateGradients.Zero();
   fOutputBiasGradients.Zero();

   // Backpropagation through time
   for (size_t t = fTimeSteps; t > 0; t--) {
      Architecture_t::ScaleAdd(state_gradients_backward, arr_actgradients[t-1]);
      if (t > 1) {
         const Matrix_t &prevStateActivations = arr_output[t-2];
         const Matrix_t &prevCellActivations = this->GetCellTensorAt(t-2);

         Matrix_t dx = arr_gradients_backward[t-1];
         CellBackward(state_gradients_backward, cell_gradients_backward,
                      prevStateActivations, prevCellActivations,
                      this->GetInputGateTensorAt(t-1), this->GetForgetGateTensorAt(t-1),
                      this->GetCandidateGateTensorAt(t-1), this->GetOutputGateTensorAt(t-1),
                      arr_activations_backward[t-1], dx,
                      fDerivativesInput[t-1], fDerivativesForget[t-1],
                      fDerivativesCandidate[t-1], fDerivativesOutput[t-1], t-1);
      } else {
         // First time step: the previous state and cell are the (zero) initial ones
         const Matrix_t &prevStateActivations = initState;
         const Matrix_t &prevCellActivations = initState;
         Matrix_t dx = arr_gradients_backward[t-1];
         CellBackward(state_gradients_backward, cell_gradients_backward,
                      prevStateActivations, prevCellActivations,
                      this->GetInputGateTensorAt(t-1), this->GetForgetGateTensorAt(t-1),
                      this->GetCandidateGateTensorAt(t-1), this->GetOutputGateTensorAt(t-1),
                      arr_activations_backward[t-1], dx,
                      fDerivativesInput[t-1], fDerivativesForget[t-1],
                      fDerivativesCandidate[t-1], fDerivativesOutput[t-1], t-1);
      }
   }

   if (!dummy) {
      // Reshuffle the input gradients back to the layout of gradients_backward
      Architecture_t::Rearrange(gradients_backward, arr_gradients_backward );
   }

}

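//______________________________________________________________________________
//
// Backward pass of one LSTM cell: computes the derivative of the cell-output
// activation and its value at C_t, then delegates the gradient computation of
// all gates, weights and biases to Architecture_t::LSTMLayerBackward.
//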
template <typename Architecture_t>
auto inline TBasicLSTMLayer<Architecture_t>::CellBackward(Matrix_t & state_gradients_backward,
                                                          Matrix_t & cell_gradients_backward,
                                                          const Matrix_t & precStateActivations, const Matrix_t & precCellActivations,
                                                          const Matrix_t & input_gate, const Matrix_t & forget_gate,
                                                          const Matrix_t & candidate_gate, const Matrix_t & output_gate,
                                                          const Matrix_t & input, Matrix_t & input_gradient,
                                                          Matrix_t &di, Matrix_t &df, Matrix_t &dc, Matrix_t &dout,
                                                          size_t t)
-> Matrix_t &
{
   // Derivative of the cell-output activation f2 evaluated at C_t
   const DNN::EActivationFunction fAT = this->GetActivationFunctionF2();
   Matrix_t cell_gradient(this->GetCellTensorAt(t).GetNrows(), this->GetCellTensorAt(t).GetNcols());
   DNN::evaluateDerivativeMatrix<Architecture_t>(cell_gradient, fAT, this->GetCellTensorAt(t));

   // f2(C_t), needed for the gradient of the hidden state
   Matrix_t cell_tanh(this->GetCellTensorAt(t).GetNrows(), this->GetCellTensorAt(t).GetNcols());
   Architecture_t::Copy(cell_tanh, this->GetCellTensorAt(t));
   DNN::evaluateMatrix<Architecture_t>(cell_tanh, fAT);

   return Architecture_t::LSTMLayerBackward(state_gradients_backward, cell_gradients_backward,
                                            fWeightsInputGradients, fWeightsForgetGradients, fWeightsCandidateGradients,
                                            fWeightsOutputGradients, fWeightsInputStateGradients, fWeightsForgetStateGradients,
                                            fWeightsCandidateStateGradients, fWeightsOutputStateGradients, fInputBiasGradients, fForgetBiasGradients,
                                            fCandidateBiasGradients, fOutputBiasGradients, di, df, dc, dout,
                                            precStateActivations, precCellActivations,
                                            input_gate, forget_gate, candidate_gate, output_gate,
                                            fWeightsInputGate, fWeightsForgetGate, fWeightsCandidate, fWeightsOutputGate,
                                            fWeightsInputGateState, fWeightsForgetGateState, fWeightsCandidateState,
                                            fWeightsOutputGateState, input, input_gradient,
                                            cell_gradient, cell_tanh);
}

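//______________________________________________________________________________
//
// Reset the hidden state and the cell state to zero.
//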
template <typename Architecture_t>
auto TBasicLSTMLayer<Architecture_t>::InitState(DNN::EInitialization )
-> void
{
   DNN::initialize<Architecture_t>(this->GetState(), DNN::EInitialization::kZero);
   DNN::initialize<Architecture_t>(this->GetCell(), DNN::EInitialization::kZero);
}

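//______________________________________________________________________________
//
// Print the layer configuration and the output shape.
//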
template<typename Architecture_t>
auto TBasicLSTMLayer<Architecture_t>::Print() const
-> void
{
   std::cout << " LSTM Layer: \t ";
   std::cout << " (NInput = " << this->GetInputSize();
   std::cout << ", NState = " << this->GetStateSize();
   std::cout << ", NTime = " << this->GetTimeSteps() << " )";
   std::cout << "\tOutput = ( " << this->GetOutput().GetFirstSize() << " , " << this->GetOutput()[0].GetNrows() << " , " << this->GetOutput()[0].GetNcols() << " )\n";
}

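//______________________________________________________________________________
//
// Write the layer configuration and all weight and bias matrices to XML.
//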
template <typename Architecture_t>
auto inline TBasicLSTMLayer<Architecture_t>::AddWeightsXMLTo(void *parent)
-> void
{
   auto layerxml = gTools().xmlengine().NewChild(parent, nullptr, "LSTMLayer");

   // Write the layer attributes
   gTools().xmlengine().NewAttr(layerxml, nullptr, "StateSize", gTools().StringFromInt(this->GetStateSize()));
   gTools().xmlengine().NewAttr(layerxml, nullptr, "CellSize", gTools().StringFromInt(this->GetCellSize()));
   gTools().xmlengine().NewAttr(layerxml, nullptr, "InputSize", gTools().StringFromInt(this->GetInputSize()));
   gTools().xmlengine().NewAttr(layerxml, nullptr, "TimeSteps", gTools().StringFromInt(this->GetTimeSteps()));
   gTools().xmlengine().NewAttr(layerxml, nullptr, "RememberState", gTools().StringFromInt(this->DoesRememberState()));
   gTools().xmlengine().NewAttr(layerxml, nullptr, "ReturnSequence", gTools().StringFromInt(this->DoesReturnSequence()));

   // Write the weight and bias matrices
   this->WriteMatrixToXML(layerxml, "InputWeights", this->GetWeightsAt(0));
   this->WriteMatrixToXML(layerxml, "InputStateWeights", this->GetWeightsAt(1));
   this->WriteMatrixToXML(layerxml, "InputBiases", this->GetBiasesAt(0));
   this->WriteMatrixToXML(layerxml, "ForgetWeights", this->GetWeightsAt(2));
   this->WriteMatrixToXML(layerxml, "ForgetStateWeights", this->GetWeightsAt(3));
   this->WriteMatrixToXML(layerxml, "ForgetBiases", this->GetBiasesAt(1));
   this->WriteMatrixToXML(layerxml, "CandidateWeights", this->GetWeightsAt(4));
   this->WriteMatrixToXML(layerxml, "CandidateStateWeights", this->GetWeightsAt(5));
   this->WriteMatrixToXML(layerxml, "CandidateBiases", this->GetBiasesAt(2));
   this->WriteMatrixToXML(layerxml, "OuputWeights", this->GetWeightsAt(6));
   this->WriteMatrixToXML(layerxml, "OutputStateWeights", this->GetWeightsAt(7));
   this->WriteMatrixToXML(layerxml, "OutputBiases", this->GetBiasesAt(3));
}

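//______________________________________________________________________________
//
// Read the weight and bias matrices from XML, in the same order as written by
// AddWeightsXMLTo.
//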
template <typename Architecture_t>
auto inline TBasicLSTMLayer<Architecture_t>::ReadWeightsFromXML(void *parent)
-> void
{
   // Read the weight and bias matrices
   this->ReadMatrixXML(parent, "InputWeights", this->GetWeightsAt(0));
   this->ReadMatrixXML(parent, "InputStateWeights", this->GetWeightsAt(1));
   this->ReadMatrixXML(parent, "InputBiases", this->GetBiasesAt(0));
   this->ReadMatrixXML(parent, "ForgetWeights", this->GetWeightsAt(2));
   this->ReadMatrixXML(parent, "ForgetStateWeights", this->GetWeightsAt(3));
   this->ReadMatrixXML(parent, "ForgetBiases", this->GetBiasesAt(1));
   this->ReadMatrixXML(parent, "CandidateWeights", this->GetWeightsAt(4));
   this->ReadMatrixXML(parent, "CandidateStateWeights", this->GetWeightsAt(5));
   this->ReadMatrixXML(parent, "CandidateBiases", this->GetBiasesAt(2));
   this->ReadMatrixXML(parent, "OuputWeights", this->GetWeightsAt(6));
   this->ReadMatrixXML(parent, "OutputStateWeights", this->GetWeightsAt(7));
   this->ReadMatrixXML(parent, "OutputBiases", this->GetBiasesAt(3));
}

} // namespace RNN
} // namespace DNN
} // namespace TMVA

#endif