/*
 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __ONERT_BACKEND_TRAIN_OPS_FULLYCONNECTEDLAYER_H__
#define __ONERT_BACKEND_TRAIN_OPS_FULLYCONNECTEDLAYER_H__

#include "../ExternalContext.h"
#include "../Tensor.h"

#include <exec/train/ITrainableFunction.h>
#include <ops/FullyConnectedLayer.h>

namespace onert
{
namespace backend
{
namespace train
{
namespace ops
{

// Trainable fully-connected layer. It extends the cpu backend kernel
// (cpu::ops::FullyConnectedLayer) with a backward pass that produces the input
// derivative and the weight/bias gradients.
class FullyConnectedLayer : public exec::train::ITrainableFunction,
                            public cpu::ops::FullyConnectedLayer
{
public:
  FullyConnectedLayer();
  ~FullyConnectedLayer();

public:
  // Binds the forward tensors (input/weights/bias/output) together with the training
  // tensors: deriv_output is read during backward(), while deriv_input, grad_weights
  // and grad_bias are written by it.
  void configure(const IPortableTensor *input, const IPortableTensor *weights,
                 const IPortableTensor *bias, IPortableTensor *output, IPortableTensor *deriv_input,
                 IPortableTensor *grad_weights, IPortableTensor *grad_bias,
                 const IPortableTensor *deriv_output, ir::Activation activation,
                 ir::FullyConnectedWeightsFormat weights_format,
                 const std::shared_ptr<train::ExternalContext> &external_context);

  void forward(bool training) override;
  void backward() override;

private:
  void backwardFloat32();

private:
  // Gradient/derivative tensors written by backward()
  IPortableTensor *_grad_weights;
  IPortableTensor *_grad_bias;
  IPortableTensor *_deriv_input;
  // Incoming derivative read by backward()
  const IPortableTensor *_deriv_output;

  // Scratch tensors for the backward pass: transposed copies of the operands and the
  // derivative propagated back through the fused activation
  // TODO Optimize memory
  std::unique_ptr<Tensor> _transposed_weights;
  std::unique_ptr<Tensor> _transposed_input;
  std::unique_ptr<Tensor> _transposed_deriv_output;
  std::unique_ptr<Tensor> _act_deriv_output;
};
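
// Usage sketch (illustrative only, not part of the declared API; the tensor pointers,
// activation and weights format below are hypothetical placeholders). Conceptually,
// for the float path backward() computes roughly:
//   deriv_input  = deriv_output x weights
//   grad_weights = deriv_output^T x input
//   grad_bias    = sum of deriv_output over the batch
//
//   auto fc = std::make_unique<FullyConnectedLayer>();
//   fc->configure(input, weights, bias, output,         // forward tensors
//                 deriv_input, grad_weights, grad_bias, // written by backward()
//                 deriv_output,                         // read by backward()
//                 ir::Activation::NONE, ir::FullyConnectedWeightsFormat::Default,
//                 external_context);
//   fc->forward(/*training=*/true);
//   fc->backward(); // fills grad_weights, grad_bias and deriv_input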

} // namespace ops
} // namespace train
} // namespace backend
} // namespace onert

#endif // __ONERT_BACKEND_TRAIN_OPS_FULLYCONNECTEDLAYER_H__