2 * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #ifndef __ONERT_BACKEND_TRAIN_OPS_FULLYCONNECTEDLAYER_H__
18 #define __ONERT_BACKEND_TRAIN_OPS_FULLYCONNECTEDLAYER_H__
20 #include "../ExternalContext.h"
21 #include "../Tensor.h"
23 #include <exec/train/ITrainableFunction.h>
24 #include <ops/FullyConnectedLayer.h>
// Trainable fully-connected (dense) layer for the train backend.
// Inherits the cpu backend's FullyConnectedLayer (reusing its inference-time
// machinery) and implements the ITrainableFunction interface so the layer can
// participate in training (forward + backward passes).
// NOTE(review): this chunk is elided — the class braces, access specifiers and
// enclosing namespace lines are not visible here; only comments are added.
35 class FullyConnectedLayer : public exec::train::ITrainableFunction,
36 public cpu::ops::FullyConnectedLayer
39 FullyConnectedLayer();
40 ~FullyConnectedLayer();
// Wires up all tensors and settings used by both passes:
//  - input/weights/bias/output: the usual forward-pass operands
//  - deriv_input: derivative w.r.t. the layer input, written by backward()
//  - grad_weights/grad_bias: parameter gradients, written by backward()
//  - deriv_output: incoming derivative from the next layer (read-only)
//  - activation / weights_format: same semantics as the cpu op's configure
//  - external_context: shared backend context (e.g. threading resources) —
//    presumably; its use is not visible in this header.
43 void configure(const IPortableTensor *input, const IPortableTensor *weights,
44 const IPortableTensor *bias, IPortableTensor *output, IPortableTensor *deriv_input,
45 IPortableTensor *grad_weights, IPortableTensor *grad_bias,
46 const IPortableTensor *deriv_output, ir::Activation activation,
47 ir::FullyConnectedWeightsFormat weights_format,
48 const std::shared_ptr<train::ExternalContext> &external_context);
// ITrainableFunction overrides: forward(training) runs the forward pass
// (the flag distinguishes training from inference mode — TODO confirm against
// the interface docs); backward() propagates derivatives and accumulates
// parameter gradients.
50 void forward(bool training) override;
51 void backward() override;
// Float32-specific backward implementation — presumably dispatched from
// backward() based on tensor data type; the dispatch is not visible here.
54 void backwardFloat32();
// Gradient / derivative tensors registered in configure(); not owned here
// (raw non-owning pointers into tensors managed by the backend).
57 IPortableTensor *_grad_weights;
58 IPortableTensor *_grad_bias;
59 IPortableTensor *_deriv_input;
60 const IPortableTensor *_deriv_output;
// Owned scratch tensors for the backward pass (transposed operands and the
// activation-derivative buffer).
62 // TODO Optimize memory
63 std::unique_ptr<Tensor> _transposed_weights;
64 std::unique_ptr<Tensor> _transposed_input;
65 std::unique_ptr<Tensor> _transposed_deriv_output;
66 std::unique_ptr<Tensor> _act_deriv_output;
71 } // namespace backend
74 #endif // __ONERT_BACKEND_TRAIN_OPS_FULLYCONNECTEDLAYER_H__