/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "FullyConnectedLayer.h"

#include "../Tensor.h"
#include <ruy/operation/FullyConnected.h>
#include <ruy/TensorUtils.h>

namespace onert
{
namespace backend
{
namespace ruy
{
namespace ops
{

FullyConnectedLayer::FullyConnectedLayer()
    : _input(nullptr), _weights(nullptr), _bias(nullptr), _output(nullptr),
      _activation(ir::Activation::NONE), _external_context(nullptr)
{
  // DO NOTHING
}

FullyConnectedLayer::~FullyConnectedLayer() = default;

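// Float32 path: computes the fused activation range, fills the kernel
// parameters, and delegates the matrix product to the nnfw::ruy
// FullyConnected kernel on the backend's shared ruy context.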
void FullyConnectedLayer::fullyConnectedFloat32()
{
  float output_activation_min = 0, output_activation_max = 0;
  CalculateActivationRange(_activation, &output_activation_min, &output_activation_max);
  nnfw::ruy::FullyConnectedParams op_params;

  op_params.float_activation_min = output_activation_min;
  op_params.float_activation_max = output_activation_max;
  op_params.activation = convertActivationType(_activation);
  // Constant operands may be cached across invocations by the ruy GEMM.
  op_params.lhs_cacheable = _weights->is_constant();
  op_params.rhs_cacheable = _input->is_constant();

  // The bias pointer may be null: prepare() drops an all-zero constant bias.
  nnfw::ruy::FullyConnected(
      op_params, getTensorShape(_input), reinterpret_cast<const float *>(_input->buffer()),
      getTensorShape(_weights), reinterpret_cast<const float *>(_weights->buffer()),
      getTensorShape(_bias), reinterpret_cast<const float *>(_bias ? _bias->buffer() : nullptr),
      getTensorShape(_output), reinterpret_cast<float *>(_output->buffer()),
      _external_context->ruy_context());
}

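// Records operand handles and parameters for later use by run(); no
// computation happens here.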
void FullyConnectedLayer::configure(const IPortableTensor *input, const IPortableTensor *weights,
                                    const IPortableTensor *bias, ir::Activation activation,
                                    ir::FullyConnectedWeightsFormat weights_format,
                                    IPortableTensor *output,
                                    const std::shared_ptr<ExternalContext> &external_context)
{
  // The weights format is not used by this backend.
  UNUSED_RELEASE(weights_format);
  _input = input;
  _weights = weights;
  _bias = bias;
  _activation = activation;
  _output = output;
  _external_context = external_context;
}

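// Dispatches on the input data type; this backend implements only the
// float32 path.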
void FullyConnectedLayer::run()
{
  if (_input->data_type() == OperandType::FLOAT32)
  {
    fullyConnectedFloat32();
  }
  else
  {
    throw std::runtime_error{"FullyConnected: unsupported data type"};
  }
}

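// One-time preparation: a constant bias that is entirely zero contributes
// nothing to the output, so it is dropped and the kernel runs bias-free.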
void FullyConnectedLayer::prepare()
{
  if (_bias && _bias->is_constant())
  {
    const int bias_size = getTensorShape(_bias).FlatSize();
    if (nnfw::ruy::IsZeroVector(reinterpret_cast<const float *>(_bias->buffer()), bias_size))
    {
      _bias = nullptr;
    }
  }
}

} // namespace ops
} // namespace ruy
} // namespace backend
} // namespace onert