2 * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include "ConcatLayer.h"
19 #include "OperationUtils.h"
21 #include <cker/operation/Concatenation.h>
// Default-construct with an empty input list, no output tensor, and concat axis 0;
// configure() must be called before run().
ConcatLayer::ConcatLayer() : _inputs(), _output(nullptr), _axis(0)
37 template <typename T> void ConcatLayer::concatenationGeneral()
39 uint32_t num_inputs = _inputs.size();
41 nnfw::cker::ConcatenationParams op_params;
42 op_params.axis = _axis;
43 op_params.inputs_count = num_inputs;
45 std::vector<nnfw::cker::Shape *> inputDimsPtr;
46 std::vector<nnfw::cker::Shape> inputDims;
47 inputDimsPtr.reserve(num_inputs);
48 inputDims.reserve(num_inputs);
50 for (uint32_t i = 0; i < num_inputs; i++)
52 inputDims.push_back(getTensorShape(_inputs[i]));
53 inputDimsPtr.push_back(&inputDims[i]);
56 std::vector<const T *> inputDataPtrs;
58 for (const auto input : _inputs)
60 inputDataPtrs.emplace_back(reinterpret_cast<const T *>(input->buffer()));
63 nnfw::cker::Concatenation<T>(op_params, inputDimsPtr.data(), inputDataPtrs.data(),
64 getTensorShape(_output), reinterpret_cast<T *>(_output->buffer()));
66 void ConcatLayer::concatenationQuant8()
68 uint32_t num_inputs = _inputs.size();
70 std::vector<int32_t> input_zeropoints(num_inputs);
71 std::vector<float> input_scales(num_inputs);
72 for (uint32_t i = 0; i < num_inputs; i++)
74 input_zeropoints[i] = _inputs[i]->data_offset();
75 input_scales[i] = _inputs[i]->data_scale();
78 nnfw::cker::ConcatenationParams op_params;
79 op_params.axis = _axis;
80 op_params.inputs_count = num_inputs;
81 op_params.input_zeropoint = input_zeropoints.data();
82 op_params.input_scale = input_scales.data();
83 op_params.output_zeropoint = _output->data_offset();
84 op_params.output_scale = _output->data_scale();
86 std::vector<nnfw::cker::Shape *> inputDimsPtr;
87 std::vector<nnfw::cker::Shape> inputDims;
88 inputDimsPtr.reserve(num_inputs);
89 inputDims.reserve(num_inputs);
90 for (uint32_t i = 0; i < num_inputs; i++)
92 inputDims.push_back(getTensorShape(_inputs[i]));
93 inputDimsPtr.push_back(&inputDims[i]);
96 std::vector<const uint8_t *> inputDataPtrs;
97 for (const auto input : _inputs)
99 inputDataPtrs.emplace_back(reinterpret_cast<const uint8_t *>(input->buffer()));
102 nnfw::cker::ConcatenationWithScaling(op_params, inputDimsPtr.data(), inputDataPtrs.data(),
103 getTensorShape(_output),
104 reinterpret_cast<uint8_t *>(_output->buffer()));
// Record the tensors to concatenate, the concatenation axis, and the
// destination tensor for later run() calls.
// @param inputs  non-empty list of source tensors (borrowed, not owned)
// @param axis    dimension along which the inputs are joined
// @param output  destination tensor; must be non-null
void ConcatLayer::configure(const std::vector<const IPortableTensor *> &inputs, int32_t axis,
                            IPortableTensor *output)
  // Precondition checks — active in debug builds only.
  assert(inputs.size() > 0);
  assert(output != nullptr);
118 void ConcatLayer::run()
120 if (_output->data_type() == OperandType::FLOAT32)
122 concatenationGeneral<float>();
124 else if (_output->data_type() == OperandType::QUANT_UINT8_ASYMM)
126 concatenationQuant8();
128 else if (_output->data_type() == OperandType::INT32)
130 concatenationGeneral<int32_t>();
132 else if (_output->data_type() == OperandType::INT64)
134 concatenationGeneral<int64_t>();
137 throw std::runtime_error("Concat: unsupported data type");
142 } // namespace backend