/* Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2019 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "kernels/Concatenation.h"
#include "kernels/Utils.h"

#include <tensorflow/lite/kernels/internal/reference/concatenation.h>

#include <stdexcept>
25 namespace luci_interpreter
30 Concatenation::Concatenation(std::vector<const Tensor *> inputs, Tensor *output,
31 const ConcatenationParams ¶ms)
32 : KernelWithParams<ConcatenationParams>(std::move(inputs), {output}, params)
36 void Concatenation::configure()
38 const int num_inputs = _inputs.size();
39 LUCI_INTERPRETER_CHECK(num_inputs > 0);
40 const Tensor *t0 = _inputs[0];
42 // TODO: Support concat with fused activation function
43 LUCI_INTERPRETER_CHECK(params().activation == luci::FusedActFunc::NONE);
45 int axis = _params.axis;
47 axis += t0->shape().num_dims();
48 LUCI_INTERPRETER_CHECK(axis >= 0 && axis < t0->shape().num_dims());
50 int32_t sum_axis = t0->shape().dim(axis);
51 for (int i = 1; i < num_inputs; ++i)
53 const Tensor *tensor = _inputs[i];
54 LUCI_INTERPRETER_CHECK(tensor->element_type() == t0->element_type());
55 LUCI_INTERPRETER_CHECK(tensor->shape().num_dims() == t0->shape().num_dims());
56 for (int d = 0; d < t0->shape().num_dims(); ++d)
60 sum_axis += tensor->shape().dim(axis);
64 LUCI_INTERPRETER_CHECK(tensor->shape().dim(d) == t0->shape().dim(d));
69 Shape output_shape = t0->shape();
70 output_shape.dim(axis) = sum_axis;
72 // TODO S8 type needs more checking: quantization parameters of all input tensors and the output
73 // tensor should be the same. Note that there is no such requirement for U8 type.
74 if (t0->element_type() == DataType::S8)
75 throw std::runtime_error("Unsupported type.");
77 output()->resize(output_shape);
80 void Concatenation::execute() const
82 switch (_inputs[0]->element_type())
84 case DataType::FLOAT32:
91 evalGeneric<int8_t>();
94 evalGeneric<int32_t>();
97 evalGeneric<int64_t>();
100 throw std::runtime_error("Unsupported type.");
104 template <typename T> void Concatenation::evalGeneric() const
106 int axis = _params.axis;
108 axis += output()->shape().num_dims();
110 VectorOfTensors<T, true> inputs(_inputs);
111 tflite::ConcatenationParams params{};
113 params.inputs_count = _inputs.size();
114 tflite::reference_ops::Concatenation(params, inputs.shapes(), inputs.data(),
115 getTensorShape(output()), getTensorData<T>(output()));
118 void Concatenation::evalQuantized() const
120 int axis = _params.axis;
122 axis += output()->shape().num_dims();
124 VectorOfQuantizedTensors<true> inputs(_inputs);
125 tflite::ConcatenationParams params{};
127 params.input_zeropoint = inputs.zero_point();
128 params.input_scale = inputs.scale();
129 params.inputs_count = _inputs.size();
130 params.output_zeropoint = output()->zero_point();
131 params.output_scale = output()->scale();
133 tflite::reference_ops::ConcatenationWithScaling(params, inputs.shapes(), inputs.data(),
134 getTensorShape(output()),
135 getTensorData<uint8_t>(output()));
138 } // namespace kernels
139 } // namespace luci_interpreter