// SPDX-License-Identifier: Apache-2.0
/**
 * Copyright (C) 2020 Jijoong Moon <jijoong.moon@samsung.com>
 *
 * @file   concat_layer.cpp
 * @see    https://github.com/nnstreamer/nntrainer
 * @author Jijoong Moon <jijoong.moon@samsung.com>
 * @bug    No known bugs except for NYI items
 * @brief  This is Concat Layer Class for Neural Network
 *
 * @todo merge concat and split layer to a common implementation
 */
#include <concat_layer.h>

#include <cstddef>
#include <string>
#include <tuple>
#include <vector>

#include <layer_context.h>
#include <nntrainer_error.h>
#include <nntrainer_log.h>
#include <node_exporter.h>
#include <tensor_dim.h>
#include <util_func.h>
25 ConcatLayer::ConcatLayer() : Layer(), leading_helper_dim(1) {}
27 static constexpr size_t SINGLE_INOUT_IDX = 0;
29 void ConcatLayer::finalize(InitLayerContext &context) {
30 auto &concat_dimension_prop = std::get<props::ConcatDimension>(concat_props);
31 /** for backward compatibility, default concat dimension will be channel */
32 /// @todo this is hacky way to force concat dimension to width if channel
33 /// dimension is taken, this is because recurrent realizer, return sequence
34 /// exploits concat layer but have no control over where to stack/axis
35 unsigned int concat_dimension =
36 context.getInputDimensions().front().channel() > 1 ? 3 : 1;
37 if (!concat_dimension_prop.empty())
38 concat_dimension = concat_dimension_prop.get();
41 * The concat is only done along the axis dimension.
42 * For example, consider 2 inputs a, b with dimensions [b,c,h,w] each
43 * 1. concat_dimension = 1, output_dim = [b,c_a+c_b,h,w]
44 * 2. concat_dimension = 2, output_dim = [b,c,h_a+h_b,w]
45 * 3. concat_dimension = 3, output_dim = [b,c,h,w_a+w_b]
47 auto const &input_dims = context.getInputDimensions();
48 const TensorDim &input_dim_0 = input_dims[SINGLE_INOUT_IDX];
49 unsigned int concat_dim_val = input_dim_0.getTensorDim(concat_dimension);
51 for (unsigned int idx = 1; idx < input_dims.size(); ++idx) {
52 const TensorDim &dim = input_dims[idx];
54 for (unsigned int i = 0; i < ml::train::TensorDim::getNumDim(); ++i) {
55 if (i == concat_dimension)
57 NNTR_THROW_IF(input_dim_0[i] != dim[i], std::runtime_error)
58 << "Error: concat layer requires same shape from all input layers "
59 "along non-concat dimension";
61 concat_dim_val += dim[concat_dimension];
64 TensorDim output_dim = input_dim_0;
65 output_dim.setTensorDim(concat_dimension, concat_dim_val);
67 context.setOutputDimensions({output_dim});
70 * Setup output_reshape_helper to which output will be reshaped in forwarding
71 * to facilitate easier processing.
73 * The helper shape consolidates all the dimensions before the axis
74 * together and all the dimensions after the axis to facilitate
75 * easier splitting of the data.
77 leading_helper_dim = 1;
78 output_reshape_helper.channel(1);
79 output_reshape_helper.height(1);
80 output_reshape_helper.width(1);
81 for (unsigned int idx = 1; idx < concat_dimension; ++idx) {
82 leading_helper_dim *= output_dim.getTensorDim(idx);
85 output_reshape_helper.height(output_dim.getTensorDim(concat_dimension));
87 for (unsigned int idx = concat_dimension + 1;
88 idx < ml::train::TensorDim::getNumDim(); ++idx) {
89 output_reshape_helper.width(output_reshape_helper.width() *
90 output_dim.getTensorDim(idx));
94 * Setup input_reshape_helper to which inputs will be reshaped in forwarding
95 * to facilitate easier processing.
97 input_reshape_helper.resize(input_dims.size());
98 for (unsigned int idx = 0; idx < input_reshape_helper.size(); idx++) {
99 input_reshape_helper[idx] = output_reshape_helper;
100 input_reshape_helper[idx].height(
101 input_dims[idx].getTensorDim(concat_dimension));
104 setBatch(input_dims[SINGLE_INOUT_IDX].batch());
107 void ConcatLayer::forwarding(RunLayerContext &context, bool training) {
109 * @todo avoid copy by creating input here as a shared_tensor of the output
110 * here and then this layer can be in_place as well
112 Tensor &output = context.getOutput(SINGLE_INOUT_IDX);
114 const TensorDim out_dim = output.getDim();
115 output.reshape(output_reshape_helper);
116 unsigned int output_height_offset = 0;
117 unsigned int data_copy_size = output_reshape_helper.width();
119 for (unsigned int idx = 0; idx < context.getNumInputs(); idx++) {
120 Tensor &input = context.getInput(idx);
121 const TensorDim in_dim = input.getDim();
122 auto const &irh = input_reshape_helper[idx];
125 /** loop over the dimensions before the concat dimension */
126 for (unsigned int batch = 0; batch < output.batch(); batch++) {
127 /** loop over the concat dimension itself */
128 for (unsigned int count = 0; count < irh.height(); count++) {
129 Tensor dest_tensor = Tensor::Map<float>(
130 output.getAddress<float>(batch, 0, output_height_offset + count, 0),
131 data_copy_size * sizeof(float), {1, 1, 1, data_copy_size});
132 const Tensor source_tensor = Tensor::Map<float>(
133 input.getAddress(batch, 0, count, 0), data_copy_size * sizeof(float),
134 {1, 1, 1, data_copy_size});
135 dest_tensor.copy(source_tensor);
139 input.reshape(in_dim);
140 output_height_offset += irh.height();
143 output.reshape(out_dim);
146 void ConcatLayer::calcDerivative(RunLayerContext &context) {
148 * @todo avoid copy by creating input here as a shared_tensor of the output
149 * here and then this layer can be in_place as well
151 Tensor output = context.getIncomingDerivative(SINGLE_INOUT_IDX);
153 output.reshape(output_reshape_helper);
154 unsigned int output_height_offset = 0;
155 unsigned int data_copy_size = output_reshape_helper.width();
157 for (unsigned int idx = 0; idx < context.getNumInputs(); idx++) {
158 Tensor &input = context.getOutgoingDerivative(idx);
159 const TensorDim in_dim = input.getDim();
160 auto const &irh = input_reshape_helper[idx];
163 /** loop over the dimensions before the concat dimension */
164 for (unsigned int batch = 0; batch < output.batch(); batch++) {
165 /** loop over the concat dimension itself */
166 for (unsigned int count = 0; count < irh.height(); count++) {
167 const Tensor source_tensor = Tensor::Map<float>(
168 output.getAddress(batch, 0, output_height_offset + count, 0),
169 data_copy_size * sizeof(float), {1, 1, 1, data_copy_size});
170 Tensor dest_tensor = Tensor::Map<float>(input.getAddress(batch, 0, count, 0),
171 data_copy_size * sizeof(float),
172 {1, 1, 1, data_copy_size});
173 dest_tensor.copy(source_tensor);
177 input.reshape(in_dim);
178 output_height_offset += irh.height();
182 void ConcatLayer::setProperty(const std::vector<std::string> &values) {
183 auto remain_props = loadProperties(values, concat_props);
184 NNTR_THROW_IF(!remain_props.empty(), std::invalid_argument)
185 << "[ConcatLayer] Unknown Layer Properties count " +
186 std::to_string(values.size());
189 void ConcatLayer::exportTo(Exporter &exporter,
190 const ml::train::ExportMethods &method) const {
191 Layer::exportTo(exporter, method);
192 exporter.saveResult(concat_props, method, this);
195 } /* namespace nntrainer */