/* Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 * Copyright 2017 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "kernels/TransposeConv.h"

#include "kernels/Utils.h"

#include <tensorflow/lite/kernels/internal/reference/reference_ops.h>

#include <cassert>
#include <cstdint>
#include <limits>
#include <memory>
#include <stdexcept>
26 namespace luci_interpreter
32 TransposeConv::TransposeConv(const Tensor *output_shape, const Tensor *filter, const Tensor *input,
33 const Tensor *bias, Tensor *output, const TransposeConvParams ¶ms)
34 : KernelWithParams<TransposeConvParams>({output_shape, filter, input, bias}, {output}, params)
38 void TransposeConv::configure()
40 assert(output_shape()->shape().num_dims() == 1);
41 assert(input()->shape().num_dims() == 4);
42 assert(filter()->shape().num_dims() == 4);
43 assert(input()->element_type() == DataType::FLOAT32 || input()->element_type() == DataType::U8);
44 assert(input()->element_type() == output()->element_type());
45 assert(input()->shape().dim(3) == filter()->shape().dim(3));
46 if (input()->element_type() == DataType::U8)
49 std::make_unique<Tensor>(DataType::S32, output()->shape(), AffineQuantization{}, "");
50 double real_multiplier = 0.0;
51 const double input_product_scale = input()->scale() * filter()->scale();
52 assert(input_product_scale >= 0);
53 real_multiplier = input_product_scale / output()->scale();
55 quantizeMultiplier(real_multiplier, &_output_multiplier, &exponent);
56 _output_shift = -exponent;
59 const int num_dims = output_shape()->shape().dim(0);
60 Shape out_shape(num_dims);
61 const auto *shape_data = getTensorData<int32_t>(output_shape());
62 for (int i = 0; i < num_dims; i++)
63 out_shape.dim(i) = shape_data[i];
64 output()->resize(out_shape);
67 void TransposeConv::execute() const
69 switch (input()->element_type())
71 case DataType::FLOAT32:
78 throw std::runtime_error("Unsupported type.");
82 void TransposeConv::evalFloat() const
84 const int width = output()->shape().dim(2);
85 const int height = output()->shape().dim(1);
87 const int filter_width = filter()->shape().dim(2);
88 const int filter_height = filter()->shape().dim(1);
90 int unused_output_height, unused_output_width;
92 computeOutputSize(params().padding, width, filter_width, params().stride_width, 1);
93 unused_output_height =
94 computeOutputSize(params().padding, height, filter_height, params().stride_height, 1);
96 tflite::ConvParams op_params{};
97 op_params.padding_type = tflite::PaddingType::kSame;
98 op_params.padding_values.height = computePaddingWithOffset(
99 params().stride_height, 1, height, filter_height, unused_output_height, &offset);
100 op_params.padding_values.height_offset = offset;
101 op_params.padding_values.width = computePaddingWithOffset(
102 params().stride_width, 1, width, filter_width, unused_output_width, &offset);
103 op_params.padding_values.width_offset = offset;
104 op_params.stride_height = params().stride_height;
105 op_params.stride_width = params().stride_width;
106 op_params.output_multiplier = _output_multiplier;
107 tflite::reference_ops::TransposeConv(
108 op_params, getTensorShape(input()), getTensorData<float>(input()), getTensorShape(filter()),
109 getTensorData<float>(filter()), getTensorShape(bias()), getTensorData<float>(bias()),
110 getTensorShape(output()), getTensorData<float>(output()), tflite::RuntimeShape(),
114 void TransposeConv::evalQuantized() const
116 int32_t input_offset = -input()->zero_point();
117 int32_t filter_offset = -filter()->zero_point();
118 int32_t output_offset = filter()->zero_point();
119 const int width = output()->shape().dim(2);
120 const int height = output()->shape().dim(1);
122 const int filter_width = filter()->shape().dim(2);
123 const int filter_height = filter()->shape().dim(1);
125 int unused_output_height, unused_output_width;
126 unused_output_width =
127 computeOutputSize(params().padding, width, filter_width, params().stride_width, 1);
128 unused_output_height =
129 computeOutputSize(params().padding, height, filter_height, params().stride_height, 1);
131 tflite::ConvParams op_params{};
132 op_params.padding_type = tflite::PaddingType::kSame;
133 op_params.padding_values.height = computePaddingWithOffset(
134 params().stride_height, 1, height, filter_height, unused_output_height, &offset);
135 op_params.padding_values.width = computePaddingWithOffset(
136 params().stride_width, 1, width, filter_width, unused_output_width, &offset);
137 op_params.stride_height = params().stride_height;
138 op_params.stride_width = params().stride_width;
139 op_params.input_offset = input_offset;
140 op_params.output_offset = output_offset;
141 op_params.weights_offset = filter_offset;
142 op_params.output_multiplier = _output_multiplier;
143 op_params.output_shift = -_output_shift;
144 op_params.quantized_activation_min = std::numeric_limits<uint8_t>::min();
145 op_params.quantized_activation_max = std::numeric_limits<uint8_t>::max();
147 tflite::reference_ops::TransposeConv(
148 op_params, getTensorShape(input()), getTensorData<uint8>(input()), getTensorShape(filter()),
149 getTensorData<uint8>(filter()), getTensorShape(bias()), getTensorData<int32_t>(bias()),
150 getTensorShape(output()), getTensorData<uint8>(output()), tflite::RuntimeShape(),
151 (uint8 *)nullptr, getTensorData<int32_t>(_scratch_tensor.get()));
154 } // namespace kernels
155 } // namespace luci_interpreter