2 * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 * Copyright (c) 2017-2019 ARM Limited.
20 * SPDX-License-Identifier: MIT
22 * Permission is hereby granted, free of charge, to any person obtaining a copy
23 * of this software and associated documentation files (the "Software"), to
24 * deal in the Software without restriction, including without limitation the
25 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
26 * sell copies of the Software, and to permit persons to whom the Software is
27 * furnished to do so, subject to the following conditions:
29 * The above copyright notice and this permission notice shall be included in all
30 * copies or substantial portions of the Software.
32 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
33 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
34 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
35 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
36 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
37 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
41 #include "arm_compute/core/NEON/kernels/NEQuantizationSymmetricKernel.h"
43 #include "arm_compute/core/Error.h"
44 #include "arm_compute/core/Helpers.h"
45 #include "arm_compute/core/NEON/NEAsymm.h"
46 #include "arm_compute/core/NEON/wrapper/wrapper.h"
47 #include "arm_compute/core/Utils.h"
48 #include "arm_compute/core/Validate.h"
49 #include "arm_compute/core/Window.h"
51 #include "arm_compute/core/CPP/Validate.h"
55 using namespace arm_compute;
59 Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
60 const ITensorInfo *scale_factor)
62 ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, output);
63 ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
64 ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > 2);
65 ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
66 ARM_COMPUTE_RETURN_ERROR_ON(output->tensor_shape().total_size() == 0);
67 ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(output, 1, DataType::QASYMM8_SIGNED);
68 ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
69 ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(scale_factor, 1, DataType::F16,
71 ARM_COMPUTE_RETURN_ERROR_ON(scale_factor->tensor_shape().total_size() == 0);
72 ARM_COMPUTE_RETURN_ERROR_ON(scale_factor->num_dimensions() > 1);
73 ARM_COMPUTE_RETURN_ERROR_ON(scale_factor->dimension(0) != input->dimension(1));
78 inline float32x4x4_t load_value(const float *input_ptr)
80 return {wrapper::vloadq(input_ptr), wrapper::vloadq(input_ptr + 4),
81 wrapper::vloadq(input_ptr + 8), wrapper::vloadq(input_ptr + 12)};
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
// F16 overload: loads 16 consecutive half-precision elements and widens each
// group of four to F32, so the quantization math runs in single precision.
// The top-level const on the by-value return type was dropped: it has no
// effect for callers (clang-tidy readability-const-return-type) and was
// inconsistent with the F32 overload above.
inline float32x4x4_t load_value(const float16_t *input_ptr)
{
  return {vcvt_f32_f16(wrapper::vload(input_ptr)), vcvt_f32_f16(wrapper::vload(input_ptr + 4)),
          vcvt_f32_f16(wrapper::vload(input_ptr + 8)),
          vcvt_f32_f16(wrapper::vload(input_ptr + 12))};
}
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
93 inline float32x4_t round(const float32x4_t &fv)
95 const float32x4_t point5_f32x4 = vdupq_n_f32(0.5f);
96 const float32x4_t zero_f32x4 = vdupq_n_f32(0.0f);
97 // If value < 0, mask = -1, else mask = 0
98 int32x4_t mask_less_zero_ui32x4 = reinterpret_cast<int32x4_t>(vcltq_f32(fv, zero_f32x4));
99 return vaddq_f32(fv, vaddq_f32(vcvtq_f32_s32(mask_less_zero_ui32x4), point5_f32x4));
102 inline int8x16_t vquantizeSymm(const float32x4x4_t &fv, float scale_factor_inv, int32_t max_scale)
104 const float32x4_t vinvscale = vdupq_n_f32(scale_factor_inv);
105 const int32x4_t vposend = vdupq_n_s32(max_scale);
106 const int32x4_t vnagend = vdupq_n_s32(-max_scale);
108 const int32x4x4_t rf = {{
111 vmaxq_s32(vnagend, vcvtnq_s32_f32(round(vmulq_f32(fv.val[0], vinvscale))))),
113 vmaxq_s32(vnagend, vcvtnq_s32_f32(round(vmulq_f32(fv.val[1], vinvscale))))),
115 vmaxq_s32(vnagend, vcvtnq_s32_f32(round(vmulq_f32(fv.val[2], vinvscale))))),
117 vmaxq_s32(vnagend, vcvtnq_s32_f32(round(vmulq_f32(fv.val[3], vinvscale))))),
119 vminq_s32(vposend, vmaxq_s32(vnagend, vcvtq_s32_f32(round(vmulq_f32(fv.val[0], vinvscale))))),
120 vminq_s32(vposend, vmaxq_s32(vnagend, vcvtq_s32_f32(round(vmulq_f32(fv.val[1], vinvscale))))),
121 vminq_s32(vposend, vmaxq_s32(vnagend, vcvtq_s32_f32(round(vmulq_f32(fv.val[2], vinvscale))))),
122 vminq_s32(vposend, vmaxq_s32(vnagend, vcvtq_s32_f32(round(vmulq_f32(fv.val[3], vinvscale))))),
125 const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf.val[0]), vqmovn_s32(rf.val[1])));
126 const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf.val[2]), vqmovn_s32(rf.val[3])));
127 return vcombine_s8(pa, pb);
131 NEQuantizationSymmetricKernel::NEQuantizationSymmetricKernel()
132 : _input(nullptr), _output(nullptr), _scale_factor(nullptr)
136 void NEQuantizationSymmetricKernel::configure(const ITensor *input, ITensor *output,
137 ITensor *scale_factor)
139 ARM_COMPUTE_ERROR_ON_NULLPTR(input, output);
140 ARM_COMPUTE_ERROR_THROW_ON(
141 validate_arguments(input->info(), output->info(), scale_factor->info()));
145 _scale_factor = scale_factor;
147 // Configure kernel window
148 Window win_config = calculate_max_window(*input->info(), Steps());
151 coord.set_num_dimensions(output->info()->num_dimensions());
152 output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));
154 INEKernel::configure(win_config);
157 Status NEQuantizationSymmetricKernel::validate(const ITensorInfo *input, const ITensorInfo *output,
158 const ITensorInfo *scale_factor)
160 ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, scale_factor));
// Per-row symmetric quantization: for each row of the (at most 2-D) input,
// derives scale = max(|row|) / 127, stores it into _scale_factor at index
// id.y(), and writes each element as clamp(round(x / scale), -127, 127)
// into _output as int8. T is the input element type (float / float16_t).
template <typename T> void NEQuantizationSymmetricKernel::quantize(const Window &window)
  constexpr auto window_step = 16; // elements per NEON iteration (16 int8 output lanes)
  const auto window_start_x = static_cast<int>(window.x().start());
  const auto window_end_x = static_cast<int>(window.x().end());

  // The scalar tail must round the same way as the vector path in
  // vquantizeSymm: ties-to-even where vcvtnq is used, half-away-from-zero
  // (TO_NEAREST_UP) otherwise.
  // NOTE(review): the two definitions below are arch-dependent alternatives;
  // the surrounding #ifdef __aarch64__ / #else guards appear to have been
  // elided from this view — confirm against the original file.
  constexpr RoundingPolicy rounding_policy = RoundingPolicy::TO_NEAREST_EVEN;
  constexpr RoundingPolicy rounding_policy = RoundingPolicy::TO_NEAREST_UP;

  // Collapse window and reset first dimension to handle tail calculations manually
  // Support Only 2D input
  Window win_collapsed = window;
  Iterator input(_input, win_collapsed);
  Iterator output(_output, win_collapsed);
  const auto dim_x = _input->info()->dimension(0); // full row length (x extent)
  win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));

  // Loop body executed once per row (per y step of the collapsed window).
  [&](const Coordinates &id) {
    const auto start = reinterpret_cast<const T *>(input.ptr());
    // The row scale is derived from the largest magnitude in the whole row.
    const auto min_max = std::minmax_element(start, start + dim_x);
    const auto int8_scale = 127; // symmetric int8 range is [-127, 127]
    auto range = std::max(std::abs(*min_max.first), std::abs(*min_max.second));
    // NOTE(review): the next two stores are presumably the range == 0 and
    // else branches of an elided conditional (scale 1 for an all-zero row,
    // otherwise max(|x|)/127) — confirm against the original file.
    *reinterpret_cast<T *>(_scale_factor->ptr_to_element({id.y()})) = 1;
    *reinterpret_cast<T *>(_scale_factor->ptr_to_element({id.y()})) = range / int8_scale;
    // Multiply by the reciprocal instead of dividing per element.
    const auto scale_factor_inv = int8_scale / range;

    auto input_ptr = reinterpret_cast<const T *>(input.ptr());
    auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());
    int x = window_start_x;
    // Vector body: quantize 16 elements per iteration.
    for (; x <= (window_end_x - window_step); x += window_step)
      wrapper::vstore(&output_ptr[x],
                      vquantizeSymm(load_value(&input_ptr[x]), scale_factor_inv, int8_scale));
    // Compute left-over elements
    for (; x < window_end_x; ++x)
      int quantized = arm_compute::round(input_ptr[x] * scale_factor_inv, rounding_policy);
      // Clamp to the symmetric int8 range before narrowing.
      quantized = std::min(int8_scale, std::max(quantized, -int8_scale));
      output_ptr[x] = static_cast<int8_t>(quantized);
221 void NEQuantizationSymmetricKernel::run(const Window &window, const ThreadInfo &info)
223 ARM_COMPUTE_UNUSED(info);
224 ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
225 ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
227 switch (_input->info()->data_type())
230 NEQuantizationSymmetricKernel::quantize<float>(window);
232 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
234 NEQuantizationSymmetricKernel::quantize<float16_t>(window);
236 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
238 ARM_COMPUTE_ERROR("Unsupported data type.");