This commit adds InstanceNorm op support to the acl_neon backend.
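
It adds the NEInstanceNormalizationLayerKernelEx kernel and the
NEInstanceNormalizationLayerEx function, and wires them into the acl_neon
KernelGenerator and ShapeFixer. A minimal usage sketch of the new function
(tensor setup and allocation elided; gamma and beta are per-channel tensors
and may be nullptr):

    arm_compute::NEInstanceNormalizationLayerEx instance_norm;
    instance_norm.configure(&input, &output, &gamma, &beta, 1e-12f);
    instance_norm.run();
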
Signed-off-by: jiseob.jang <jiseob.jang@samsung.com>
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEINSTANCENORMALIZATIONLAYERKERNELEX_H__
+#define __ARM_COMPUTE_NEINSTANCENORMALIZATIONLAYERKERNELEX_H__
+
+#include "arm_compute/core/NEON/INEKernel.h"
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Interface for performing instance normalization.
+ *
+ * Normalizes each H x W plane of every channel and batch independently:
+ *   out = gamma * (in - mean) / sqrt(var + epsilon) + beta
+ */
+class NEInstanceNormalizationLayerKernelEx : public INEKernel
+{
+public:
+ const char *name() const override { return "NEInstanceNormalizationLayerKernelEx"; }
+ /** Default constructor */
+ NEInstanceNormalizationLayerKernelEx();
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEInstanceNormalizationLayerKernelEx(const NEInstanceNormalizationLayerKernelEx &) = delete;
+ /** Prevent instances of this class from being copied (As this class contains pointers) */
+ NEInstanceNormalizationLayerKernelEx &
+ operator=(const NEInstanceNormalizationLayerKernelEx &) = delete;
+ /** Allow instances of this class to be moved */
+ NEInstanceNormalizationLayerKernelEx(NEInstanceNormalizationLayerKernelEx &&) = default;
+ /** Allow instances of this class to be moved */
+ NEInstanceNormalizationLayerKernelEx &
+ operator=(NEInstanceNormalizationLayerKernelEx &&) = default;
+ /** Default destructor */
+ ~NEInstanceNormalizationLayerKernelEx() = default;
+ /** Set the input and output tensors.
+ *
+ * @param[in, out] input Source tensor. Data types supported: F16/F32. Data layout supported:
+ * NCHW
+ * In case of @p output tensor = nullptr this tensor will store the result
+ * of the normalization.
+ * @param[out] output Destination tensor. Data types and data layouts supported: same as @p
+ * input.
+   * @param[in] gamma (Optional) Scale tensor applied to the normalized planes, one value per
+   * channel. Defaults to nullptr (behaves as 1.0)
+   * @param[in] beta (Optional) Offset tensor applied to the normalized planes, one value per
+   * channel. Defaults to nullptr (behaves as 0.0)
+ * @param[in] epsilon (Optional) Lower bound value for the normalization. Defaults to 1e-12
+ */
+ void configure(ITensor *input, ITensor *output, ITensor *gamma = nullptr, ITensor *beta = nullptr,
+ float epsilon = 1e-12f);
+
+ /** Static function to check if given info will lead to a valid configuration of @ref
+   * NEInstanceNormalizationLayerKernelEx.
+ *
+ * @param[in] input Source tensor info. Data types supported: F16/F32. Data layout supported:
+ * NCHW
+ * @param[in] output Destination tensor info. Data types and data layouts supported: same as @p
+ * input.
+   * @param[in] gamma (Optional) Scale tensor info, one value per input channel. Defaults to
+   * nullptr (behaves as 1.0)
+   * @param[in] beta (Optional) Offset tensor info, one value per input channel. Defaults to
+   * nullptr (behaves as 0.0)
+ * @param[in] epsilon (Optional) Lower bound value for the normalization. Defaults to 1e-12
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output,
+ const ITensorInfo *gamma = nullptr, const ITensorInfo *beta = nullptr,
+ float epsilon = 1e-12f);
+
+ // Inherited methods overridden:
+ void run(const Window &window, const ThreadInfo &info) override;
+
+private:
+ /** Common signature for all the specialized instance normalization functions
+ *
+ * @param[in, out] input An input tensor. In case of @p output tensor = nullptr this tensor will
+ * store the result of the normalization.
+ * @param[out] output The output tensor.
+   * @param[in] gamma The scale tensor applied to the normalized tensor, one value per channel.
+   * May be nullptr (behaves as 1.0)
+   * @param[in] beta The offset tensor applied to the normalized tensor, one value per channel.
+   * May be nullptr (behaves as 0.0)
+ * @param[in] epsilon Lower bound value for the normalization. Defaults to 1e-12
+ */
+ using NormalizationFunction = void(ITensor *input, ITensor *output, ITensor *gamma, ITensor *beta,
+ float epsilon, const Window &window);
+
+ NormalizationFunction *_func;
+ ITensor *_input;
+ ITensor *_output;
+ ITensor *_gamma;
+ ITensor *_beta;
+ float _epsilon;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEINSTANCENORMALIZATIONLAYERKERNELEX_H__ */
#include <arm_compute/runtime/NEON/functions/NEFullyConnectedReshapingLayer.h>
#include <arm_compute/runtime/NEON/functions/NEGatherEx.h>
#include <arm_compute/runtime/NEON/functions/NEHashtableLookup.h>
+#include <arm_compute/runtime/NEON/functions/NEInstanceNormalizationLayerEx.h>
#include <arm_compute/runtime/NEON/functions/NEPReLU.h>
#include <arm_compute/runtime/NEON/functions/NEReduceMeanEx.h>
#include <arm_compute/runtime/NEON/functions/NEReduceSum.h>
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __ARM_COMPUTE_NEINSTANCENORMALIZATIONLAYEREX_H__
+#define __ARM_COMPUTE_NEINSTANCENORMALIZATIONLAYEREX_H__
+
+#include "arm_compute/core/NEON/kernels/NEInstanceNormalizationLayerKernelEx.h"
+#include "arm_compute/runtime/IFunction.h"
+#include "arm_compute/runtime/IMemoryManager.h"
+#include "arm_compute/runtime/MemoryGroup.h"
+#include "arm_compute/runtime/NEON/functions/NEPermute.h"
+#include "arm_compute/runtime/NEON/functions/NEReductionOperation.h"
+#include "arm_compute/runtime/Tensor.h"
+
+#include <memory>
+
+namespace arm_compute
+{
+class ITensor;
+
+/** Basic function to perform instance normalization.
+ *
+ * This function runs the following kernels:
+ * -# @ref NEPermute (x 2) if the data layout is NHWC
+ * -# @ref NEInstanceNormalizationLayerKernelEx
+ */
+class NEInstanceNormalizationLayerEx : public IFunction
+{
+public:
+ /** Constructor */
+ NEInstanceNormalizationLayerEx(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
+ /** Set the input and output tensors.
+ *
+ * @param[in, out] input Source tensor. In case of @p output tensor = nullptr this tensor will
+ * store the result of the normalization.
+ * Data types supported: F16/F32. Data layout supported: NHWC, NCHW
+ * @param[out] output Destination tensor. Data types and data layouts supported: same as @p
+ * input.
+   * @param[in] gamma Scale tensor applied to the normalized planes, one value per channel. Can
+   * be nullptr (behaves as 1.0)
+   * @param[in] beta Offset tensor applied to the normalized planes, one value per channel. Can
+   * be nullptr (behaves as 0.0)
+ * @param[in] epsilon (Optional) Lower bound value for the normalization. Defaults to 1e-12
+ */
+ void configure(ITensor *input, ITensor *output, ITensor *gamma, ITensor *beta,
+ float epsilon = 1e-12f);
+
+ /** Static function to check if given info will lead to a valid configuration of @ref
+   * NEInstanceNormalizationLayerEx.
+ *
+ * @param[in] input Source tensor info. Data types supported: F16/F32. Data layout supported:
+ * NHWC, NCHW
+ * @param[in] output Destination tensor info. Data types and data layouts supported: same as @p
+ * input.
+   * @param[in] gamma (Optional) Scale tensor info, one value per input channel. Defaults to
+   * nullptr (behaves as 1.0)
+   * @param[in] beta (Optional) Offset tensor info, one value per input channel. Defaults to
+   * nullptr (behaves as 0.0)
+ * @param[in] epsilon (Optional) Lower bound value for the normalization. Defaults to 1e-12
+ *
+ * @return a status
+ */
+ static Status validate(const ITensorInfo *input, const ITensorInfo *output,
+ const ITensorInfo *gamma = nullptr, const ITensorInfo *beta = nullptr,
+ float epsilon = 1e-12f);
+
+ // Inherited methods overridden:
+ void run() override;
+
+private:
+ MemoryGroup _memory_group;
+ NEInstanceNormalizationLayerKernelEx _normalization_kernel;
+ bool _is_nchw;
+ NEPermute _permute_input;
+ NEPermute _permute_output;
+ Tensor _permuted_input;
+ Tensor _permuted_output;
+};
+} // namespace arm_compute
+#endif /* __ARM_COMPUTE_NEINSTANCENORMALIZATIONLAYEREX_H__ */
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/core/NEON/kernels/NEInstanceNormalizationLayerKernelEx.h"
+
+#include "arm_compute/core/CPP/Validate.h"
+#include "arm_compute/core/Error.h"
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/core/ITensor.h"
+#include "arm_compute/core/NEON/NEMath.h"
+#include "arm_compute/core/NEON/wrapper/wrapper.h"
+#include "arm_compute/core/TensorInfo.h"
+#include "arm_compute/core/Utils.h"
+#include "arm_compute/core/Validate.h"
+#include "arm_compute/core/Window.h"
+
+#include <arm_neon.h>
+
+namespace arm_compute
+{
+namespace
+{
+template <typename T>
+void instance_normalization_nchw(ITensor *input, ITensor *output, ITensor *gamma, ITensor *beta,
+ float epsilon, const Window &window)
+{
+ /** NEON vector tag type. */
+ using ExactTagType =
+ typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;
+
+ // Clear X/Y dimensions on execution window as we handle the planes manually
+ Window win = window;
+ win.set(Window::DimX, Window::Dimension(0, 1, 1));
+ win.set(Window::DimY, Window::Dimension(0, 1, 1));
+
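+  // Each vector iteration processes 16 bytes: 4 F32 elements or 8 F16 elements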
+ constexpr int window_step_x = 16 / sizeof(T);
+  const unsigned int elements_plane = input->info()->dimension(0) * input->info()->dimension(1);
+ const auto channel_idx =
+ get_data_layout_dimension_index(input->info()->data_layout(), DataLayoutDimension::CHANNEL);
+
+ Iterator input_it(input, win);
+ execute_window_loop(
+ win,
+ [&](const Coordinates &id) {
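+      // Build a window that iterates one H x W plane: the current channel id[2] and batch id[3]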
+ Window win_plane = window;
+ win_plane.set(Window::DimX, Window::Dimension(0, 1, 1));
+ win_plane.set(Window::DimZ, Window::Dimension(id[2], id[2] + 1, 1));
+ win_plane.set(3, Window::Dimension(id[3], id[3] + 1, 1));
+
+ Iterator input_plane_it(input, win_plane);
+ Iterator output_plane_it(output, win_plane);
+
+ auto sum_h_w = static_cast<T>(0.f);
+ auto sum_squares_h_w = static_cast<T>(0.f);
+
+ execute_window_loop(
+ win_plane,
+ [&](const Coordinates &) {
+ const auto input_ptr = reinterpret_cast<const T *>(input_plane_it.ptr());
+
+ auto vec_sum_h_w = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
+ auto vec_sum_squares_h_w = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
+
+ // Compute S elements per iteration
+ int x = window.x().start();
+ for (; x <= (window.x().end() - window_step_x); x += window_step_x)
+ {
+ auto vec_input_val = wrapper::vloadq(input_ptr + x);
+ vec_sum_h_w = wrapper::vadd(vec_sum_h_w, vec_input_val);
+ vec_sum_squares_h_w =
+ wrapper::vadd(vec_sum_squares_h_w, wrapper::vmul(vec_input_val, vec_input_val));
+ }
+
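+          // Horizontally reduce the vector accumulators to scalars via pairwise adds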
+ auto vec2_sum_h_w =
+ wrapper::vpadd(wrapper::vgethigh(vec_sum_h_w), wrapper::vgetlow(vec_sum_h_w));
+ auto vec2_sum_squares_h_w = wrapper::vpadd(wrapper::vgethigh(vec_sum_squares_h_w),
+ wrapper::vgetlow(vec_sum_squares_h_w));
+ for (int i = 0; i < window_step_x / 4; ++i)
+ {
+ vec2_sum_h_w = wrapper::vpadd(vec2_sum_h_w, vec2_sum_h_w);
+ vec2_sum_squares_h_w = wrapper::vpadd(vec2_sum_squares_h_w, vec2_sum_squares_h_w);
+ }
+ sum_h_w += wrapper::vgetlane(vec2_sum_h_w, 0);
+ sum_squares_h_w += wrapper::vgetlane(vec2_sum_squares_h_w, 0);
+
+ // Compute left-over elements
+ for (; x < window.x().end(); ++x)
+ {
+ const auto value = *(input_ptr + x);
+ sum_h_w += value;
+ sum_squares_h_w += value * value;
+ }
+ },
+ input_plane_it, output_plane_it);
+
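+      // Statistics over the H x W plane: var = E[x^2] - (E[x])^2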
+ const auto mean_h_w = sum_h_w / elements_plane;
+ const auto var_h_w = sum_squares_h_w / elements_plane - mean_h_w * mean_h_w;
+
+ auto gamma_val = 1.0f;
+ if (gamma != nullptr)
+ {
+ gamma_val = *reinterpret_cast<T *>(gamma->ptr_to_element({id[channel_idx]}));
+ }
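+      // Fold gamma and 1 / sqrt(var + epsilon) into a single per-plane multiplier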
+ const auto multip_h_w = gamma_val / std::sqrt(var_h_w + epsilon);
+ const auto vec_mean_h_w = wrapper::vdup_n(static_cast<T>(mean_h_w), ExactTagType{});
+ const auto vec_multip_h_w = wrapper::vdup_n(static_cast<T>(multip_h_w), ExactTagType{});
+ auto beta_val = 0.0f;
+ if (beta != nullptr)
+ {
+ beta_val = *reinterpret_cast<T *>(beta->ptr_to_element({id[channel_idx]}));
+ }
+ const auto vec_beta = wrapper::vdup_n(static_cast<T>(beta_val), ExactTagType{});
+
+ execute_window_loop(
+ win_plane,
+ [&](const Coordinates &) {
+ auto input_ptr = reinterpret_cast<T *>(input_plane_it.ptr());
+ auto output_ptr = reinterpret_cast<T *>(output_plane_it.ptr());
+
+ // Compute S elements per iteration
+ int x = window.x().start();
+ auto vec_val = wrapper::vdup_n(static_cast<T>(0.0f), ExactTagType{});
+ for (; x <= (window.x().end() - window_step_x); x += window_step_x)
+ {
+ vec_val = wrapper::vloadq(input_ptr + x);
+ vec_val = wrapper::vadd(
+ wrapper::vmul(wrapper::vsub(vec_val, vec_mean_h_w), vec_multip_h_w), vec_beta);
+ wrapper::vstore(output_ptr + x, vec_val);
+ }
+
+ // Compute left-over elements
+ for (; x < window.x().end(); ++x)
+ {
+ *(output_ptr + x) = ((*(input_ptr + x)) - mean_h_w) * multip_h_w + beta_val;
+ }
+ },
+ input_plane_it, output_plane_it);
+ },
+ input_it);
+}
+
+Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output,
+ const ITensorInfo *gamma, const ITensorInfo *beta, float epsilon)
+{
+ ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(epsilon == 0.f, "Epsilon must be different than 0");
+
+ ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_layout() == DataLayout::NHWC,
+ "NHWC data layout is not supported by the kernel directly");
+
+ if (output != nullptr && output->total_size() != 0)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_channels() != output->num_channels(),
+ "Input and output have different number of channels");
+ }
+
+ if (gamma != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, gamma);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->dimension(get_data_layout_dimension_index(
+ input->data_layout(), DataLayoutDimension::CHANNEL)) !=
+ gamma->dimension(0),
+ "Gamma's size must be the same as size of input's channel");
+ }
+
+ if (beta != nullptr)
+ {
+ ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, beta);
+ ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->dimension(get_data_layout_dimension_index(
+ input->data_layout(), DataLayoutDimension::CHANNEL)) !=
+ beta->dimension(0),
+ "Beta's size must be the same as size of input's channel");
+ }
+
+ return Status{};
+}
+
+std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
+{
+ // We handle the planes manually
+ Window win = calculate_max_window(*input, Steps(1));
+
+ // Output auto initialization if not yet initialized
+ auto_init_if_empty(*output, input->tensor_shape(), 1, input->data_type());
+
+ // NEInstanceNormalizationLayerKernelEx doesn't need padding so update_window_and_padding() can be
+ // skipped
+ Coordinates coord;
+ coord.set_num_dimensions(output->num_dimensions());
+ output->set_valid_region(ValidRegion(coord, output->tensor_shape()));
+  return std::make_tuple(Status{}, win);
+}
+} // namespace
+
+NEInstanceNormalizationLayerKernelEx::NEInstanceNormalizationLayerKernelEx()
+ : _func(nullptr), _input(nullptr), _output(nullptr), _gamma(nullptr), _beta(nullptr),
+ _epsilon(1e-12)
+{
+}
+
+void NEInstanceNormalizationLayerKernelEx::configure(ITensor *input, ITensor *output,
+ ITensor *gamma, ITensor *beta, float epsilon)
+{
+ ARM_COMPUTE_ERROR_ON_NULLPTR(input);
+
+ _input = input;
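+  // When no output tensor is provided, the normalization is written in-place to the input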
+ _output = output == nullptr ? input : output;
+ _gamma = gamma;
+ _beta = beta;
+ _epsilon = epsilon;
+
+  ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(_input->info(), _output->info(),
+                                                gamma != nullptr ? gamma->info() : nullptr,
+                                                beta != nullptr ? beta->info() : nullptr, epsilon));
+
+ if (_input->info()->data_type() == DataType::F32)
+ {
+ _func = &instance_normalization_nchw<float>;
+ }
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ else if (_input->info()->data_type() == DataType::F16)
+ {
+ _func = &instance_normalization_nchw<float16_t>;
+ }
+#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+ else
+ {
+ ARM_COMPUTE_ERROR("Unsupported data type");
+ }
+
+ // Configure kernel window
+ auto win_config = validate_and_configure_window(_input->info(), _output->info());
+ ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));
+
+ INEKernel::configure(std::get<1>(win_config));
+}
+
+Status NEInstanceNormalizationLayerKernelEx::validate(const ITensorInfo *input,
+ const ITensorInfo *output,
+ const ITensorInfo *gamma,
+ const ITensorInfo *beta, float epsilon)
+{
+ ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, gamma, beta, epsilon));
+ ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(
+ input->clone().get(), (output == nullptr ? input->clone().get() : output->clone().get()))));
+ return Status{};
+}
+
+void NEInstanceNormalizationLayerKernelEx::run(const Window &window, const ThreadInfo &info)
+{
+ ARM_COMPUTE_UNUSED(info);
+ ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
+ ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
+ (*_func)(_input, _output, _gamma, _beta, _epsilon, window);
+}
+} // namespace arm_compute
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ * Copyright (c) 2019 ARM Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include "arm_compute/runtime/NEON/functions/NEInstanceNormalizationLayerEx.h"
+
+#include "arm_compute/core/Helpers.h"
+#include "arm_compute/runtime/NEON/NEScheduler.h"
+
+namespace arm_compute
+{
+NEInstanceNormalizationLayerEx::NEInstanceNormalizationLayerEx(
+ std::shared_ptr<IMemoryManager> memory_manager)
+ : _memory_group(std::move(memory_manager)), _normalization_kernel(), _is_nchw(false),
+ _permute_input(), _permute_output(), _permuted_input(), _permuted_output()
+{
+}
+
+void NEInstanceNormalizationLayerEx::configure(ITensor *input, ITensor *output, ITensor *gamma,
+ ITensor *beta, float epsilon)
+{
+ const DataLayout data_layout = input->info()->data_layout();
+
+ // Configure Kernels
+ _is_nchw = data_layout == DataLayout::NCHW;
+
+ if (!_is_nchw)
+ {
+ _memory_group.manage(&_permuted_input);
+ _memory_group.manage(&_permuted_output);
+
+ // Configure the function to transform the input tensor from NHWC -> NCHW
+ _permute_input.configure(input, &_permuted_input, PermutationVector(1U, 2U, 0U));
+ _permuted_input.info()->set_data_layout(DataLayout::NCHW);
+
+ _normalization_kernel.configure(&_permuted_input, &_permuted_output, gamma, beta, epsilon);
+ _permuted_output.info()->set_data_layout(DataLayout::NCHW);
+
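+    // Transform the result back from NCHW -> NHWC (in-place into input when output is nullptr)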
+ _permute_output.configure(&_permuted_output, output != nullptr ? output : input,
+ PermutationVector(2U, 0U, 1U));
+ _permuted_input.allocator()->allocate();
+ _permuted_output.allocator()->allocate();
+ }
+ else
+ {
+ _normalization_kernel.configure(input, output, gamma, beta, epsilon);
+ }
+}
+
+Status NEInstanceNormalizationLayerEx::validate(const ITensorInfo *input, const ITensorInfo *output,
+ const ITensorInfo *gamma, const ITensorInfo *beta,
+ float epsilon)
+{
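+  // The kernel only supports NCHW, so validate against NCHW clones of the tensor infos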
+ return NEInstanceNormalizationLayerKernelEx::validate(
+ &input->clone()->set_data_layout(DataLayout::NCHW),
+ &output->clone()->set_data_layout(DataLayout::NCHW), gamma, beta, epsilon);
+}
+
+void NEInstanceNormalizationLayerEx::run()
+{
+ MemoryGroupResourceScope scope_mg(_memory_group);
+
+ // Permute input
+ if (!_is_nchw)
+ {
+ _permute_input.run();
+ }
+
+ NEScheduler::get().schedule(&_normalization_kernel, Window::DimZ);
+
+ // Permute output
+ if (!_is_nchw)
+ {
+ _permute_output.run();
+ }
+}
+} // namespace arm_compute
_execution_builder->append(std::move(acl_fn));
}
+void KernelGenerator::visit(const model::operation::InstanceNorm &node)
+{
+ const auto ofm_index{node.getOutputs().at(0)};
+ const auto ifm_index{node.getInputs().at(model::operation::InstanceNorm::Input::INPUT)};
+ const auto gamma_index{node.getInputs().at(model::operation::InstanceNorm::Input::GAMMA)};
+ const auto beta_index{node.getInputs().at(model::operation::InstanceNorm::Input::BETA)};
+
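+  // gamma and beta are 1-D tensors holding one value per input channel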
+ auto ofm_alloc = _tensor_builder->at(ofm_index).get();
+ auto ifm_alloc = _tensor_builder->at(ifm_index).get();
+ auto gamma_alloc = _tensor_builder->at(gamma_index).get();
+ auto beta_alloc = _tensor_builder->at(beta_index).get();
+ auto epsilon = node.param().epsilon;
+ auto activation = node.param().activation;
+
+ auto fn = nnfw::cpp14::make_unique<::arm_compute::NEInstanceNormalizationLayerEx>();
+
+ fn->configure(ifm_alloc->handle(), ofm_alloc->handle(), gamma_alloc->handle(),
+ beta_alloc->handle(), epsilon);
+
+ auto acl_fn = asAclFunction(std::move(fn));
+
+ _execution_builder->append(std::move(acl_fn));
+
+ ActivationBuilder{*_execution_builder}.append(activation, ofm_alloc->handle());
+}
+
void KernelGenerator::visit(const model::operation::L2Normalization &node)
{
const auto ofm_index{node.getOutputs().at(0)};
void visit(const model::operation::FullyConnected &) override;
void visit(const model::operation::Gather &) override;
void visit(const model::operation::HashtableLookup &) override;
+ void visit(const model::operation::InstanceNorm &) override;
void visit(const model::operation::L2Normalization &) override;
void visit(const model::operation::L2Pool2D &) override;
void visit(const model::operation::LocalResponseNormalization &) override;
_tensor_builder->dimCorrection(indices_index, false);
}
+void ShapeFixer::visit(const model::operation::InstanceNorm &) { /* DO NOTHING */}
+
void ShapeFixer::visit(const model::operation::L2Normalization &) { /* DO NOTHING */}
void ShapeFixer::visit(const model::operation::L2Pool2D &) { /* DO NOTHING */}
void visit(const model::operation::FullyConnected &) override;
void visit(const model::operation::Gather &) override;
void visit(const model::operation::HashtableLookup &) override;
+ void visit(const model::operation::InstanceNorm &) override;
void visit(const model::operation::L2Normalization &) override;
void visit(const model::operation::L2Pool2D &) override;
void visit(const model::operation::LocalResponseNormalization &) override;