This commit enables `L2Pool2D` op for `acl_cl`.
Signed-off-by: sjsujinkim <sjsujin.kim@samsung.com>
return new operation::SpaceToDepthNode{inputs, outputs, param};
};
+
+ _map[ANEURALNETWORKS_L2_POOL_2D] = [](const OperationFactory::Param &init_param) {
+ // 10 inputs -> explicit padding, 7 inputs -> implicit padding (padding code)
+ assert(init_param.input_count == 10 || init_param.input_count == 7);
+ assert(init_param.output_count == 1);
+
+ operand::IndexSet outputs{init_param.outputs[0]};
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> IFM Tensor Index
+ operand::IndexSet inputs{init_param.inputs[0]};
+
+ operation::L2Pool2DNode::Param param;
+
+ if (init_param.input_count == 7) // Implicit Padding case
+ {
+ // 1 -> Padding Code (ANEURALNETWORKS_PADDING_SAME or ANEURALNETWORKS_PADDING_VALID) Index
+ // 2 -> Horizontal (over width) Stride Index
+ // 3 -> Vertical (over height) Stride Index
+ // 4 -> Filter Width Index
+ // 5 -> Filter Height Index
+ // 6 -> FuseCode (activation) Index
+ param.padding_index = operand::Index{init_param.inputs[1]};
+ param.hstride_index = operand::Index{init_param.inputs[2]};
+ param.vstride_index = operand::Index{init_param.inputs[3]};
+ param.kw_index = operand::Index{init_param.inputs[4]};
+ param.kh_index = operand::Index{init_param.inputs[5]};
+ param.activation_index = operand::Index{init_param.inputs[6]};
+
+ param.explicit_padding = false;
+ }
+ else // Explicit Padding case
+ {
+ // 1 -> Padding_left index
+ // 2 -> Padding_right index
+ // 3 -> Padding_top index
+ // 4 -> Padding_bottom index
+ // 5 -> Horizontal (over width) Stride Index
+ // 6 -> Vertical (over height) Stride Index
+ // 7 -> Filter Width Index
+ // 8 -> Filter Height Index
+ // 9 -> FuseCode (activation) Index
+
+ param.padding_left_index = operand::Index{init_param.inputs[1]};
+ param.padding_right_index = operand::Index{init_param.inputs[2]};
+ param.padding_top_index = operand::Index{init_param.inputs[3]};
+ param.padding_bottom_index = operand::Index{init_param.inputs[4]};
+ param.hstride_index = operand::Index{init_param.inputs[5]};
+ param.vstride_index = operand::Index{init_param.inputs[6]};
+ param.kw_index = operand::Index{init_param.inputs[7]};
+ param.kh_index = operand::Index{init_param.inputs[8]};
+ param.activation_index = operand::Index{init_param.inputs[9]};
+
+ param.explicit_padding = true;
+ }
+
+ return new operation::L2Pool2DNode{inputs, outputs, param};
+ };
}
neurun::model::operation::Node *OperationFactory::create(ANeuralNetworksOperationType type,
asQuantizationInfo(typeInfo.scale(), typeInfo.offset()));
}
+// Converts neurun padding/stride parameters into an ACL PadStrideInfo,
+// using FLOOR dimension rounding when ACL derives output sizes.
+::arm_compute::PadStrideInfo asPadStrideInfo(const neurun::util::Padding &padding,
+ const neurun::util::Stride &stride)
+{
+ return ::arm_compute::PadStrideInfo{stride.horizontal,
+ stride.vertical,
+ padding.left,
+ padding.right,
+ padding.top,
+ padding.bottom,
+ ::arm_compute::DimensionRoundingType::FLOOR};
+}
+
} // namespace acl_cl
} // namespace backend
} // namespace neurun
#include "misc/feature/Shape.h"
#include "misc/kernel/Shape.h"
+#include "util/Padding.h"
+
namespace neurun
{
namespace backend
const ::neurun::model::operand::TypeInfo &typeInfo,
bool apply_dim_correction = true);
+::arm_compute::PadStrideInfo asPadStrideInfo(const neurun::util::Padding &padding,
+ const neurun::util::Stride &stride);
+
} // namespace acl_cl
} // namespace backend
} // namespace neurun
});
}
+// Builds the acl_cl stage for L2Pool2D: configures a CLPoolingLayer with
+// PoolingType::L2 and then appends the fused activation (FuseCode).
+void StageGenerator::visit(const model::operation::L2Pool2DNode &node)
+{
+ const auto ofm_index{node.getOutputs().at(0)};
+ const auto ifm_index{node.getInputs().at(model::operation::L2Pool2DNode::Input::INPUT)};
+
+ const auto kh_index{node.param().kh_index};
+ const auto kw_index{node.param().kw_index};
+
+ const auto vstride_index{node.param().vstride_index};
+ const auto hstride_index{node.param().hstride_index};
+
+ const auto activation_index{node.param().activation_index};
+
+ const auto ofm_shape = _ctx.at(ofm_index).shape().asFeature();
+ const auto ifm_shape = _ctx.at(ifm_index).shape().asFeature();
+
+ const int32_t kh = _ctx.at(kh_index).asScalar<int32_t>();
+ const int32_t kw = _ctx.at(kw_index).asScalar<int32_t>();
+
+ neurun::util::Stride stride;
+
+ stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
+ stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
+
+ // Construct operation parameters
+ struct Param
+ {
+ model::operand::Index ofm_index;
+ model::operand::Index ifm_index;
+
+ uint32_t kw;
+ uint32_t kh;
+
+ neurun::util::Padding padding;
+ neurun::util::Stride stride;
+
+ FuseCode activation;
+ };
+
+ Param param;
+
+ param.ofm_index = ofm_index;
+ param.ifm_index = ifm_index;
+
+ param.kw = kw;
+ param.kh = kh;
+
+ param.stride = stride;
+
+ // TODO : Extract this to a function
+ param.padding = [&]() {
+ if (!node.param().explicit_padding) // implicit padding
+ {
+ const auto padding_index{node.param().padding_index};
+
+ const PaddingCode padding_type =
+ static_cast<PaddingCode>(_ctx.at(padding_index).asScalar<int32_t>());
+
+ assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
+ (ANEURALNETWORKS_PADDING_VALID == padding_type));
+
+ return (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ ? neurun::util::same_padding(ifm_shape, ofm_shape, stride, kw, kh)
+ : neurun::util::valid_padding();
+ }
+ else // explicit padding
+ {
+ neurun::util::Padding padding;
+ // These are already operand::Index values - pass them directly
+ // (no need for the extra brace-init copy).
+ padding.left = _ctx.at(node.param().padding_left_index).asScalar<int32_t>();
+ padding.right = _ctx.at(node.param().padding_right_index).asScalar<int32_t>();
+ padding.top = _ctx.at(node.param().padding_top_index).asScalar<int32_t>();
+ padding.bottom = _ctx.at(node.param().padding_bottom_index).asScalar<int32_t>();
+
+ return padding;
+ }
+ }();
+
+ param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+
+ auto tensors = _tensor_builder;
+
+ // Capture by value: the stage lambda runs later, after this visit returns.
+ returnStage([tensors, param](IExecutionBuilder &builder) {
+ auto ofm_alloc = tensors->at(param.ofm_index).get();
+ auto ifm_alloc = tensors->at(param.ifm_index).get();
+
+ ::arm_compute::PoolingLayerInfo info{::arm_compute::PoolingType::L2,
+ ::arm_compute::Size2D{param.kw, param.kh},
+ asPadStrideInfo(param.padding, param.stride)};
+
+ std::unique_ptr<::arm_compute::IFunction> fn;
+
+ auto l = make_layer<::arm_compute::CLPoolingLayer>();
+
+ l->configure(ifm_alloc->handle(), ofm_alloc->handle(), info);
+
+ fn = std::move(l);
+
+ auto acl_fn = make_cl_function(std::move(fn));
+
+ builder.append(std::move(acl_fn));
+
+ // Apply the fused activation on the pooling output
+ ActivationBuilder{builder}.append(param.activation, ofm_alloc->handle());
+ });
+}
+
} // namespace acl_cl
} // namespace backend
} // namespace neurun
virtual void visit(const model::operation::ReLU6Node &) override;
virtual void visit(const model::operation::FloorNode &) override;
virtual void visit(const model::operation::SpaceToDepthNode &) override;
+ virtual void visit(const model::operation::L2Pool2DNode &) override;
private:
const neurun::model::operand::Set &_ctx;
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "L2Pool2DNode.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+// Double-dispatch entry point: forwards this node to the visitor.
+void L2Pool2DNode::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+// createExact(1u) enforces exactly one input operand (the IFM tensor);
+// all scalar parameters are carried via Param indices instead.
+L2Pool2DNode::L2Pool2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+ const Param &param)
+ : model::operation::Node{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_L2_POOL_2D_NODE_H__
+#define __NEURUN_MODEL_OPERATION_L2_POOL_2D_NODE_H__
+
+#include <memory>
+
+#include "model/operation/Node.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+// Model node for the NNAPI L2_POOL_2D operation.
+class L2Pool2DNode : public model::operation::Node
+{
+public:
+ enum Input
+ {
+ INPUT = 0,
+ };
+
+ // Operand indices for the op's scalar parameters.
+ struct Param
+ {
+ // Used only when explicit_padding == false (implicit padding code)
+ operand::Index padding_index;
+
+ // Used only when explicit_padding == true
+ operand::Index padding_left_index;
+ operand::Index padding_right_index;
+ operand::Index padding_top_index;
+ operand::Index padding_bottom_index;
+
+ // Horizontal / vertical stride indices
+ operand::Index hstride_index;
+ operand::Index vstride_index;
+
+ // Filter (kernel) width / height indices
+ operand::Index kw_index;
+ operand::Index kh_index;
+
+ // FuseCode (activation) index
+ operand::Index activation_index;
+
+ // true -> 4 explicit padding operands, false -> single padding code
+ bool explicit_padding;
+ };
+
+public:
+ L2Pool2DNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+ const Param &param);
+
+public:
+ virtual void accept(NodeVisitor &&) const override;
+ virtual std::string getName() const override { return "L2Pool2D"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_L2_POOL_2D_NODE_H__
#include "ReLU6Node.h"
#include "FloorNode.h"
#include "SpaceToDepthNode.h"
+#include "L2Pool2DNode.h"
OP(ReLU6Node , true , RELU6)
OP(FloorNode , true , FLOOR)
OP(SpaceToDepthNode , true , SPACE_TO_DEPTH)
+OP(L2Pool2DNode , true , L2_POOL_2D)
OP(PermuteNode , false , NOT_AVAILABLE)
GeneratedTests.equal_ex*
GeneratedTests.hashtable_lookup*
GeneratedTests.l2_normalization*
-GeneratedTests.l2_pool*
GeneratedTests.local_response_norm*
GeneratedTests.logical_or_ex*
GeneratedTests.lsh_projection*
exp
floor
fullyconnected/fc1
+l2_pool_2d
max_pool_2d
MODELS/mobilenet
mul/broadcast