#include "operation/MeanNode.h"
#include "operation/LocalResponseNormalizationNode.h"
#include "operation/DepthToSpaceNode.h"
+#include "operation/PackNode.h"
#include "operation/ReduceMinNode.h"
#include "operation/SplitNode.h"
#include "operation/UnpackNode.h"
OP(MeanNode , true)
OP(LocalResponseNormalizationNode , true)
OP(DepthToSpaceNode , true)
+OP(PackNode , true)
OP(ReduceMinNode , true)
OP(SplitNode , true)
OP(UnpackNode , true)
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __NEURUN_MODEL_OPERATION_PACK_NODE_H__
+#define __NEURUN_MODEL_OPERATION_PACK_NODE_H__
+
+#include "model/Operation.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+// Pack operation node: stacks `num` input tensors of identical shape along
+// `axis` into a single output tensor (TFLite PACK semantics).
+class PackNode : public model::Operation
+{
+public:
+  // Operation attributes (carried in the node, not as operands).
+  struct Param
+  {
+    int32_t num;  // number of tensors to pack; validated against the input count
+    int32_t axis; // axis to stack along; may be negative (counted from the back)
+  };
+
+public:
+  // `inputs` are the tensors to pack, `outputs` the single packed result.
+  // Fixed mojibake: `¶m` was an HTML-entity corruption of `&param`.
+  PackNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+           const Param &param);
+
+public:
+  void accept(OperationVisitor &v) const override;
+  std::string getName() const override { return "Pack"; }
+
+public:
+  const Param &param() const { return _param; }
+
+private:
+  Param _param;
+};
+} // namespace operation
+} // namespace model
+} // namespace neurun
+#endif // __NEURUN_MODEL_OPERATION_PACK_NODE_H__
}
}
+// Validates a Pack node: the declared tensor count (`num`) must equal the
+// actual number of inputs, `axis` must be a valid (possibly negative) index
+// into the output rank, and every input must share the same shape.
+void OperationValidator::visit(const model::operation::PackNode &node)
+{
+  const auto output_index{node.getOutputs().at(0)};
+  const auto num{node.param().num};
+  const auto axis{node.param().axis};
+
+  const auto &output_shape = _ctx.at(output_index).shape();
+  const auto output_rank = static_cast<int32_t>(output_shape.rank());
+
+  const auto input1_index{node.getInputs().at(0)};
+  // Bind by reference: the shape is only read, so avoid the copy (and stay
+  // consistent with output_shape above).
+  const auto &input_shape = _ctx.at(input1_index).shape();
+
+  // Silence unused-variable warnings in release builds where assert compiles out.
+  UNUSED_RELEASE(num);
+  UNUSED_RELEASE(axis);
+  UNUSED_RELEASE(output_rank);
+
+  assert(num == static_cast<int32_t>(node.getInputs().size()));
+  assert(axis >= -output_rank && axis < output_rank);
+  // Every packed tensor must match the first input's shape.
+  for (const auto &index : node.getInputs())
+  {
+    UNUSED_RELEASE(index);
+    assert(input_shape == _ctx.at(index).shape());
+  }
+}
+
void OperationValidator::visit(const model::operation::ReduceMinNode &node)
{
const auto ofm_index{node.getOutputs().at(0)};
void visit(const model::operation::DequantizeNode &node) override;
void visit(const model::operation::MeanNode &node) override;
void visit(const model::operation::DepthToSpaceNode &node) override;
+ void visit(const model::operation::PackNode &node) override;
void visit(const model::operation::ReduceMinNode &node) override;
void visit(const model::operation::LSTMNode &node) override;
void visit(const model::operation::UnpackNode &node) override;
VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
}
+// Dumps a Pack node: one line listing every input operand index
+// (comma-separated), one line for the single output operand index.
+void Dumper::visit(const model::operation::PackNode &node)
+{
+  VERBOSE(LIR) << "* Pack" << std::endl;
+  std::string inputs;
+  bool first = true;
+  for (const auto &index : node.getInputs())
+  {
+    if (!first)
+      inputs += ", ";
+    first = false;
+    inputs += std::to_string(index.value());
+  }
+  VERBOSE(LIR) << " - Inputs : Inputs(" << inputs << ")" << std::endl;
+  VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
+}
+
void Dumper::visit(const PermuteNode &node)
{
std::string permute_type = "Unknown";
void visit(const model::operation::MeanNode &) override;
void visit(const model::operation::MulNode &) override;
void visit(const model::operation::NegNode &) override;
+ void visit(const model::operation::PackNode &) override;
void visit(const model::operation::PermuteNode &node) override;
void visit(const model::operation::PReLUNode &) override;
void visit(const model::operation::ReduceMaxNode &) override;
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "model/operation/PackNode.h"
+#include "model/OperationVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+// Double-dispatch hook: forwards to the visitor's PackNode overload.
+void PackNode::accept(OperationVisitor &v) const { v.visit(*this); }
+// Constructs a Pack node over `inputs` (the tensors to stack) producing
+// `outputs`; `param` carries the num/axis attributes.
+// Fixed mojibake: `¶m` was an HTML-entity corruption of `&param`.
+// NOTE(review): the constraint demands at least 3 inputs, but the
+// ANEURALNETWORKS_PACK_EX factory can legally supply fewer tensors
+// (input_count - 2 may be as small as 1) -- confirm the intended minimum.
+PackNode::PackNode(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
+                   const Param &param)
+    : model::Operation{OperandConstraint::createAtLeast(3u), inputs, outputs}, _param{param}
+{
+}
+} // namespace operation
+} // namespace model
+} // namespace neurun
void loadSub(const Operator *op);
void loadMul(const Operator *op);
void loadDiv(const Operator *op);
+ void loadPack(const Operator *op);
void loadRelu(const Operator *op);
void loadRelu6(const Operator *op);
void loadResizeBilinear(const Operator *op);
}
template <typename LoaderDomain, typename SpecificLoader>
+// Loads a TFLite PACK operator into a PackNode.
+// No backend implements Pack yet, so loading aborts up front; the wiring
+// below the throw is kept ready for when support lands.
+void BaseLoader<LoaderDomain, SpecificLoader>::loadPack(const Operator *op)
+{
+ // This runtime_error will be removed if the one of backend supports this operation
+ throw std::runtime_error("NYI");
+ // NOTE: everything below is deliberately unreachable until the throw above
+ // is removed.
+ model::OperandIndexSequence inputs;
+ model::OperandIndexSequence outputs;
+
+ loadOperationIO(op, inputs, outputs);
+
+ // num/axis come from the flatbuffer PackOptions table, not from operands.
+ model::operation::PackNode::Param param;
+ const auto *options = op->builtin_options_as_PackOptions();
+ param.num = options->values_count();
+ param.axis = options->axis();
+
+ std::unique_ptr<model::Operation> new_op(new model::operation::PackNode(inputs, outputs, param));
+ _graph.addOperation(std::move(new_op));
+}
+
+template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadRelu(const Operator *op)
{
model::OperandIndexSequence inputs;
case BuiltinOperator::BuiltinOperator_DIV:
loadDiv(op);
return;
+ case BuiltinOperator::BuiltinOperator_PACK:
+ loadPack(op);
+ return;
case BuiltinOperator::BuiltinOperator_RELU:
loadRelu(op);
return;
return new operation::DepthToSpaceNode{inputs, outputs, param};
};
+ // Factory for ANEURALNETWORKS_PACK_EX.
+ // Operand layout: inputs[0 .. n-3] are the tensors to pack; the last two
+ // inputs are int32 scalars holding num (tensor count) and axis.
+ _map[ANEURALNETWORKS_PACK_EX] = [](const OperationFactory::Param &init_param,
+ neurun::model::Operands &operands) {
+ assert(init_param.input_count >= 3 && init_param.output_count == 1);
+
+ OperandIndexSequence outputs{init_param.outputs[0]};
+ OperandIndexSequence inputs;
+ // All but the trailing num/axis operands are data tensors.
+ for (uint32_t n = 0; n < init_param.input_count - 2; ++n)
+ {
+ inputs.append(OperandIndex{init_param.inputs[n]});
+ }
+
+ operation::PackNode::Param param;
+ const auto num_index = OperandIndex{init_param.inputs[init_param.input_count - 2]};
+ const auto axis_index = OperandIndex{init_param.inputs[init_param.input_count - 1]};
+ // num/axis are read as constant scalars at model-build time.
+ param.num = operands.at(num_index).asScalar<int32_t>();
+ param.axis = operands.at(axis_index).asScalar<int32_t>();
+
+ return new operation::PackNode{inputs, outputs, param};
+ };
+
_map[ANEURALNETWORKS_REDUCE_MIN_EX] = [](const OperationFactory::Param &init_param,
neurun::model::Operands &) {
assert(init_param.input_count == 2 && init_param.output_count == 1);