#include <arm_compute/runtime/CL/functions/CLStridedSlice.h>
#include <arm_compute/runtime/CL/functions/CLPixelWiseMultiplication.h>
#include <arm_compute/runtime/CL/functions/CLCast.h>
+#include <arm_compute/runtime/CL/functions/CLArithmeticDivision.h>
#include "kernel/ConcatLayer.h"
});
}
+void StageGenerator::visit(const model::operation::DivNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto lhs_index{node.getInputs().at(model::operation::DivNode::Input::LHS)};
+ const auto rhs_index{node.getInputs().at(model::operation::DivNode::Input::RHS)};
+ const auto activation_index{node.param().activation_index};
+
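+ // When the input shapes differ, extend both operand shapes to the larger rank so they can be broadcast.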
+ if (!(_ctx.at(lhs_index).shape() == _ctx.at(rhs_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(lhs_index).shape().rank(), _ctx.at(rhs_index).shape().rank());
+
+ // TODO Remove this const_cast later. For example, _ctx may need to become non-const,
+ //      or a node that extends the shape may be inserted in front of this operation.
+ const_cast<::neurun::model::operand::Shape &>(_ctx.at(lhs_index).shape())
+ .extendRank(broadcast_rank);
+ const_cast<::neurun::model::operand::Shape &>(_ctx.at(rhs_index).shape())
+ .extendRank(broadcast_rank);
+ }
+
+ // Construct operation parameters
+ struct Param
+ {
+ model::operand::Index ofm_index;
+ model::operand::Index lhs_index;
+ model::operand::Index rhs_index;
+
+ FuseCode activation;
+ };
+
+ Param param;
+
+ param.ofm_index = output_index;
+ param.lhs_index = lhs_index;
+ param.rhs_index = rhs_index;
+
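+ // The activation operand holds a scalar FuseCode selecting the activation fused after the division.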
+ param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
+
+ auto tensors = _tensor_builder;
+
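+ // Return a stage that configures and appends the ACL function when the execution plan is built.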
+ returnStage([tensors, param](IExecutionBuilder &builder) {
+ auto ofm_alloc = tensors->at(param.ofm_index).get();
+ auto lhs_alloc = tensors->at(param.lhs_index).get();
+ auto rhs_alloc = tensors->at(param.rhs_index).get();
+
+ std::unique_ptr<::arm_compute::IFunction> fn;
+
+ auto l = make_layer<::arm_compute::CLArithmeticDivision>();
+
+ l->configure(lhs_alloc->handle(), rhs_alloc->handle(), ofm_alloc->handle());
+
+ fn = std::move(l);
+
+ auto acl_fn = make_cl_function(std::move(fn));
+
+ builder.append(std::move(acl_fn));
+
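+ // Apply the fused activation (if any) to the division output.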
+ ActivationBuilder{builder}.append(param.activation, ofm_alloc->handle());
+ });
+}
+
} // namespace acl_cl
} // namespace backend
} // namespace neurun
virtual void visit(const model::operation::AddNode &) override;
virtual void visit(const model::operation::SubNode &) override;
virtual void visit(const model::operation::CastNode &) override;
+ virtual void visit(const model::operation::DivNode &) override;
private:
const neurun::model::operand::Set &_ctx;
return new operation::LogisticNode{inputs, outputs};
};
+
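+ // ANEURALNETWORKS_DIV: two input tensors and a fused-activation scalar produce one output tensor.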
+ _map[ANEURALNETWORKS_DIV] = [](const OperationFactory::Param &init_param) {
+ assert(init_param.input_count == 3 && init_param.output_count == 1);
+
+ operand::IndexSet outputs{init_param.outputs[0]};
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> LHS Tensor Index
+ // 1 -> RHS Tensor Index
+ // 2 -> Activation Index
+ operand::IndexSet inputs{init_param.inputs[0], init_param.inputs[1]};
+
+ operation::DivNode::Param param;
+ param.activation_index = operand::Index{init_param.inputs[2]};
+
+ return new operation::DivNode{inputs, outputs, param};
+ };
}
neurun::model::operation::Node *OperationFactory::create(ANeuralNetworksOperationType type,
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "DivNode.h"
+
+#include <cassert>
+
+#include "NodeVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+void DivNode::accept(NodeVisitor &&v) const { v.visit(*this); }
+
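+// DivNode takes exactly two data inputs (LHS and RHS); the fused activation index is carried in Param.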
+DivNode::DivNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs,
+ const Param &param)
+ : model::operation::Node{OperandConstraint::createExact(2u), inputs, outputs}, _param{param}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_DIV_NODE_H__
+#define __NEURUN_MODEL_OPERATION_DIV_NODE_H__
+
+#include "model/operation/Node.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+class DivNode : public model::operation::Node
+{
+public:
+ enum Input
+ {
+ LHS = 0,
+ RHS
+ };
+
+ struct Param
+ {
+ operand::Index activation_index;
+ };
+
+public:
+ DivNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs, const Param &param);
+
+public:
+ virtual void accept(NodeVisitor &&) const override;
+ virtual std::string getName() const override { return "Div"; }
+
+public:
+ const Param &param() const { return _param; }
+
+private:
+ Param _param;
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_DIV_NODE_H__
#include "TanhNode.h"
#include "LogisticNode.h"
#include "CastNode.h"
+#include "DivNode.h"
OP(StridedSliceNode , true , STRIDED_SLICE)
OP(TanhNode , true , TANH)
OP(LogisticNode , true , LOGISTIC)
+OP(DivNode , true , DIV)
OP(PermuteNode , false , NOT_AVAILABLE)
GeneratedTests.squared_difference_ex*
GeneratedTests.svdf*
GeneratedTests.batch_to_space*
-GeneratedTests.div_*
GeneratedTests.space_to_batch*
GeneratedTests.squeeze*
GeneratedTests.transpose*
concat
conv_2d
depthwise_conv_2d
+div/broadcast
fullyconnected/fc1
max_pool_2d
MODELS/mobilenet