});
}
+// Builds the ACL-CL execution stage for element-wise LogicalOr.
+// If the two input shapes differ, both operand ranks are extended in place to
+// the larger rank so the kernel can broadcast (the const_cast mutates the
+// shapes stored in _ctx directly).
+// NOTE(review): lowered to CLBitwiseOr — this assumes boolean operands use a
+// representation where bitwise OR equals logical OR; confirm against the
+// operand data type (e.g. 0/255 quantized booleans).
+void StageGenerator::visit(const model::operation::LogicalOrNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input0_index{node.getInputs().at(model::operation::LogicalOrNode::Input::INPUT0)};
+ const auto input1_index{node.getInputs().at(model::operation::LogicalOrNode::Input::INPUT1)};
+
+ // Broadcast preparation: bring both inputs to a common rank when shapes differ.
+ if (!(_ctx.at(input0_index).shape() == _ctx.at(input1_index).shape()))
+ {
+ const auto broadcast_rank =
+ std::max(_ctx.at(input0_index).shape().rank(), _ctx.at(input1_index).shape().rank());
+ const_cast<::neurun::model::operand::Shape &>(_ctx.at(input0_index).shape())
+ .extendRank(broadcast_rank);
+ const_cast<::neurun::model::operand::Shape &>(_ctx.at(input1_index).shape())
+ .extendRank(broadcast_rank);
+ }
+
+ // Construct operation parameters
+ // (only operand indices are stored; they are captured by value in the stage
+ // lambda below so the closure stays valid after this visit returns)
+ struct Param
+ {
+ model::operand::Index output_index;
+ model::operand::Index input0_index;
+ model::operand::Index input1_index;
+ };
+
+ Param param;
+
+ param.output_index = output_index;
+ param.input0_index = input0_index;
+ param.input1_index = input1_index;
+
+ auto tensors = _tensor_builder;
+
+ // Deferred build: the CL function is configured when the stage executes,
+ // after the tensor builder has allocated the backing tensors.
+ returnStage([tensors, param](IExecutionBuilder &builder) {
+ auto output_alloc = tensors->at(param.output_index).get();
+ auto input0_alloc = tensors->at(param.input0_index).get();
+ auto input1_alloc = tensors->at(param.input1_index).get();
+
+ std::unique_ptr<::arm_compute::IFunction> fn;
+
+ auto l = make_layer<::arm_compute::CLBitwiseOr>();
+
+ l->configure(input0_alloc->handle(), input1_alloc->handle(), output_alloc->handle());
+
+ fn = std::move(l);
+
+ auto acl_fn = make_cl_function(std::move(fn));
+
+ builder.append(std::move(acl_fn));
+ });
+}
+
+// Builds the ACL-CL execution stage for element-wise LogicalNot.
+// NOTE(review): lowered to CLBitwiseNot — this assumes boolean operands use a
+// representation where bitwise NOT equals logical NOT; confirm against the
+// operand data type.
+void StageGenerator::visit(const model::operation::LogicalNotNode &node)
+{
+ const auto output_index{node.getOutputs().at(0)};
+ const auto input_index{node.getInputs().at(model::operation::LogicalNotNode::Input::INPUT)};
+
+ // Construct operation parameters
+ // (only operand indices are stored; captured by value in the stage lambda)
+ struct Param
+ {
+ model::operand::Index output_index;
+ model::operand::Index input_index;
+ };
+
+ Param param;
+
+ param.output_index = output_index;
+ param.input_index = input_index;
+
+ auto tensors = _tensor_builder;
+
+ // Deferred build: the CL function is configured when the stage executes,
+ // after the tensor builder has allocated the backing tensors.
+ returnStage([tensors, param](IExecutionBuilder &builder) {
+ auto output_alloc = tensors->at(param.output_index).get();
+ auto input_alloc = tensors->at(param.input_index).get();
+
+ std::unique_ptr<::arm_compute::IFunction> fn;
+
+ auto l = make_layer<::arm_compute::CLBitwiseNot>();
+
+ l->configure(input_alloc->handle(), output_alloc->handle());
+
+ fn = std::move(l);
+
+ auto acl_fn = make_cl_function(std::move(fn));
+
+ builder.append(std::move(acl_fn));
+ });
+}
+
} // namespace acl_cl
} // namespace backend
} // namespace neurun
virtual void visit(const model::operation::PReLUNode &) override;
virtual void visit(const model::operation::TransposeConvNode &) override;
virtual void visit(const model::operation::SQRTNode &) override;
+ virtual void visit(const model::operation::LogicalOrNode &) override;
+ virtual void visit(const model::operation::LogicalNotNode &) override;
private:
const neurun::model::operand::Set &_ctx;
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_LOGICAL_NOT_NODE_H__
+#define __NEURUN_MODEL_OPERATION_LOGICAL_NOT_NODE_H__
+
+#include "model/operation/Node.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+/**
+ * @brief IR node for the element-wise LogicalNot operation (one input, one output).
+ */
+class LogicalNotNode : public model::operation::Node
+{
+public:
+ // Symbolic index of the node's single input operand.
+ enum Input
+ {
+ INPUT = 0,
+ };
+
+public:
+ LogicalNotNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs);
+
+public:
+ // Double-dispatch hook for NodeVisitor; getName identifies the op in dumps/logs.
+ virtual void accept(NodeVisitor &&) const override;
+ virtual std::string getName() const override { return "LogicalNot"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_LOGICAL_NOT_NODE_H__
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_MODEL_OPERATION_LOGICAL_OR_NODE_H__
+#define __NEURUN_MODEL_OPERATION_LOGICAL_OR_NODE_H__
+
+#include "model/operation/Node.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+/**
+ * @brief IR node for the element-wise LogicalOr operation (two inputs, one output).
+ */
+class LogicalOrNode : public model::operation::Node
+{
+public:
+ // Symbolic indices of the node's two input operands.
+ enum Input
+ {
+ INPUT0 = 0,
+ INPUT1 = 1,
+ };
+
+public:
+ LogicalOrNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs);
+
+public:
+ // Double-dispatch hook for NodeVisitor; getName identifies the op in dumps/logs.
+ virtual void accept(NodeVisitor &&) const override;
+ virtual std::string getName() const override { return "LogicalOr"; }
+};
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
+
+#endif // __NEURUN_MODEL_OPERATION_LOGICAL_OR_NODE_H__
#include "ReduceMaxNode.h"
#include "NotEqualNode.h"
#include "LogicalAndNode.h"
+#include "LogicalOrNode.h"
+#include "LogicalNotNode.h"
#include "RSQRTNode.h"
#include "ReLUNode.h"
#include "ResizeBilinearNode.h"
OP(ReduceMaxNode , true)
OP(NotEqualNode , true)
OP(LogicalAndNode , true)
+OP(LogicalOrNode , true)
+OP(LogicalNotNode , true)
OP(RSQRTNode , true)
OP(ReLUNode , true)
OP(ResizeBilinearNode , true)
OP(TransposeConvNode , true)
OP(SQRTNode , true)
OP(PermuteNode , false)
+
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/LogicalNotNode.h"
+
+#include <cassert>
+
+#include "model/operation/NodeVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+// Dispatches this node to the visitor (double dispatch).
+void LogicalNotNode::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+// Constrains the node to exactly one input operand.
+LogicalNotNode::LogicalNotNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs)
+ : model::operation::Node{OperandConstraint::createExact(1u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "model/operation/LogicalOrNode.h"
+
+#include <cassert>
+
+#include "model/operation/NodeVisitor.h"
+
+namespace neurun
+{
+namespace model
+{
+namespace operation
+{
+
+// Dispatches this node to the visitor (double dispatch).
+void LogicalOrNode::accept(NodeVisitor &&v) const { v.visit(*this); }
+
+// Constrains the node to exactly two input operands.
+LogicalOrNode::LogicalOrNode(const operand::IndexSet &inputs, const operand::IndexSet &outputs)
+ : model::operation::Node{OperandConstraint::createExact(2u), inputs, outputs}
+{
+}
+
+} // namespace operation
+} // namespace model
+} // namespace neurun
operand::IndexSet inputs{init_param.inputs[0]};
return new operation::SQRTNode{inputs, outputs};
};
+
+ // ANEURALNETWORKS_LOGICAL_OR_EX -> LogicalOrNode (2 inputs, 1 output).
+ // NOTE(review): raw `new` — ownership presumably transfers to the factory's
+ // caller; verify against the OperationFactory contract.
+ _map[ANEURALNETWORKS_LOGICAL_OR_EX] = [](const OperationFactory::Param &init_param) {
+ assert(init_param.input_count == 2 && init_param.output_count == 1);
+
+ operand::IndexSet outputs{init_param.outputs[0]};
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> input0 Tensor Index
+ // 1 -> input1 Tensor Index
+ operand::IndexSet inputs{init_param.inputs[0], init_param.inputs[1]};
+
+ return new operation::LogicalOrNode{inputs, outputs};
+ };
+
+ // ANEURALNETWORKS_LOGICAL_NOT_EX -> LogicalNotNode (1 input, 1 output).
+ _map[ANEURALNETWORKS_LOGICAL_NOT_EX] = [](const OperationFactory::Param &init_param) {
+ assert(init_param.input_count == 1 && init_param.output_count == 1);
+
+ operand::IndexSet outputs{init_param.outputs[0]};
+
+ // Each input should be interpreted as follows:
+ //
+ // 0 -> input Tensor Index
+ operand::IndexSet inputs{init_param.inputs[0]};
+
+ return new operation::LogicalNotNode{inputs, outputs};
+ };
}
neurun::model::operation::Node *OperationFactory::create(ANeuralNetworksOperationType type,
GeneratedTests.dequantize
GeneratedTests.equal_ex*
GeneratedTests.local_response_norm*
-GeneratedTests.logical_or_ex*
GeneratedTests.lsh_projection*
GeneratedTests.lstm*
GeneratedTests.mobilenet*
GeneratedTests.gather_ex*
GeneratedTests.topk_v2*
# Unexpected result
+GeneratedTests.logical_or_ex_broadcast_4D_2D
GeneratedTests.split*
GeneratedTests.pack*
GeneratedTests.unpack*
-GeneratedTests.logical_not_ex*