The `ElementwiseOp` operation was removed; it was replaced by several binary elementwise operations.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
void visit(mir::ops::DepthwiseConv2DOp& op) override;
void visit(mir::ops::DivOp& op) override;
void visit(mir::ops::DropoutOp& op) override;
- void visit(mir::ops::ElementwiseOp& op) override;
void visit(mir::ops::EluOp& op) override;
void visit(mir::ops::FullyConnectedOp& op) override;
void visit(mir::ops::GatherOp& op) override;
genActivation(op, "TANH");
}
-void AclCppOpGenerator::visit(ops::ElementwiseOp& op) {
- assert(op.getNumInputs() >= 2);
- const auto& ir_inputs = op.getInputs();
- const auto* ir_output = op.getOutput(0);
-
- // Create the output tensor in the DOM and obtain its identifier.
- auto out = genTensor(ir_output);
- addToPersistentTensors(out);
-
- // Get the identifier of the first input tensor in the DOM.
- auto in1 = AF::id(tensorName(ir_inputs[0].getProducer()));
-
- for (size_t i = 1; i < ir_inputs.size(); ++i) {
- const auto* ir_input = ir_inputs[i].getProducer();
-
- // Get the identifier of the second input tensor in the DOM.
- auto in2 = AF::id(tensorName(ir_input));
-
- // Chaining the partial results of binary operations.
- // On the last iteration the result is saved in the node output.
- // Different ACL layers used to implement different types of elementwise operations.
- switch (op.getOpType()) {
- case ops::ElementwiseOp::OpType::mul:
- in1 = genMultiplication(out->name() + "_" + "multiplication", i - 1, ir_input->getShape(),
- in1, in2, i == ir_inputs.size() - 1 ? out : nullptr);
- break;
- case ops::ElementwiseOp::OpType::add:
- in1 = genAddition(out->name() + "_" + "addition", i - 1, ir_input->getShape(),
- in1, in2, i == ir_inputs.size() - 1 ? out : nullptr);
- break;
- default:
- throw AclCppException("This min elementwise operation is currently not supported");
- }
- }
-}
-
void AclCppOpGenerator::visit(ops::DeConv2DOp& op) {
genConvolution(op, "arm_compute::CLDeconvolutionLayer", "_deconvolution_layer");
}
void visit(mir::ops::DepthwiseConv2DOp& op) override;
void visit(mir::ops::DivOp& op) override;
void visit(mir::ops::DropoutOp& op) override;
- void visit(mir::ops::ElementwiseOp& op) override;
void visit(mir::ops::EluOp& op) override;
void visit(mir::ops::FullyConnectedOp& op) override;
void visit(mir::ops::GatherOp& op) override;
#include "ops/DepthwiseConv2D.h"
#include "ops/Div.h"
#include "ops/Dropout.h"
-#include "ops/Elementwise.h"
#include "ops/FullyConnected.h"
#include "ops/Gather.h"
#include "ops/Gemm.h"
setOutputTensors(op, std::move(outputs));
}
-void NNInterpreter::visit(ops::ElementwiseOp& op) {
- auto inputs = getInputTensors(op);
- auto outputs = Elementwise(inputs, op)();
- setOutputTensors(op, std::move(outputs));
-}
-
void NNInterpreter::visit(ops::DeConv2DOp& op) {
auto inputs = getInputTensors(op);
auto outputs = DeConv2D(inputs[0], inputs[1], op)();
+++ /dev/null
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef _NNC_CORE_BACKEND_INTERPRETER_ELEMENTWISE_
-#define _NNC_CORE_BACKEND_INTERPRETER_ELEMENTWISE_
-
-#include "OperationImpl.h"
-#include "mir/ops/ElementwiseOp.h"
-#include "mir/Tensor.h"
-
-namespace nnc {
-
-class Elementwise : public OperationImpl<float> {
-public:
- Elementwise(const std::vector<std::reference_wrapper<const mir::TensorVariant>>& inputs,
- const mir::ops::ElementwiseOp& op)
- : _inputs(inputs), _op(op) {}
-
- std::vector<mir::TensorVariant> operator()() override {
- std::vector<mir::Tensor<float>> ins;
- // Reserve space for tensor variants to avoid reference invalidation when pushing into vector
- std::vector<mir::TensorVariant> broadcasted{};
- broadcasted.reserve(_op.getNumInputs());
-
- for (auto in : _inputs) {
- if (_op.getBroadcast()) {
- broadcasted.emplace_back(in, _op.getOutputShape(0));
- ins.emplace_back(broadcasted.back());
- } else {
- ins.emplace_back(in);
- }
- }
-
- float (* func)(float, float);
- switch (_op.getOpType()) {
- case mir::ops::ElementwiseOp::OpType::add:
- func = [](float a, float b) { return a + b; };
- break;
- case mir::ops::ElementwiseOp::OpType::mul:
- func = [](float a, float b) { return a * b; };
- break;
- case mir::ops::ElementwiseOp::OpType::max:
- func = [](float a, float b) { return std::max(a, b); };
- break;
- case mir::ops::ElementwiseOp::OpType::div:
- func = [](float a, float b) { return a / b; };
- break;
- case mir::ops::ElementwiseOp::OpType::sub:
- func = [](float a, float b) { return a - b; };
- break;
- default:
- assert(false && "Unsupported Optype");
- break;
- }
-
- auto res = allocate_tensor(_op.getOutputShape(0));
- mir::Tensor<float> res_accessor(res);
-
- for (const auto& index : mir::ShapeRange(_op.getOutputShape(0))) {
- float acc = ins[0].at(index);
- for (std::size_t i = 1; i < ins.size(); ++i) {
- acc = func(acc, ins[i].at(index));
- }
- res_accessor.at(index) = acc;
- }
-
- return {res};
- }
-
-private:
- const std::vector<std::reference_wrapper<const mir::TensorVariant>>& _inputs;
- const mir::ops::ElementwiseOp& _op;
-};
-
-} // namespace nnc
-
-#endif //_NNC_CORE_BACKEND_INTERPRETER_ELEMENTWISE_
appendOperationToInference(&op, "tanhActivation");
}
-void ModelAnalyzer::visit(mir::ops::ElementwiseOp& op) {
- const char* func_name = nullptr;
- switch (op.getOpType()) {
- case ops::ElementwiseOp::OpType::add:
- func_name = "ElementWise<Add>";
- break;
- case ops::ElementwiseOp::OpType::mul:
- func_name = "ElementWise<Mul>";
- break;
- case ops::ElementwiseOp::OpType::max:
- func_name = "ElementWise<Max>";
- break;
- case ops::ElementwiseOp::OpType::div:
- func_name = "ElementWise<Div>";
- break;
- case ops::ElementwiseOp::OpType::sub:
- func_name = "ElementWise<Sub>";
- break;
- default:
- assert(false && "unsupported elementwise operation type");
- }
- appendOperationToInference(&op, func_name);
-}
-
void ModelAnalyzer::visit(mir::ops::EluOp& op) {
appendOperationToInference(&op, "elu");
}
void visit(mir::ops::DepthwiseConv2DOp& op) override;
void visit(mir::ops::DivOp& op) override;
void visit(mir::ops::DropoutOp& op) override;
- void visit(mir::ops::ElementwiseOp& op) override;
void visit(mir::ops::EluOp& op) override;
void visit(mir::ops::FullyConnectedOp& op) override;
void visit(mir::ops::GatherOp& op) override;
// no parameters to dump
}
-void Serializer::visit(mir::ops::ElementwiseOp& op) {
- _curOp->paramStartOffset = _buffer.size();
- // Op type is known at codegen Time
- serializeT<int>(static_cast<int32_t>(op.getBroadcast()));
- serializeShape(op.getOutputShape(0));
-}
-
void Serializer::visit(mir::ops::EluOp& op) {
_curOp->paramStartOffset = _buffer.size();
serializeT<float>(op.getAlpha());
void visit(mir::ops::DepthwiseConv2DOp& op) override;
void visit(mir::ops::DivOp& op) override;
void visit(mir::ops::DropoutOp& op) override;
- void visit(mir::ops::ElementwiseOp& op) override;
void visit(mir::ops::EluOp& op) override;
void visit(mir::ops::FullyConnectedOp& op) override;
void visit(mir::ops::GatherOp& op) override;