*/
#include <cmath>
-
#include "core/modelIR/ShapeInference.h"
#include "core/modelIR/operations/fully_connected_op.h"
#include "core/modelIR/operations/batch_norm.h"
#include "core/modelIR/operations/scale_op.h"
#include "core/modelIR/operations/dropout_op.h"
+#include "core/modelIR/operations/TanhOp.h"
+#include "core/modelIR/operations/ElementwiseOp.h"
namespace nnc
{
op.setOutputShape(0, op.getInputShape(0));
}
+void ShapeInference::visit(INode::Ref node, ops::TanhOp &op) {
+ fillInputShapes(node, op);
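+  // Tanh is applied elementwise, so the output shape matches the input shape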
+ op.setOutputShape(0, op.getInputShape(0));
+}
+
+void ShapeInference::visit(INode::Ref node, ops::ElementwiseOp &op) {
+ fillInputShapes(node, op);
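+  // All inputs are expected to share one shape; the first input's shape is propagated to the output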
+ op.setOutputShape(0, op.getInputShape(0));
+}
+
} // namespace mir
} // namespace nnc
*/
#include <iostream>
+#include "core/modelIR/ir_dot_dumper.h"
#include "core/modelIR/Shape.h"
#include "core/modelIR/ir_node.h"
#include "core/modelIR/ir_dot_node_info.h"
-#include "core/modelIR/ir_dot_dumper.h"
namespace nnc
{
auto nodeInfo = DotIrNodeInfo().withType("DropoutOp", node->getName())
.withInShapes(getInputShapes(op))
.withOutShapes(getOutputShapes(op))
- .withMisc("DropRate",op.getRate());
+ .withMisc("DropRate", op.getRate());
dotBuilder.updateWithNode(node, nodeInfo);
}
+void IrDotDumper::visit(INode *node, ops::TanhOp &op) {
+ auto nodeInfo = DotIrNodeInfo().withType("TanhOp", node->getName())
+ .withInShapes(getInputShapes(op))
+ .withOutShapes(getOutputShapes(op));
+
+ dotBuilder.updateWithNode(node, nodeInfo);
+}
+
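+// Dumps the elementwise node; the operation type is recorded as its integer enum value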
+void IrDotDumper::visit(INode *node, ops::ElementwiseOp &op) {
+  auto nodeInfo = DotIrNodeInfo().withType("ElementwiseOp", node->getName())
+ .withInShapes(getInputShapes(op))
+ .withOutShapes(getOutputShapes(op))
+ .withMisc("Operation", (int)op.getOpType());
+
+ dotBuilder.updateWithNode(node, nodeInfo);
+}
} // namespace mir
} // namespace nnc
*/
-#include <core/modelIR/visitor.h>
-
#include "core/modelIR/visitor.h"
-namespace nnc
-{
-namespace mir
-{
+
+namespace nnc {
+namespace mir {
void Visitor::visit(INode *node, ops::Conv2DOp &op) {(void)node; (void)op;};
void Visitor::visit(INode *node, ops::DepthwiseConv2DOp &op) {(void)node; (void)op;};
void Visitor::visit(INode *node, ops::ScaleOp &op) {(void)node; (void)op;};
void Visitor::visit(INode *node, ops::BatchNormOp &op) {(void)node; (void)op;};
void Visitor::visit(INode *node, ops::DropoutOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::TanhOp &op) {(void)node; (void)op;};
+void Visitor::visit(INode *node, ops::ElementwiseOp &op) {(void)node; (void)op;};
} // namespace mir
} // namespace nnc
void visit(INode::Ref node, ops::BiasAddOp &op) override;
void visit(INode::Ref node, ops::ReshapeOp &op) override;
void visit(INode::Ref node, ops::VariableOp &op) override;
- void visit(INode *node, ops::ScaleOp &op) override;
- void visit(INode *node, ops::BatchNormOp &op) override;
- void visit(INode *node, ops::DropoutOp &op) override;
+ void visit(INode::Ref node, ops::ScaleOp &op) override;
+ void visit(INode::Ref node, ops::BatchNormOp &op) override;
+ void visit(INode::Ref node, ops::DropoutOp &op) override;
+ void visit(INode::Ref node, ops::TanhOp &op) override;
+ void visit(INode::Ref node, ops::ElementwiseOp &op) override;
protected:
void fillInputShapes(INode::Ref node, OpDescription &op);
#include "core/modelIR/operations/batch_norm.h"
#include "core/modelIR/operations/scale_op.h"
#include "core/modelIR/operations/dropout_op.h"
+#include "core/modelIR/operations/TanhOp.h"
+#include "core/modelIR/operations/ElementwiseOp.h"
#include "core/modelIR/ir_dot_builder.h"
{
/**
- * @breif Model IR visitor that can be used to output Model IR as a .dot graph.
+ * @brief Model IR visitor that can be used to output Model IR as a .dot graph.
* @usage Run on a Model IR graph as a visitor, and then call writeDot passing it a stream
*/
class IrDotDumper : public IVisitor
void visit(INode *node, ops::ScaleOp &op) override;
void visit(INode *node, ops::BatchNormOp &op) override;
void visit(INode *node, ops::DropoutOp &op) override;
+ void visit(INode *node, ops::TanhOp &op) override;
+ void visit(INode *node, ops::ElementwiseOp &op) override;
void writeDot(std::ostream &os) { dotBuilder.writeDot(os); };
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NNC_CORE_IR_MODEL_ELEMENTWISE_OP_H_
+#define _NNC_CORE_IR_MODEL_ELEMENTWISE_OP_H_
+
+#include "core/modelIR/operations/operation.h"
+
+namespace nnc {
+namespace mir {
+namespace ops {
+
+class ElementwiseOp : public OpDescription {
+public:
+
+ enum class OpType {
+ prod = 0, sum = 1, max = 2
+ };
+
+ /**
+   * Applies a binary operation elementwise, reducing all input tensors to a single output
+ * @param op_type Type of operation to perform
+ * @param num_inputs Number of inputs
+ */
+ explicit ElementwiseOp(OpType op_type, size_t num_inputs) :
+ OpDescription(num_inputs, 1), _opType(op_type) {};
+
+  OpType getOpType() const { return _opType; }
+
+private:
+  OpType _opType;
+};
+
+} // namespace ops
+} // namespace mir
+} // namespace nnc
+
+
+#endif //_NNC_CORE_IR_MODEL_ELEMENTWISE_OP_H_
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _NNC_CORE_IR_MODEL_TANH_H_
+#define _NNC_CORE_IR_MODEL_TANH_H_
+
+
+#include "core/modelIR/operations/operation.h"
+
+namespace nnc {
+namespace mir {
+namespace ops {
+
+class TanhOp : public OpDescription {
+public:
+ explicit TanhOp() : OpDescription(1, 1) {}
+};
+
+} // namespace ops
+} // namespace mir
+} // namespace nnc
+
+#endif //_NNC_CORE_IR_MODEL_TANH_H_
#ifndef _NNC_CORE_IR_MODEL_VISITOR_H_
#define _NNC_CORE_IR_MODEL_VISITOR_H_
-namespace nnc
-{
-namespace mir
-{
+namespace nnc {
+namespace mir {
class INode;
class ScaleOp;
class BatchNormOp;
class DropoutOp;
+class TanhOp;
+class ElementwiseOp;
}
/**
virtual void visit(INode *node, ops::ScaleOp &op) = 0;
virtual void visit(INode *node, ops::BatchNormOp &op) = 0;
virtual void visit(INode *node, ops::DropoutOp &op) = 0;
+ virtual void visit(INode *node, ops::TanhOp &op) = 0;
+ virtual void visit(INode *node, ops::ElementwiseOp &op) = 0;
virtual ~IVisitor() = default;
};
void visit(INode *node, ops::ScaleOp &op) override;
void visit(INode *node, ops::BatchNormOp &op) override;
void visit(INode *node, ops::DropoutOp &op) override;
+ void visit(INode *node, ops::TanhOp &op) override;
+ void visit(INode *node, ops::ElementwiseOp &op) override;
~Visitor() override = default;
void visit(INode::Ref node, ops::ScaleOp &op) override;
void visit(INode::Ref node, ops::BatchNormOp &op) override;
void visit(INode::Ref node, ops::DropoutOp &op) override;
+ void visit(INode::Ref node, ops::TanhOp &op) override;
+ void visit(INode::Ref node, ops::ElementwiseOp &op) override;
void setInput(const std::string &name, const TensorVariant& data);
std::vector<TensorVariant> &getResult(INode::Ref node);
#include <cmath>
#include <cassert>
+#include <vector>
#include "passes/interpreter/Interpreter.h"
#include "core/modelIR/operations/batch_norm.h"
#include "core/modelIR/operations/scale_op.h"
#include "core/modelIR/operations/dropout_op.h"
+#include "core/modelIR/operations/TanhOp.h"
+#include "core/modelIR/operations/ElementwiseOp.h"
#include "ops/Bias.h"
#include "ops/Concat.h"
nodeByName[nodeName] = n;
}
+void NNInterpreter::visit(INode::Ref node, ops::TanhOp &op) {
+ mapByName(node);
+ auto operand = node->getPrevNodes()[0];
+ Tensor<float> input(var(operand.node->getId())[operand.index]);
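+  // Apply std::tanh to every element of the input tensor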
+  var(node->getId()) = Fill<float>(op.getOutputShape(0), [&input](const Index &id) {
+ return std::tanh(input.at(id));
+ })();
+}
+
+void NNInterpreter::visit(INode::Ref node, ops::ElementwiseOp &op) {
+ mapByName(node);
+ auto operands = node->getPrevNodes();
+ std::vector<Tensor<float>> ins;
+ for (auto &in : operands) {
+ ins.push_back(Tensor<float>(var(in.node->getId())[in.index]));
+ }
+  float (*func)(float, float); // pointer to the selected binary operation
+ switch (op.getOpType()) {
+ case ops::ElementwiseOp::OpType::sum:
+ func = [](float a, float b) { return a + b; };
+ break;
+ case ops::ElementwiseOp::OpType::prod:
+      func = [](float a, float b) { return a * b; };
+ break;
+ case ops::ElementwiseOp::OpType::max:
+      func = [](float a, float b) { return std::max(a, b); };
+ break;
+ default:
+      assert(false && "Unsupported OpType");
+ }
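+  // For each output element, fold the selected operation over the corresponding elements of all inputs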
+  var(node->getId()) = Fill<float>(op.getOutputShape(0), [&func, &ins](const Index &id) {
+ float acc = ins[0].at(id);
+    for (size_t i = 1; i < ins.size(); i++)
+ acc = func(acc, ins[i].at(id));
+ return acc;
+ })();
+}
+
} // namespace nnc
#include "core/modelIR/operations/batch_norm.h"
#include "core/modelIR/operations/scale_op.h"
#include "core/modelIR/operations/dropout_op.h"
+#include "core/modelIR/operations/TanhOp.h"
+#include "core/modelIR/operations/ElementwiseOp.h"
using namespace std;
addOpDescr(node, "scale");
}
-void ModelAnalyzer::visit(INode *node, ops::BatchNormOp &op)
-{
+void ModelAnalyzer::visit(INode *node, ops::BatchNormOp &op) {
addOpDescr(node, "batchNorm");
}
+void ModelAnalyzer::visit(INode *node, ops::TanhOp &op) {
+ addOpDescr(node, "TanhOp");
+}
+
+void ModelAnalyzer::visit(INode *node, ops::ElementwiseOp &op) {
+ addOpDescr(node, "Elementwise");
+}
} // namespace nnc
void visit(mir::INode *node, mir::ops::ScaleOp &op) override;
void visit(mir::INode *node, mir::ops::BatchNormOp &op) override;
void visit(mir::INode *node, mir::ops::DropoutOp &op) override;
+ void visit(mir::INode *node, mir::ops::TanhOp &op) override;
+ void visit(mir::INode *node, mir::ops::ElementwiseOp &op) override;
/**
* @return vector of id's of network input tensors
#include "core/modelIR/operations/batch_norm.h"
#include "core/modelIR/operations/scale_op.h"
#include "core/modelIR/operations/dropout_op.h"
+#include "core/modelIR/operations/TanhOp.h"
+#include "core/modelIR/operations/ElementwiseOp.h"
#include "core/modelIR/ir_node.h"
#include "pass/PassException.h"
}
}
+void Serializer::visit(mir::INode *node, mir::ops::TanhOp &op) {
+ _curOp->_paramStartOffset = _buffer.size();
+ // no parameters to dump
+}
+
+void Serializer::visit(mir::INode *node, mir::ops::ElementwiseOp &op) {
+ _curOp->_paramStartOffset = _buffer.size();
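+  // Elementwise ops carry no weights; only the operation type and the number of inputs are serialized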
+  serializeT<int>((int32_t) op.getOpType());
+ serializeT<int>((int32_t) op.getNumInputs());
+}
+
} // namespace nnc
void visit(mir::INode *node, mir::ops::ScaleOp &op) override;
void visit(mir::INode *node, mir::ops::BatchNormOp &op) override;
void visit(mir::INode *node, mir::ops::DropoutOp &op) override;
+ void visit(mir::INode *node, mir::ops::TanhOp &op) override;
+ void visit(mir::INode *node, mir::ops::ElementwiseOp &op) override;
void serialize(std::list<OpDescr> &inferenceSequence);