#include "ops/Pool.h"
#include "ops/Reshape.h"
#include "ops/Softmax.h"
+#include "ops/Scale.h"
+#include "ops/Dropout.h"
+#include "ops/BatchNorm.h"
namespace nncc
{
mapByName(node);
auto operand = node->getPrevNodes()[0];
TensorVariant input(var(operand.node->getId())[operand.index]);
- (void)input; (void)op;
// TODO implement this
- // var(node->getId()) = impl::BatchNormOp(input, op)();
- assert("BatchNormOp Not implemented yet" == 0);
-
+ // Store result under this node's id; impl::BatchNorm currently copies input to output (see BatchNorm.h).
+ var(node->getId()) = impl::BatchNorm<float>(input, op)();
}
void NNInterpreter::visit(ADT::INode *node, ops::ScaleOp &op)
mapByName(node);
auto operand = node->getPrevNodes()[0];
TensorVariant input(var(operand.node->getId())[operand.index]);
- (void)input; (void)op;
// TODO implement this
- // var(node->getId()) = impl::ScaleOp(input, op)();
- assert("ScaleOp Not implemented yet" == 0);
+ // impl::Scale multiplies input by the op's weights along the last dimension (see Scale.cpp).
+ var(node->getId()) = impl::Scale(input, op)();
}
void NNInterpreter::visit(ADT::INode *node, ops::DropoutOp &op)
mapByName(node);
auto operand = node->getPrevNodes()[0];
TensorVariant input(var(operand.node->getId())[operand.index]);
- (void)input; (void)op;
// TODO implement this
- // var(node->getId()) = impl::DropoutOp(input, op)();
- assert("DropoutOp Not implemented yet" == 0);
+ // impl::Dropout is an inference-time no-op: it copies input to output (see Dropout.h).
+ var(node->getId()) = impl::Dropout<float>(input, op)();
}
void NNInterpreter::mapByName(ADT::INode::Ref n) {
--- /dev/null
+#include "BatchNorm.h"
+
+//Stub to ensure BatchNorm.h is compiled
--- /dev/null
+#ifndef _NNC_CORE_BACKEND_INTERPRETER_BATCHNORM_IMPL_
+#define _NNC_CORE_BACKEND_INTERPRETER_BATCHNORM_IMPL_
+
+#include "OperationImpl.h"
+#include "Fill.h"
+
+#include "core/modelIR/operations/batch_norm.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace backend
+{
+namespace interpreter
+{
+namespace impl
+{
+
+using nncc::contrib::core::IR::model::ops::BatchNormOp;
+
+/**
+ * @brief Implements BatchNormOp for interpreter backend
+ * @note For now this is an identity operation: it simply copies input to output
+ * @tparam T type of data in input tensor
+ */
+template<typename T>
+class BatchNorm : public OperationImpl<T>
+{
+public:
+  /**
+   * @param input input data
+   * @param op batch normalization operation description
+   */
+  explicit BatchNorm(const TensorVariant& input, const BatchNormOp& op) : _input(input), _op(op) {}
+
+  /**
+   * @brief computes operation application result
+   * @return vector of all outputs from this node
+   */
+  std::vector<TensorVariant> operator()() override
+  {
+    // For now BatchNorm just copies input to output; normalization parameters in _op are ignored
+    return Fill<T>(_input.getShape(), [this](const Index& idx) {
+      return _input.at(idx);
+    })();
+  }
+
+private:
+  // Typed accessor over the input; uses T so non-float instantiations stay consistent
+  const Tensor<T> _input;
+  const BatchNormOp& _op;
+};
+
+} // namespace impl
+} // namespace interpreter
+} // namespace backend
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_BACKEND_INTERPRETER_BATCHNORM_IMPL_
--- /dev/null
+#include "Dropout.h"
+
+//Stub to ensure Dropout.h is compiled
--- /dev/null
+#ifndef _NNC_CORE_BACKEND_INTERPRETER_DROPOUT_IMPL_
+#define _NNC_CORE_BACKEND_INTERPRETER_DROPOUT_IMPL_
+
+#include "OperationImpl.h"
+#include "Fill.h"
+
+#include "core/modelIR/operations/dropout_op.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace backend
+{
+namespace interpreter
+{
+namespace impl
+{
+
+using nncc::contrib::core::IR::model::ops::DropoutOp;
+
+/**
+ * @brief Implements DropoutOp for interpreter backend
+ * @note For now this is an identity operation: it simply copies input to output
+ * @tparam T type of data in input tensor
+ */
+template<typename T>
+class Dropout : public OperationImpl<T> {
+public:
+  /**
+   * @param in input data
+   * @param op dropout operation description
+   */
+  explicit Dropout(const TensorVariant& in, const DropoutOp& op) : _input(in), _op(op) {}
+
+  /**
+   * @brief computes operation application result
+   * @return vector of all outputs from this node
+   */
+  std::vector<TensorVariant> operator()() override;
+
+private:
+  // Typed accessor over the input; uses T so it matches Fill<T> in operator()
+  const Tensor<T> _input;
+  const DropoutOp& _op;
+};
+
+template<typename T>
+std::vector<TensorVariant> Dropout<T>::operator()()
+{
+  // Dropout at inference time is a no-op: emit a tensor identical to the input.
+  auto copyElement = [this](const Index& idx) { return _input.at(idx); };
+  return Fill<T>(_input.getShape(), copyElement)();
+}
+
+} // namespace impl
+} // namespace interpreter
+} // namespace backend
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_BACKEND_INTERPRETER_DROPOUT_IMPL_
--- /dev/null
+#include "Scale.h"
+
+#include "Fill.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace backend
+{
+namespace interpreter
+{
+namespace impl
+{
+
+std::vector<TensorVariant> Scale::operator()()
+{
+  // For now handles only the most common case: scale applied along the last dimension.
+  Tensor<float> weightsAccessor(_op.getWeights());
+  // Capture the accessor by reference: Fill is invoked synchronously within this
+  // statement, so the reference cannot dangle, and we avoid copying the accessor
+  // into the closure.
+  return Fill<float>(_input.getShape(), [this, &weightsAccessor](const Index &idx) {
+    return _input.at(idx) * weightsAccessor.at({idx.at(idx.rank() - 1)});
+  })();
+}
+
+} // namespace impl
+} // namespace interpreter
+} // namespace backend
+} // namespace contrib
+} // namespace nncc
--- /dev/null
+#ifndef _NNC_CORE_BACKEND_INTERPRETER_SCALE_IMPL_
+#define _NNC_CORE_BACKEND_INTERPRETER_SCALE_IMPL_
+
+#include "OperationImpl.h"
+
+#include "core/modelIR/operations/scale_op.h"
+
+namespace nncc
+{
+namespace contrib
+{
+namespace backend
+{
+namespace interpreter
+{
+namespace impl
+{
+using nncc::contrib::core::IR::model::ops::ScaleOp;
+
+/**
+ * @brief Implements ScaleOp for interpreter backend
+ * @todo check if support is needed for any datatypes other than DTYPE::FLOAT
+ */
+class Scale : public OperationImpl<float> {
+public:
+  /**
+   * @param in input data
+   * @param op scale operation description
+   */
+  explicit Scale(const TensorVariant& in, const ScaleOp& op) : _input(in), _op(op) {}
+
+  /**
+   * @brief computes operation application result
+   * @return vector of all outputs from this node
+   */
+  std::vector<TensorVariant> operator()() override;
+
+private:
+  // const: operator() only reads the input, matching the other interpreter ops
+  const Tensor<float> _input;
+  const ScaleOp& _op;
+};
+
+} // namespace impl
+} // namespace interpreter
+} // namespace backend
+} // namespace contrib
+} // namespace nncc
+
+#endif // _NNC_CORE_BACKEND_INTERPRETER_SCALE_IMPL_