#include "tflite_op_creator.h"
+#include "nnc/core/IR/model/operations/concat_op.h"
+#include "nnc/core/IR/model/operations/conv_2d_op.h"
+#include "nnc/core/IR/model/operations/depthwise_conv2d_op.h"
#include "nnc/core/IR/model/operations/relu_op.h"
#include "nnc/core/IR/model/operations/capped_relu_op.h"
+#include "nnc/core/IR/model/operations/softmax_op.h"
+#include "nnc/core/IR/model/operations/pool_op.h"
+#include "nnc/core/IR/model/operations/bias_add_op.h"
+#include "nnc/core/IR/model/operations/reshape_op.h"
namespace nncc
{
std::vector<INode::Ref> OpCreator::createConv2D(InputOps inputs, InputParams params,
                                                const Conv2DOptions *opts)
{
  // Translate a TFLite CONV_2D operator into an IR Conv2DOp followed by a BiasAddOp.
  // params[0] holds the kernel tensor, params[1] the bias tensor; both are moved
  // into the created operations.
  // Strides are given as (h, w); the trailing 1 covers the channel dimension.
  Shape strideShape{static_cast<uint32_t>(opts->stride_h()),
                    static_cast<uint32_t>(opts->stride_w()), 1};

  // The convolution itself carries no activation; the fused activation (if any)
  // is attached to the bias-add node that consumes the convolution output.
  auto convOutputs = createOp<ops::Conv2DOp>(inputs, ActivationFunctionType_NONE,
                                             std::move(*params[0]), strideShape,
                                             paddingMap[opts->padding()]);
  return createOp<ops::BiasAddOp>(convOutputs, opts->fused_activation_function(),
                                  std::move(*params[1]));
}
std::vector<INode::Ref> OpCreator::createDepthConv2D(InputOps inputs, InputParams params,
                                                     const DepthwiseConv2DOptions *opts)
{
  // Translate a TFLite DEPTHWISE_CONV_2D operator into an IR DepthwiseConv2DOp
  // followed by a BiasAddOp. params[0] is the kernel, params[1] the bias.
  // Strides are given as (h, w); the trailing 1 covers the channel dimension.
  Shape strideShape{static_cast<uint32_t>(opts->stride_h()),
                    static_cast<uint32_t>(opts->stride_w()), 1};

  // As with plain convolution, the fused activation is deferred to the bias-add
  // node; the convolution itself is created activation-free.
  auto convOutputs = createOp<ops::DepthwiseConv2DOp>(inputs, ActivationFunctionType_NONE,
                                                      std::move(*params[0]), strideShape,
                                                      paddingMap[opts->padding()]);
  return createOp<ops::BiasAddOp>(convOutputs, opts->fused_activation_function(),
                                  std::move(*params[1]));
}
std::vector<INode::Ref> OpCreator::createConcat(InputOps inputs, InputParams params,
                                                const ConcatenationOptions *opts)
{
  // The importer drops the batch dimension, so the TFLite axis is shifted down
  // by one to address the same logical dimension in the IR tensor.
  // NOTE(review): assumes opts->axis() is non-negative — TODO confirm that
  // negative (wrap-around) axes are rejected or normalized upstream.
  const auto irAxis = opts->axis() - 1;
  return createOp<ops::ConcatOp>(inputs, opts->fused_activation_function(), inputs.size(),
                                 irAxis);
}
std::vector<INode::Ref> OpCreator::createMaxPool(InputOps inputs, InputParams params,
                                                 const Pool2DOptions *opts)
{
  // Translate a TFLite MAX_POOL_2D operator into an IR PoolOp with MAX pooling.
  // Window and strides are (h, w) pairs; the trailing 1 covers the channel dimension.
  Shape windowShape{static_cast<uint32_t>(opts->filter_height()),
                    static_cast<uint32_t>(opts->filter_width()), 1};
  Shape strideShape{static_cast<uint32_t>(opts->stride_h()),
                    static_cast<uint32_t>(opts->stride_w()), 1};

  return createOp<ops::PoolOp>(inputs, opts->fused_activation_function(), windowShape,
                               strideShape, ops::PoolOp::PoolingType::MAX,
                               paddingMap[opts->padding()]);
}
std::vector<INode::Ref> OpCreator::createAvgPool(InputOps inputs, InputParams params,
                                                 const Pool2DOptions *opts)
{
  // Translate a TFLite AVERAGE_POOL_2D operator into an IR PoolOp with AVG pooling.
  // Window and strides are (h, w) pairs; the trailing 1 covers the channel dimension.
  Shape windowShape{static_cast<uint32_t>(opts->filter_height()),
                    static_cast<uint32_t>(opts->filter_width()), 1};
  Shape strideShape{static_cast<uint32_t>(opts->stride_h()),
                    static_cast<uint32_t>(opts->stride_w()), 1};

  return createOp<ops::PoolOp>(inputs, opts->fused_activation_function(), windowShape,
                               strideShape, ops::PoolOp::PoolingType::AVG,
                               paddingMap[opts->padding()]);
}
std::vector<INode::Ref> OpCreator::createSoftmax(InputOps inputs, InputParams params,
                                                 const SoftmaxOptions *opts)
{
  // Translate a TFLite SOFTMAX operator into an IR SoftmaxOp.
  // TODO: here assuming that softmax is applied to a 1-d tensor — hence axis 0.
  const int softmaxAxis = 0;
  return createOp<ops::SoftmaxOp>(inputs, ActivationFunctionType_NONE, softmaxAxis);
}
std::vector<INode::Ref> OpCreator::createReshape(InputOps inputs, InputParams params,
                                                 const ReshapeOptions *opts)
{
  // Translate a TFLite RESHAPE operator into an IR ReshapeOp and pin its output
  // shape from the operator's new_shape attribute.
  auto reshaped = createOp<ops::ReshapeOp>(inputs, ActivationFunctionType_NONE);

  // TODO: we should also support "-1" values in new_shape, which means that correct
  // shape values must be calculated. Better do it in the shape inference module.
  const auto *rawNewShape = opts->new_shape();
  Shape targetShape = common::ShapeHelper::createShape(*rawNewShape, rawNewShape->size());

  reshaped[0]->getOperation()->setOutputShape(0, targetShape);
  return reshaped;
}
INode::Ref OpCreator::addFusedActivation(INode::Ref input, ActivationFunctionType activationType)