#include "tflite_op_creator.h"
#include "schema_generated.h"

#include "mir/ops/AddOp.h"
#include "mir/ops/CappedReluOp.h"
#include "mir/ops/ConcatOp.h"
#include "mir/ops/ConstantOp.h"
#include "mir/ops/Conv2DOp.h"
#include "mir/ops/Deconv2DOp.h"
#include "mir/ops/DepthwiseConv2DOp.h"
#include "mir/ops/DivOp.h"
#include "mir/ops/FullyConnectedOp.h"
#include "mir/ops/LeakyReluOp.h"
#include "mir/ops/MaxOp.h"
#include "mir/ops/MulOp.h"
#include "mir/ops/PadOp.h"
#include "mir/ops/PoolOp.h"
#include "mir/ops/ReduceOp.h"
#include "mir/ops/SoftmaxOp.h"
#include "mir/ops/SqrtOp.h"
#include "mir/ops/SqueezeOp.h"
#include "mir/ops/SubOp.h"
#include "mir/ops/TanhOp.h"
#include "mir/ops/TransposeOp.h"
auto result =
createOp<ops::Conv2DOp>(input, kernel, strides, padding_before, padding_after)->getOutput(0);
- result = createAdd(result, bias);
+ result = createOp<ops::AddOp>(result, bias)->getOutput(0);
return {addFusedActivation(result, opts->fused_activation_function())};
}
auto result =
createOp<ops::DepthwiseConv2DOp>(input, kernel, strides, padding_before, padding_after)
->getOutput(0);
- result = createAdd(result, bias);
+ result = createOp<ops::AddOp>(result, bias)->getOutput(0);
return {addFusedActivation(result, opts->fused_activation_function())};
}
TFLiteOpCreator::convertAdd(const ::tflite::AddOptions *opts,
                            const std::vector<mir::Operation::Output *> &inputs)
{
  // Converts a TFLite Add operator: wraps the two operands in a binary AddOp
  // and applies the operator's fused activation function to the result.
  // TFLite Add is strictly binary, hence the arity assert.
  assert(inputs.size() == 2);
  auto result = createOp<ops::AddOp>(inputs[0], inputs[1])->getOutput(0);
  return {addFusedActivation(result, opts->fused_activation_function())};
}
std::vector<mir::Operation::Output *>
TFLiteOpCreator::convertSub(const ::tflite::SubOptions *opts,
                            const std::vector<mir::Operation::Output *> &inputs)
{
  // Converts a TFLite Sub operator into a binary SubOp computing
  // inputs[0] - inputs[1], then applies the fused activation from the options.
  assert(inputs.size() == 2);
  auto result = createOp<ops::SubOp>(inputs[0], inputs[1])->getOutput(0);
  return {addFusedActivation(result, opts->fused_activation_function())};
}
// NOTE(review): this function still contains unresolved diff markers ('+'/'-'
// line prefixes) and the interior of the constant-folding branch appears to be
// truncated — the closing braces do not balance with the visible opening ones.
// The original hunk must be restored and the markers resolved before this file
// can compile; left byte-identical here because the missing lines cannot be
// reconstructed safely from what is visible.
std::vector<mir::Operation::Output *>
TFLiteOpCreator::convertMul(const ::tflite::MulOptions *opts,
                            const std::vector<mir::Operation::Output *> &inputs)
{
+  assert(inputs.size() == 2);
  // Try to constant fold the operation in some cases.
-  if (inputs.size() == 2 && inputs[0]->getShape() == inputs[1]->getShape() &&
+  if (inputs[0]->getShape() == inputs[1]->getShape() &&
      opts->fused_activation_function() == ActivationFunctionType_NONE)
  {
    auto constant1_op = dynamic_cast<const ops::ConstantOp *>(inputs[0]->getNode());
  }
}
-  auto result = createOp<ops::ElementwiseOp>(inputs, ops::ElementwiseOp::OpType::mul);
-  return {addFusedActivation(result->getOutput(0), opts->fused_activation_function())};
+  auto result = createOp<ops::MulOp>(inputs[0], inputs[1])->getOutput(0);
+  return {addFusedActivation(result, opts->fused_activation_function())};
}
std::vector<mir::Operation::Output *>
TFLiteOpCreator::convertDiv(const ::tflite::DivOptions *opts,
                            const std::vector<mir::Operation::Output *> &inputs)
{
  // Converts a TFLite Div operator into a binary DivOp computing
  // inputs[0] / inputs[1], then applies the fused activation from the options.
  assert(inputs.size() == 2);
  auto result = createOp<ops::DivOp>(inputs[0], inputs[1])->getOutput(0);
  return {addFusedActivation(result, opts->fused_activation_function())};
}
std::vector<mir::Operation::Output *>
TFLiteOpCreator::convertMax(const std::vector<mir::Operation::Output *> &inputs)
{
  // Converts a TFLite Maximum operator into a binary element-wise MaxOp.
  // Unlike Add/Sub/Mul/Div, Maximum carries no fused activation in its
  // options, so the op's output is returned directly.
  assert(inputs.size() == 2);
  auto result = createOp<ops::MaxOp>(inputs[0], inputs[1])->getOutput(0);
  return {result};
}
std::vector<mir::Operation::Output *>
TFLiteOpCreator::convertSquaredDifference(const std::vector<mir::Operation::Output *> &inputs)
{
  // Lowers TFLite SquaredDifference as (a - b) * (a - b): a SubOp producing
  // the difference, followed by a MulOp squaring it. No fused activation.
  assert(inputs.size() == 2);
  auto result = createOp<ops::SubOp>(inputs[0], inputs[1])->getOutput(0);
  result = createOp<ops::MulOp>(result, result)->getOutput(0);
  return {result};
}
weights = createOp<ops::ConstantOp>(weights_tensor)->getOutput(0);
auto result = createOp<ops::FullyConnectedOp>(flatten->getOutput(0), weights)->getOutput(0);
- result = createAdd(result, bias);
+ result = createOp<ops::AddOp>(result, bias)->getOutput(0);
return {addFusedActivation(result, opts->fused_activation_function())};
}
}
}
// NOTE(review): the createAdd/createMul helpers below are entirely '-'-marked
// diff residue — every call site visible in this file now builds the binary op
// directly via createOp<ops::AddOp>/createOp<ops::MulOp>, so these helpers are
// dead in the post-patch file. Remove these lines when the patch is resolved;
// they are left byte-identical here (markers included) rather than deleted
// silently.
-mir::Operation::Output *TFLiteOpCreator::createAdd(mir::Operation::Output *arg1,
-                                                   mir::Operation::Output *arg2)
-{
-  std::vector<mir::Operation::Output *> inputs{arg1, arg2};
-  auto op = createOp<ops::ElementwiseOp>(inputs, ops::ElementwiseOp::OpType::add);
-  return op->getOutput(0);
-}
-
-mir::Operation::Output *TFLiteOpCreator::createMul(mir::Operation::Output *arg1,
-                                                   mir::Operation::Output *arg2)
-{
-  std::vector<mir::Operation::Output *> inputs{arg1, arg2};
-  auto op = createOp<ops::ElementwiseOp>(inputs, ops::ElementwiseOp::OpType::mul);
-  return op->getOutput(0);
-}
-
std::vector<mir::Operation::Output *>
TFLiteOpCreator::convertSqueeze(const ::tflite::SqueezeOptions *opts,
const std::vector<mir::Operation::Output *> &inputs)