`BiasAdd` and `Scale` are restricted versions of the equivalent `Elementwise` ops (add and mul) and are going to be removed; replace their uses with `Elementwise` ops.
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
#include "mir/ShapeRange.h"
#include "mir/ops/TransposeOp.h"
+#include "mir/ops/ElementwiseOp.h"
#include "onnx/onnx.pb.h"
->getOutput(0);
}
+// Convenience wrapper: builds an unnamed elementwise `add` node over the two
+// given outputs and returns its single result. Replaces the removed BiasAddOp
+// (note: broadcasting semantics are whatever ElementwiseOp implements — TODO
+// confirm they match the old BiasAddOp behavior for non-equal shapes).
+inline mir::Operation::Output *createAdd(mir::Graph *graph, mir::Operation::Output *arg1,
+                                         mir::Operation::Output *arg2)
+{
+  std::vector<mir::Operation::Output *> inputs{arg1, arg2};
+  return graph->create<mir::ops::ElementwiseOp>("", inputs, mir::ops::ElementwiseOp::OpType::add)
+      ->getOutput(0);
+}
+
+// Convenience wrapper: builds an unnamed elementwise `mul` node over the two
+// given outputs and returns its single result. Replaces the removed ScaleOp
+// (note: broadcasting semantics are whatever ElementwiseOp implements — TODO
+// confirm they match the old ScaleOp behavior for non-equal shapes).
+inline mir::Operation::Output *createMul(mir::Graph *graph, mir::Operation::Output *arg1,
+                                         mir::Operation::Output *arg2)
+{
+  std::vector<mir::Operation::Output *> inputs{arg1, arg2};
+  return graph->create<mir::ops::ElementwiseOp>("", inputs, mir::ops::ElementwiseOp::OpType::mul)
+      ->getOutput(0);
+}
+
} // namespace mir_onnx
#endif // __MIR_ONNX_HELPERS_H__
#include "mir/ShapeRange.h"
#include "mir/Tensor.h"
-#include "mir/ops/BiasAddOp.h"
#include "mir/ops/ConstantOp.h"
-#include "mir/ops/ScaleOp.h"
#include <cmath>
auto data = convertONNXToMIR(graph, inputs[0]);
auto mean = createOp<mir::ops::ConstantOp>(graph, mean_tensor)->getOutput(0);
- auto result = createOp<mir::ops::BiasAddOp>(graph, data, mean);
+ auto result = createAdd(graph, data, mean);
// res2 = res1 * scale / (var + epsilon)
mir::Tensor<float> multiplier(scale_tensor);
for (auto &idx : mir::ShapeRange(scale_tensor.getShape()))
multiplier.at(idx) /= std::sqrt(var_accessor.at(idx) + epsilon);
auto scale = createOp<mir::ops::ConstantOp>(graph, scale_tensor)->getOutput(0);
- result = createOp<mir::ops::ScaleOp>(graph, result->getOutput(0), scale);
+ result = createMul(graph, result, scale);
// overall_res = res2 + bias
auto bias = createOp<mir::ops::ConstantOp>(graph, bias_tensor)->getOutput(0);
- result = createOp<mir::ops::BiasAddOp>(graph, result->getOutput(0), bias);
+ result = createAdd(graph, result, bias);
- return {convertMIRToONNX(graph, result->getOutput(0))};
+ return {convertMIRToONNX(graph, result)};
}
} // namespace mir_onnx
#include "mir/TensorUtil.h"
-#include "mir/ops/BiasAddOp.h"
#include "mir/ops/ConstantOp.h"
#include "mir/ops/Conv2DOp.h"
#include "mir/ops/DepthwiseConv2DOp.h"
+#include "mir/ops/ElementwiseOp.h"
namespace mir_onnx
{
num_groups = 1;
bool is_depthwise = (num_groups != 1) && (in_group_size == 1) && (out_channels == num_groups);
- mir::Operation *result;
+ mir::Operation::Output *result;
auto transposed_input = convertONNXToMIR(graph, inputs[0]);
if (is_depthwise)
{
auto kernel = createOp<mir::ops::ConstantOp>(graph, transposed_tensor)->getOutput(0);
result =
createOp<mir::ops::DepthwiseConv2DOp>(graph, transposed_input, kernel, cdata.strides_shape,
- cdata.padding_before, cdata.padding_after);
+ cdata.padding_before, cdata.padding_after)
+ ->getOutput(0);
}
else
{
kernel_tensor = mir::transposeTensor<3, 0, 1, 2>(kernel_tensor);
auto kernel = createOp<mir::ops::ConstantOp>(graph, kernel_tensor)->getOutput(0);
result = createOp<mir::ops::Conv2DOp>(graph, transposed_input, kernel, cdata.strides_shape,
- cdata.padding_before, cdata.padding_after);
+ cdata.padding_before, cdata.padding_after)
+ ->getOutput(0);
}
if (inputs.size() > 2)
- result = createOp<mir::ops::BiasAddOp>(graph, result->getOutput(0), inputs[2]);
+ {
+ result = createAdd(graph, result, inputs[2]);
+ }
- return {convertMIRToONNX(graph, result->getOutput(0))};
+ return {convertMIRToONNX(graph, result)};
}
} // namespace mir_onnx
#include "mir/TensorVariant.h"
#include "mir/ops/ConstantOp.h"
-#include "mir/ops/ElementwiseOp.h"
#include "mir/ops/GemmOp.h"
#include "mir/ops/ReshapeOp.h"
-#include "mir/ops/ScaleOp.h"
#include "mir/ops/TransposeOp.h"
namespace mir_onnx
{
auto alpha_tensor = createScalarTensor(alpha_val, input_a->getShape());
auto alpha = createOp<mir::ops::ConstantOp>(graph, alpha_tensor)->getOutput(0);
- input_a = createOp<mir::ops::ScaleOp>(graph, input_a, alpha)->getOutput(0);
+ input_a = createMul(graph, input_a, alpha);
}
// 2. Prepare input matrix B
}
auto beta = createOp<mir::ops::ConstantOp>(graph, beta_tensor)->getOutput(0);
std::vector<mir::Operation::Output *> mul_inputs = {beta, input_c};
- auto c_mult =
- createOp<mir::ops::ElementwiseOp>(graph, mul_inputs, mir::ops::ElementwiseOp::OpType::mul)
- ->getOutput(0);
+ auto c_mult = createMul(graph, beta, input_c);
assert(c_mult->getShape() == mult_a_b);
auto result = createOp<mir::ops::GemmOp>(graph, input_a, input_b, c_mult);
return {result->getOutput(0)};
#include "ONNXHelpers.h"
#include "mir/ops/ConstantOp.h"
-#include "mir/ops/ScaleOp.h"
namespace mir_onnx
{
const auto &shape = inputs[0]->getShape();
auto scale_tensor = createScalarTensor(scale_val, shape);
auto scale = createOp<mir::ops::ConstantOp>(graph, scale_tensor)->getOutput(0);
- auto result = createOp<mir::ops::ScaleOp>(graph, inputs[0], scale);
- return {result->getOutput(0)};
+ auto result = createMul(graph, inputs[0], scale);
+ return {result};
}
} // namespace mir_onnx