mir::Tensor<int32_t> paddings_tensor(extractTensor(inputs.at(1)));
const auto &input_shape = input->getShape();
- int32_t num_dims = input_shape.rank();
+ const int num_dims = input_shape.rank();
- std::vector<std::pair<int32_t, int32_t>> paddings;
- paddings.reserve(static_cast<uint64_t>(num_dims));
- for (int axis = 0; axis < num_dims; axis++)
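+ // The paddings tensor has shape [rank, 2]: element [i, 0] is the padding added
+ // before dimension i and element [i, 1] is the padding added after it.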
+ std::vector<std::int32_t> padding_before(num_dims);
+ std::vector<std::int32_t> padding_after(num_dims);
+ for (int i = 0; i < num_dims; i++)
{
-   paddings.emplace_back(paddings_tensor.at(mir::Index({axis, 0})),
-                         paddings_tensor.at(mir::Index({axis, 1})));
+   padding_before[i] = paddings_tensor.at(mir::Index({i, 0}));
+   padding_after[i] = paddings_tensor.at(mir::Index({i, 1}));
}
- float filler_value = 0.0;
- mir::Scalar filler(reinterpret_cast<char *>(&filler_value), mir::DataType::FLOAT32,
-                    sizeof(filler_value));
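+ // Fill value used for the padded elements.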
+ const float padding_value = 0.0f;
- // FIXME Do we really need num_dims as an argument? It looks redundant.
- auto result = createOp<ops::PadOp>(input, num_dims, paddings, filler);
- return {result->getOutput(0)};
+ auto result =
+     createOp<ops::PadOp>(input, padding_before, padding_after, padding_value)->getOutput(0);
+ return {result};
}
std::vector<mir::Operation::Output *>