Use the output shape stored in the tflite model, as it is impossible to compute it from the operation parameters
Signed-off-by: Vladimir Plazun <v.plazun@partner.samsung.com>
namespace mir {
namespace ops {
+/**
+ * @brief Deduce the spatial paddings from the input shape, kernel shape,
+ * strides and the (already stored) output shape, mirroring `ComputePadding`
+ * in the tflite sources. Assumes NHWC layout — TODO confirm against callers.
+ */
+void DeConv2DOp::inferPaddings() {
+ auto& input_shape = getInputShape(0);
+ auto& kernel_shape = _kernel.getShape();
+ auto output_shape = getOutputShape(0);
+
+ // NOTE: the padding calculation appears to be identical for the SAME and
+ // VALID padding types, at least in tflite's implementation.
+ for (int d = 0; d < 2; ++d) {
+ // Total padding along spatial dimension d; dim(d + 1) skips the batch axis.
+ // See `ComputePadding` in tflite sources.
+ int pad = (input_shape.dim(d + 1) - 1) * _strides.dim(d)
+ + kernel_shape.dim(d) - output_shape.dim(d + 1);
+
+ // Split the total: the "before" side gets the floor half, the remainder
+ // goes to the "after" side (tflite convention).
+ _paddingBefore[d] = pad / 2;
+ _paddingAfter[d] = pad - _paddingBefore[d];
+ }
+}
+
// See https://github.com/tensorflow/tensorflow/issues/2118
// VALID: output = input * stride + filter - stride
// SAME: output = input * stride - stride + 1
inferOutputShapes();
}
+ /**
+ * @brief Construct a transposed convolution whose output shape is taken
+ * from the model (it cannot be inferred from the other parameters).
+ * @param arg input tensor descriptor
+ * @param kernel convolution kernel tensor
+ * @param strides spatial strides
+ * @param padding_type padding scheme; Custom is not supported by this ctor
+ * @param output_shape output shape stored in the model
+ */
+ DeConv2DOp(const IODescriptor& arg,
+ const TensorVariant& kernel,
+ const Shape& strides,
+ PaddingType padding_type,
+ const Shape& output_shape)
+ : Operation(Type::deConv2D, {arg}),
+ _kernel(kernel),
+ _strides(strides),
+ _paddingType(padding_type),
+ _paddingBefore(2),
+ _paddingAfter(2) {
+ assert(_paddingType != PaddingType::Custom);
+ // Set the output shape first: inferPaddings() reads it via getOutputShape.
+ setOutputShape(0, output_shape);
+ inferPaddings();
+ }
+
const TensorVariant& getKernel() const { return _kernel; }
const Shape& getStrides() const { return _strides; }
private:
void inferOutputShapes();
+ /**
+ * @brief Compute paddings based on input shape, kernel shape and strides
+ */
+ void inferPaddings();
+
const TensorVariant _kernel;
Shape _strides;
PaddingType _paddingType;
case BuiltinOperator_SQUARED_DIFFERENCE:
outputs = _opCreator->createSquaredDifference(inputs, params); // no activation
break;
- case BuiltinOperator_TRANSPOSE_CONV:
+ case BuiltinOperator_TRANSPOSE_CONV: {
+ auto tensor = (*_tensors)[op->outputs()->Get(0)];
+ auto out_shape = ShapeHelper::createShape(*tensor->shape(), tensor->shape()->size());
outputs = _opCreator->createTransposeConv(
- inputs, params,op->builtin_options_as<TransposeConvOptions>());
+ inputs, params, op->builtin_options_as<TransposeConvOptions>(), out_shape);
break;
+ }
case BuiltinOperator_PAD:
outputs = _opCreator->createPad(inputs, params, op->builtin_options_as<PadOptions>());
break;
// TODO: Currently this is only used by the interpreter and shape inference,
// don't forget to change this if tensor shape processing architecture changes.
params_for_op.emplace_back(mir::transposeTensor<1, 2, 3, 0>(tensor));
+ } else if (opcode == BuiltinOperator_TRANSPOSE_CONV && t->shape()->size() == 4) {
+ // Tflite stores the kernel as [in, H, W, out]; we expect [H, W, in, out].
+ params_for_op.emplace_back(mir::transposeTensor<1, 2, 0, 3>(tensor));
} else if (opcode == BuiltinOperator_FULLY_CONNECTED && t->shape()->size() == 2) {
params_for_op.emplace_back(mir::transposeTensor<1, 0>(tensor));
} else {
std::vector<mir::Operation*>
TFLiteOpCreator::createTransposeConv(InputOps& inputs, const InputParams& params,
- const ::tflite::TransposeConvOptions* opts) {
+ const ::tflite::TransposeConvOptions* opts,
+ const Shape& output_shape) {
Shape strides{opts->stride_h(), opts->stride_w()};
+
+ // output_shape is read from the model; DeConv2DOp derives its paddings
+ // from it, since they cannot be computed from the options alone.
return createOp<ops::DeConv2DOp>(ActivationFunctionType_NONE, inputs[0]->getOutput(0), params[1],
- strides, paddingMap[opts->padding()]);
+ strides, paddingMap[opts->padding()], output_shape);
}
std::vector<mir::Operation*>
* @brief Creates a Transposed convolution
* @param params 0 - output shape (unused), 1 - kernel, 2 - input
*/
- std::vector<mir::Operation*> createTransposeConv(InputOps&, const InputParams&,
- const ::tflite::TransposeConvOptions*);
+ std::vector<mir::Operation*> createTransposeConv(
+ InputOps&, const InputParams&,
+ const ::tflite::TransposeConvOptions*,
+ const Shape&);
/**
* @brief Create a Pad operation