#include "exec/DynamicShapeInference.h"
#include "util/ShapeInference.h"
+#include <assert.h>
+#include <stdexcept>
namespace onert
{
namespace exec
{
+/**
+ * @brief Look up the dynamic tensor manager owning @p tensor.
+ *
+ * @param tensor Tensor whose manager is requested (assumed non-null).
+ * @return Non-null pointer to the tensor's IDynamicTensorManager.
+ * @throws std::runtime_error if the tensor has no dynamic tensor manager.
+ */
+inline backend::IDynamicTensorManager *
+dynamicTensorManagerOf(const std::shared_ptr<backend::ITensor> &tensor)
+{
+  // Fetch once: avoids a second call to a (possibly virtual) getter.
+  auto *manager = tensor->dynamic_tensor_manager();
+  if (!manager)
+    throw std::runtime_error{"Dynamic Tensor Manager is not available for this tensor."};
+  return manager;
+}
+
void DynamicShapeInferer::handleBinaryArithmeticOp(const ir::Operation &op,
const ir::OperandIndex lhs_idx,
const ir::OperandIndex rhs_idx)
ir::Shape new_shape = shape_inference::inferEltwiseShape(lhs_shape, rhs_shape);
- _dynamic_tensor_manager->applyShape(output_idx, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_idx, new_shape);
assert(output->buffer() != nullptr);
}
auto output_ind = op.getOutputs().at(0);
auto output = _tensor_registry->getITensor(output_ind);
- _dynamic_tensor_manager->applyShape(output_ind, output_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape);
assert(output->buffer() != nullptr);
}
ir::Shape new_shape = shape_inference::inferArgMaxShape(input_shape, axis, rank);
- _dynamic_tensor_manager->applyShape(output_ind, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape);
assert(output->buffer() != nullptr);
}
// TODO
auto new_shape = shape_inference::inferBatchMatMulShape(lhs_shape, rhs_shape, op.param());
- _dynamic_tensor_manager->applyShape(output_index, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_index, new_shape);
}
void DynamicShapeInferer::visit(const ir::operation::BroadcastTo &op)
shape->getShape(), reinterpret_cast<const int32_t *>(shape->buffer()));
// set output shape and output buffer
- _dynamic_tensor_manager->applyShape(output_ind, output_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape);
assert(output->buffer() != nullptr);
}
auto output = _tensor_registry->getITensor(output_ind);
auto output_shape = shape_inference::inferConcatShape(in_shapes, op.param());
- _dynamic_tensor_manager->applyShape(output_ind, output_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape);
}
void DynamicShapeInferer::visit(const ir::operation::Conv2D &op)
ir::Shape output_shape = shape_inference::inferConv2DShape(input_shape, ker_shape, op.param());
- _dynamic_tensor_manager->applyShape(output_ind, output_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape);
assert(output->buffer() != nullptr);
}
auto output_shape = shape_inference::inferExpandDimsShape(input_shape, axis_buf[0]);
- _dynamic_tensor_manager->applyShape(output_ind, output_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape);
assert(output->buffer() != nullptr);
}
auto output_shape = shape_inference::inferFillShape(input_shape, input_buf);
- _dynamic_tensor_manager->applyShape(output_ind, output_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape);
assert(output->buffer() != nullptr);
}
auto output_ind = op.getOutputs().at(0);
auto output = _tensor_registry->getITensor(output_ind);
- _dynamic_tensor_manager->applyShape(output_ind, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape);
assert(output->buffer() != nullptr);
}
auto output_ind = op.getOutputs().at(0);
auto output = _tensor_registry->getITensor(output_ind);
- _dynamic_tensor_manager->applyShape(output_ind, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape);
assert(output->buffer() != nullptr);
}
handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::Logistic::INPUT));
}
+/// L2Normalization preserves the input shape, so the generic unary handler applies.
+void DynamicShapeInferer::visit(const ir::operation::L2Normalization &op)
+{
+  const auto input_idx = op.getInputs().at(ir::operation::L2Normalization::INPUT);
+  handleSimpleUnaryOp(op, input_idx);
+}
+
void DynamicShapeInferer::visit(const ir::operation::MatrixBandPart &op)
{
handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::MatrixBandPart::INPUT));
const auto axis_val = op.param().axis;
ir::Shape new_shape = shape_inference::inferOnehotShape(indices_shape, *depth_buf, axis_val);
- _dynamic_tensor_manager->applyShape(output_ind, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape);
assert(output->buffer() != nullptr);
}
ir::Shape new_shape = shape_inference::inferPackShape(input_shape, axis, rank, num);
- _dynamic_tensor_manager->applyShape(output_ind, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape);
assert(output->buffer() != nullptr);
}
shape_inference::inferPadShape(input->getShape(), pad_buf, pad->getShape().num_elements());
// change output shape and reallocate output tensor memory
- _dynamic_tensor_manager->applyShape(output_ind, output_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape);
assert(output->buffer() != nullptr);
}
*reinterpret_cast<int32_t *>(limit_tensor->buffer()),
*reinterpret_cast<int32_t *>(delta_tensor->buffer()));
}
- _dynamic_tensor_manager->applyShape(output_ind, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape);
assert(output->buffer() != nullptr);
}
ir::Shape new_shape = shape_inference::inferReduceShape(input_shape, axes_vec, keep_dims);
- _dynamic_tensor_manager->applyShape(output_ind, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape);
assert(output->buffer() != nullptr);
}
if (output_shape != output->getShape() || output->buffer() == nullptr)
{
// change on output shape
- _dynamic_tensor_manager->applyShape(output_ind, output_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape);
}
assert(output->buffer() != nullptr);
}
if (output_shape != output->getShape() || output->buffer() == nullptr)
{
// change on output shape
- _dynamic_tensor_manager->applyShape(output_ind, output_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape);
}
assert(output->buffer() != nullptr);
}
}
}
+/// Infers and applies the output shape of ResizeBilinear when input or output is dynamic.
+void DynamicShapeInferer::visit(const ir::operation::ResizeBilinear &op)
+{
+  // check if output is not dynamic
+  auto output_ind = op.getOutputs().at(0);
+  auto output = _tensor_registry->getITensor(output_ind);
+
+  // NOTE(review): was ir::operation::Reshape::Input::INPUT — a copy-paste from the
+  // Reshape visitor; use this operation's own input enum.
+  auto input_ind = op.getInputs().at(ir::operation::ResizeBilinear::Input::INPUT);
+  auto input = _tensor_registry->getITensor(input_ind);
+
+  if ((!input->is_dynamic()) && (!output->is_dynamic()))
+    return;
+
+  // getting output shape from input shape and Params (fixed height/width)
+  auto output_shape = shape_inference::inferResizeBilinearShape(
+      input->getShape(), op.param().height_out, op.param().width_out);
+
+  // if shape is changed, change output shape and reallocate output tensor memory
+  if (output_shape != output->getShape() || output->buffer() == nullptr)
+  {
+    // Use the per-tensor manager, consistent with every other visitor in this patch;
+    // the _dynamic_tensor_manager member is being retired by this change.
+    dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape);
+  }
+  assert(output->buffer() != nullptr);
+}
+
void DynamicShapeInferer::visit(const ir::operation::Reverse &op)
{
handleSimpleUnaryOp(op, op.getInputs().at(ir::operation::Reverse::INPUT));
auto output_ind = op.getOutputs().at(0);
auto output = _tensor_registry->getITensor(output_ind);
- _dynamic_tensor_manager->applyShape(output_ind, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape);
assert(output->buffer() != nullptr);
}
ir::Shape output_shape;
output_shape.append(input_shape.rank());
- _dynamic_tensor_manager->applyShape(output_ind, output_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape);
assert(output->buffer() != nullptr);
}
ir::Shape new_shape = shape_inference::inferSliceShape(input_shape, begins_buf, sizes_buf);
- _dynamic_tensor_manager->applyShape(output_index, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_index, new_shape);
assert(output->buffer() != nullptr);
}
ir::Shape new_shape = shape_inference::inferSpaceToBatchNDShape(
input_shape, block_shape_shape, padding_shape, block_shape_data, padding_data);
- _dynamic_tensor_manager->applyShape(output_idx, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_idx, new_shape);
assert(output->buffer() != nullptr);
}
auto output_ind = op.getOutputs().at(out_tensor_idx);
auto output = _tensor_registry->getITensor(output_ind);
- _dynamic_tensor_manager->applyShape(output_ind, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape);
assert(output->buffer() != nullptr);
}
}
auto output_ind = op.getOutputs().at(0);
auto output = _tensor_registry->getITensor(output_ind);
- _dynamic_tensor_manager->applyShape(output_ind, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape);
assert(output->buffer() != nullptr);
}
ir::Shape output_shape =
onert::shape_inference::inferStridedSliceShape(input_shape, op_params, rank);
- _dynamic_tensor_manager->applyShape(output_index, output_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_index, output_shape);
assert(output->buffer() != nullptr);
}
auto output_shape = shape_inference::inferTileShape(input_shape, multiplier_buffer);
// set output shape and output buffer
- _dynamic_tensor_manager->applyShape(output_ind, output_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, output_shape);
assert(output->buffer() != nullptr);
}
// set output shape, based on input and params
ir::Shape new_shape = shape_inference::inferTransposeShape(input_shape, perm);
- _dynamic_tensor_manager->applyShape(output_ind, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape);
assert(output->buffer() != nullptr);
}
auto output_ind = op.getOutputs().at(out_tensor_idx);
auto output = _tensor_registry->getITensor(output_ind);
- _dynamic_tensor_manager->applyShape(output_ind, new_shape);
+ dynamicTensorManagerOf(output)->applyShape(output_ind, new_shape);
assert(output->buffer() != nullptr);
}