/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #ifndef __ONERT_EXEC_DYNAMIC_SHAPE_INFERER_H__
18 #define __ONERT_EXEC_DYNAMIC_SHAPE_INFERER_H__
20 #include "ir/Operands.h"
21 #include "ir/OperationVisitor.h"
23 #include "backend/ITensorRegistry.h"
33 * @brief Class to infer shape of output tensor at execution time and
34 * allocate memory fo output tensor if needed
36 class DynamicShapeInferer : public ir::OperationVisitor
39 DynamicShapeInferer(const ir::Operands &operands,
40 const std::shared_ptr<backend::ITensorRegistry> &tensor_registry)
41 : _operands(operands), _tensor_registry(tensor_registry)
43 UNUSED_RELEASE(_operands);
44 UNUSED_RELEASE(_tensor_registry);
48 // TODO Define visitors for operations. List them in alphabetic order.
49 // Remove TODO when any op starting from the alphabet is added
50 void visit(const ir::operation::ArgMinMax &op) override;
51 void visit(const ir::operation::BatchMatMul &op) override;
52 void visit(const ir::operation::BCQFullyConnected &op) override;
53 void visit(const ir::operation::BCQGather &op) override;
54 void visit(const ir::operation::BinaryArithmetic &op) override;
55 void visit(const ir::operation::BroadcastTo &op) override;
56 void visit(const ir::operation::Comparison &op) override;
57 void visit(const ir::operation::Concat &op) override;
58 void visit(const ir::operation::Conv2D &op) override;
59 void visit(const ir::operation::ElementwiseActivation &op) override;
60 void visit(const ir::operation::ElementwiseBinary &op) override;
61 void visit(const ir::operation::ElementwiseUnary &op) override;
62 void visit(const ir::operation::ExpandDims &op) override;
63 void visit(const ir::operation::Fill &op) override;
64 void visit(const ir::operation::FullyConnected &op) override;
65 void visit(const ir::operation::FusedBatchNorm &op) override;
66 void visit(const ir::operation::Gather &op) override;
67 void visit(const ir::operation::L2Normalization &op) override;
68 void visit(const ir::operation::LSTM &op) override;
69 void visit(const ir::operation::MatrixBandPart &op) override;
70 void visit(const ir::operation::DetectionPostProcess &op) override;
71 void visit(const ir::operation::OneHot &op) override;
72 void visit(const ir::operation::Pack &op) override;
73 void visit(const ir::operation::Pad &op) override;
74 void visit(const ir::operation::Permute &op) override;
75 void visit(const ir::operation::Pow &op) override;
76 // TODO write op starting from Q
77 void visit(const ir::operation::Range &op) override;
78 void visit(const ir::operation::Reduce &op) override;
79 void visit(const ir::operation::Reshape &op) override;
80 void visit(const ir::operation::ResizeBilinear &op) override;
81 void visit(const ir::operation::Reverse &op) override;
82 void visit(const ir::operation::Select &op) override;
83 void visit(const ir::operation::Shape &op) override;
84 void visit(const ir::operation::Slice &op) override;
85 void visit(const ir::operation::Softmax &op) override;
86 void visit(const ir::operation::SpaceToBatchND &op) override;
87 void visit(const ir::operation::Split &op) override;
88 void visit(const ir::operation::Squeeze &op) override;
89 void visit(const ir::operation::StridedSlice &op) override;
90 void visit(const ir::operation::SquaredDifference &op) override;
91 void visit(const ir::operation::Tile &op) override;
92 void visit(const ir::operation::Transpose &op) override;
93 void visit(const ir::operation::Unpack &op) override;
94 // TODO write op starting from V
98 * @brief Performs shape inference and memory allocation for arithmetic operation
100 void handleBinaryArithmeticOp(const ir::Operation &op, const ir::OperandIndex lhs_idx,
101 const ir::OperandIndex rhs_idx);
103 * @brief Performs shape inference and memory allocation for unary op whose output shape is
104 * always same with input shape
106 void handleSimpleUnaryOp(const ir::Operation &op, const ir::OperandIndex input_idx);
108 // in case of output tensor of an op, it is possible that
109 // the output became dynamic although it had been static before.
110 // Once a tensor becomes dynamic, it will lost memory allocated for static.
111 // Therefore once output is dynamic, it should be treated as dynamic tensor. (memory should be
112 // allocated at runtime) `previously` means `dynamic` or `static` has been set in previous loop in
113 // WHILE of previous call of `nnfw_run()`
114 bool previously_static(backend::ITensor *op_output) { return !op_output->is_dynamic(); }
116 // helper function that check if op's input is static
117 // Note that input of n'th op has been set to static or dynamic by (n-1)th op.
118 // That's why it is called `currently_static`
119 bool currently_static(backend::ITensor *op_input) { return !op_input->is_dynamic(); }
123 * @brief To get operand-level info, e.g., ir::Operand::isConstant()
125 const ir::Operands &_operands;
127 * @brief To get tensor object and access tensor-level info, e.g., ITensor::buffer()
129 std::shared_ptr<backend::ITensorRegistry> _tensor_registry;
135 #endif // __ONERT_EXEC_DYNAMIC_SHAPE_INFERER_H__