# Copyright (c) 2022 Samsung Electronics Co., Ltd. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
19 __slots__ = () # This prevents access via __dict__.
21 # Basic optimization passes
22 # These passes do not change the execution result of the model
30 'fold_fully_connected',
32 'fold_sparse_to_dense',
35 'fuse_add_with_tconv',
36 'fuse_add_with_fully_connected',
37 'fuse_batchnorm_with_conv',
38 'fuse_batchnorm_with_dwconv',
39 'fuse_batchnorm_with_tconv',
40 'fuse_activation_function',
43 'fuse_mean_with_mean',
44 'fuse_transpose_with_mean',
45 'transform_min_max_to_relu6',
46 'transform_min_relu_to_relu6',
48 # Remove redundant operators
49 'remove_redundant_reshape',
50 'remove_redundant_transpose',
51 'remove_unnecessary_reshape',
52 'remove_unnecessary_slice',
53 'remove_unnecessary_strided_slice',
54 'remove_unnecessary_split',
57 # (passes to help further optimization)
58 'resolve_customop_add',
59 'resolve_customop_batchmatmul',
60 'resolve_customop_matmul',
61 'resolve_customop_max_pool_with_argmax',
62 'resolve_customop_splitv',
63 'substitute_pack_to_reshape',
64 'substitute_padv2_to_pad',
65 'substitute_splitv_to_split',
66 'substitute_squeeze_to_reshape',
67 'substitute_strided_slice_to_reshape',
68 'substitute_transpose_to_reshape',
69 'forward_reshape_to_unaryop',
70 'forward_transpose_op',
71 'replace_non_const_fc_with_batch_matmul', # For quantization
75 # (OPTION_NAME, HELP_MESSAGE)
76 ('convert_nchw_to_nhwc',
77 'Experimental: This will convert NCHW operators to NHWC under the assumption that input model is NCHW.'
79 ('expand_broadcast_const', 'expand broadcastable constant node inputs'),
80 ('nchw_to_nhwc_input_shape',
81 'convert the input shape of the model (argument for convert_nchw_to_nhwc)'),
82 ('nchw_to_nhwc_output_shape',
83 'convert the output shape of the model (argument for convert_nchw_to_nhwc)'),
84 ('fold_add_v2', 'fold AddV2 op with constant inputs'),
85 ('fold_cast', 'fold Cast op with constant input'),
86 ('fold_densify', 'fold Densify op with sparse constant input'),
87 ('fold_dequantize', 'fold Dequantize op'),
88 ('fold_dwconv', 'fold Depthwise Convolution op with constant inputs'),
89 ('fold_fully_connected', 'fold FullyConnected op with constant inputs'),
90 ('fold_gather', 'fold Gather op'),
91 ('fold_sparse_to_dense', 'fold SparseToDense op'),
92 ('forward_reshape_to_unaryop', 'Forward Reshape op'),
93 ('forward_transpose_op', 'Forward Transpose op'),
94 ('fuse_add_with_tconv', 'fuse Add op to Transposed'),
95 ('fuse_add_with_fully_connected', 'fuse Add op to FullyConnected op'),
96 ('fuse_batchnorm_with_conv', 'fuse BatchNorm op to Convolution op'),
97 ('fuse_batchnorm_with_dwconv', 'fuse BatchNorm op to Depthwise Convolution op'),
98 ('fuse_batchnorm_with_tconv', 'fuse BatchNorm op to Transposed Convolution op'),
99 ('fuse_bcq', 'apply Binary Coded Quantization'),
100 ('fuse_preactivation_batchnorm',
101 'fuse BatchNorm operators of pre-activations to Convolution op'),
102 ('fuse_mean_with_mean', 'fuse two consecutive Mean ops'),
103 ('fuse_transpose_with_mean',
104 'fuse Mean with a preceding Transpose under certain conditions'),
105 ('make_batchnorm_gamma_positive',
106 'make negative gamma of BatchNorm to a small positive value (1e-10).'
107 ' Note that this pass can change the execution result of the model.'
108 ' So, use it only when the impact is known to be acceptable.'),
109 ('fuse_activation_function', 'fuse Activation function to a preceding operator'),
110 ('fuse_instnorm', 'fuse ops to InstanceNorm operator'),
111 ('fuse_prelu', 'fuse ops to PReLU operator'),
112 ('replace_cw_mul_add_with_depthwise_conv',
113 'replace channel-wise Mul/Add with DepthwiseConv2D'),
114 ('remove_fakequant', 'remove FakeQuant ops'),
115 ('remove_quantdequant', 'remove Quantize-Dequantize sequence'),
116 ('remove_redundant_quantize', 'remove redundant Quantize ops'),
117 ('remove_redundant_reshape', 'fuse or remove subsequent Reshape ops'),
118 ('remove_redundant_transpose', 'fuse or remove subsequent Transpose ops'),
119 ('remove_unnecessary_reshape', 'remove unnecessary reshape ops'),
120 ('remove_unnecessary_slice', 'remove unnecessary slice ops'),
121 ('remove_unnecessary_strided_slice', 'remove unnecessary strided slice ops'),
122 ('remove_unnecessary_split', 'remove unnecessary split ops'),
123 ('replace_non_const_fc_with_batch_matmul',
124 'replace FullyConnected op with non-const weights to BatchMatMul op'),
125 ('replace_sub_with_add', 'replace Sub op with Add op'),
126 ('resolve_customop_add', 'convert Custom(Add) op to Add op'),
127 ('resolve_customop_batchmatmul',
128 'convert Custom(BatchMatmul) op to BatchMatmul op'),
129 ('resolve_customop_matmul', 'convert Custom(Matmul) op to Matmul op'),
130 ('resolve_customop_max_pool_with_argmax',
131 'convert Custom(MaxPoolWithArgmax) to net of builtin operators'),
132 ('resolve_customop_splitv', 'convert Custom(SplitV) op to SplitV op'),
133 ('shuffle_weight_to_16x1float32',
134 'convert weight format of FullyConnected op to SHUFFLED16x1FLOAT32.'
135 ' Note that it only converts weights whose row is a multiple of 16'),
136 ('substitute_pack_to_reshape', 'convert single input Pack op to Reshape op'),
137 ('substitute_padv2_to_pad', 'convert certain condition PadV2 to Pad'),
138 ('substitute_splitv_to_split', 'convert certain condition SplitV to Split'),
139 ('substitute_squeeze_to_reshape', 'convert certain condition Squeeze to Reshape'),
140 ('substitute_strided_slice_to_reshape',
141 'convert certain condition StridedSlice to Reshape'),
142 ('substitute_transpose_to_reshape',
143 'convert certain condition Transpose to Reshape'),
144 ('transform_min_max_to_relu6', 'transform Minimum-Maximum pattern to Relu6 op'),
145 ('transform_min_relu_to_relu6', 'transform Minimum(6)-Relu pattern to Relu6 op'),
146 ('unroll_unidirseqlstm', 'unroll UnidirectionalSequenceLSTM op'))
149 CONSTANT = CONSTANT()