/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "luci/CircleOptimizer.h"
19 #include "luci/Pass/ConvertNCHWToNHWCPass.h"
20 #include "luci/Pass/ExpandBroadcastConstPass.h"
21 #include "luci/Pass/FoldAddV2Pass.h"
22 #include "luci/Pass/FoldCastPass.h"
23 #include "luci/Pass/FoldDepthwiseConv2DPass.h"
24 #include "luci/Pass/FoldDequantizePass.h"
25 #include "luci/Pass/FoldSparseToDensePass.h"
26 #include "luci/Pass/ForwardReshapeToUnaryOpPass.h"
27 #include "luci/Pass/ForceQuantParamPass.h"
28 #include "luci/Pass/FuseActivationFunctionPass.h"
29 #include "luci/Pass/FuseAddWithFullyConnectedPass.h"
30 #include "luci/Pass/FuseAddWithTConvPass.h"
31 #include "luci/Pass/FuseBatchNormWithConvPass.h"
32 #include "luci/Pass/FuseBatchNormWithDwConvPass.h"
33 #include "luci/Pass/FuseBatchNormWithTConvPass.h"
34 #include "luci/Pass/FuseBCQPass.h"
35 #include "luci/Pass/FuseInstanceNormPass.h"
36 #include "luci/Pass/FuseMeanWithMeanPass.h"
37 #include "luci/Pass/FusePreActivationBatchNormPass.h"
38 #include "luci/Pass/FuseTransposeWithMeanPass.h"
39 #include "luci/Pass/MakeBatchNormGammaPositivePass.h"
40 #include "luci/Pass/PropagateQuantParamPass.h"
41 #include "luci/Pass/RemoveFakeQuantPass.h"
42 #include "luci/Pass/RemoveQuantDequantSeqPass.h"
43 #include "luci/Pass/RemoveRedundantReshapePass.h"
44 #include "luci/Pass/RemoveRedundantTransposePass.h"
45 #include "luci/Pass/RemoveUnnecessaryReshapePass.h"
46 #include "luci/Pass/RemoveUnnecessarySlicePass.h"
47 #include "luci/Pass/RemoveUnnecessaryStridedSlicePass.h"
48 #include "luci/Pass/RemoveUnnecessarySplitPass.h"
49 #include "luci/Pass/ReplaceMulAddWithDepthwiseConvPass.h"
50 #include "luci/Pass/ReplaceSubWithAddPass.h"
51 #include "luci/Pass/ResolveCustomOpAddPass.h"
52 #include "luci/Pass/ResolveCustomOpBatchMatMulPass.h"
53 #include "luci/Pass/ResolveCustomOpMatMulPass.h"
54 #include "luci/Pass/ResolveCustomOpMaxPoolWithArgmaxPass.h"
55 #include "luci/Pass/RequantizePass.h"
56 #include "luci/Pass/QuantizeWithMinMaxPass.h"
57 #include "luci/Pass/QuantizeDequantizeWeightsPass.h"
58 #include "luci/Pass/SparsifyTensorPass.h"
59 #include "luci/Pass/ShuffleWeightTo16x1Float32Pass.h"
60 #include "luci/Pass/SubstitutePackToReshapePass.h"
61 #include "luci/Pass/SubstitutePadV2ToPadPass.h"
62 #include "luci/Pass/SubstituteSplitVToSplitPass.h"
63 #include "luci/Pass/SubstituteSqueezeToReshapePass.h"
64 #include "luci/Pass/SubstituteStridedSliceToReshapePass.h"
65 #include "luci/Pass/SubstituteTransposeToReshapePass.h"
66 #include "luci/Pass/TransformMinMaxToRelu6Pass.h"
67 #include "luci/Pass/TransformMinReluToRelu6Pass.h"
68 // TODO add more passes
70 #include "luci/Pass/CircleShapeInferencePass.h"
71 #include "luci/Pass/CircleTypeInferencePass.h"
74 #include <logo/RemoveDeadNodeWithQueryPass.h>
76 #include "ModulePhase.h"
77 #include "ProgressReporter.h"
78 #include "helpers/Strings.h"
80 #include "QuantizedModelVerifier.h"
82 #include <luci/IR/CircleNodes.h>
83 #include <logo/Phase.h>
84 #include <pepper/csv2vec.h>
/**
 * @brief Parse a value of type T from its textual representation.
 *
 * @param str  text to parse (e.g. "0.5", "42")
 * @return     the parsed value; value-initialized T if extraction fails
 *             (stream extraction leaves `data` zero-initialized on failure)
 */
template <typename T> T lexical_cast(const std::string &str)
{
  // Construct the stream directly from the input instead of default-constructing
  // and streaming the string in — one step fewer, same extraction semantics.
  std::istringstream ss(str);
  T data;
  ss >> data;
  return data;
}
/**
 * @brief Parse every string in @p sv into a value of type T.
 *
 * @param sv  strings to parse (not modified; non-const ref kept for interface
 *            compatibility with existing callers)
 * @return    vector of parsed values, in the same order as @p sv
 */
template <typename T> std::vector<T> lexical_cast(std::vector<std::string> &sv)
{
  std::vector<T> result;
  result.reserve(sv.size()); // single allocation instead of growth on each push
  std::transform(sv.begin(), sv.end(), std::back_inserter(result),
                 // take the element by const reference — the original lambda
                 // copied each std::string per element
                 [](const std::string &str) -> T { return lexical_cast<T>(str); });
  return result;
}
111 class OptimizeOptionsImpl final : public luci::CircleOptimizer::Options
114 void enable(Algorithm) final;
115 void param(AlgorithmParameters, const std::string &) final;
116 const std::string param(AlgorithmParameters) const final;
117 void params(AlgorithmParameters, std::vector<std::string> &) final;
118 std::vector<std::string> params(AlgorithmParameters) const final;
119 bool query(Algorithm) final;
122 std::vector<Algorithm> _algorithms;
123 std::map<AlgorithmParameters, const std::string> _algorithm_params;
124 std::map<AlgorithmParameters, std::vector<std::string>> _multiple_params;
127 void OptimizeOptionsImpl::enable(Algorithm algo) { _algorithms.push_back(algo); }
129 void OptimizeOptionsImpl::param(AlgorithmParameters param, const std::string &str)
131 _algorithm_params.insert(std::pair<AlgorithmParameters, const std::string>(param, str));
134 const std::string OptimizeOptionsImpl::param(AlgorithmParameters param) const
136 auto param_str = _algorithm_params.find(param);
137 if (param_str != _algorithm_params.end())
139 return param_str->second;
143 return std::string();
147 void OptimizeOptionsImpl::params(AlgorithmParameters param, std::vector<std::string> &vec)
149 _multiple_params[param] = vec;
152 std::vector<std::string> OptimizeOptionsImpl::params(AlgorithmParameters param) const
154 auto param_vec = _multiple_params.find(param);
155 if (param_vec != _multiple_params.end())
157 return param_vec->second;
161 return std::vector<std::string>();
165 bool OptimizeOptionsImpl::query(Algorithm algo)
167 std::vector<Algorithm>::iterator it = std::find(_algorithms.begin(), _algorithms.end(), algo);
168 if (it == _algorithms.end())
174 void convert_nchw_to_nhwc(loco::Graph *g, bool preserve_input, bool preserve_output)
178 phase.emplace_back(std::make_unique<logo::RemoveDeadNodeWithQueryPass>());
179 phase.emplace_back(std::make_unique<luci::CircleShapeInferencePass>());
180 phase.emplace_back(std::make_unique<luci::CircleTypeInferencePass>());
183 std::make_unique<luci::ConvertNCHWToNHWCPass>(preserve_input, preserve_output));
185 ProgressReporter prog(g, logo::PhaseStrategy::Restart);
186 logo::PhaseRunner<logo::PhaseStrategy::Restart> phase_runner{g};
187 phase_runner.attach(&prog);
188 phase_runner.run(phase);
196 CircleOptimizer::Options *CircleOptimizer::options(void)
198 if (_options == nullptr)
200 _options = std::make_unique<OptimizeOptionsImpl>();
203 return _options.get();
206 void CircleOptimizer::optimize(luci::Module *m) const
210 // Following passes are needed everytime when other passes create new node or modify some nodes.
211 phase.emplace_back(std::make_unique<luci::CircleShapeInferencePass>());
212 phase.emplace_back(std::make_unique<luci::CircleTypeInferencePass>());
214 if (_options->query(Options::Algorithm::FuseBCQ))
216 phase.emplace_back(std::make_unique<FuseBCQPass>());
219 ModuleProgressReporter prog(m, logo::PhaseStrategy::Restart);
220 PhaseRunner<logo::PhaseStrategy::Restart> phase_runner{m};
221 phase_runner.attach(&prog);
222 phase_runner.run(phase);
225 void CircleOptimizer::optimize(loco::Graph *g) const
229 // Conversion from NCHW to NHWC is done first to avoid interference with other optimizations.
230 if (_options->query(Options::Algorithm::ConvertNCHWToNHWC))
232 bool preserve_input =
233 _options->param(Options::AlgorithmParameters::NCHW_to_NHWC_input_shape) != "true";
234 bool preserve_output =
235 _options->param(Options::AlgorithmParameters::NCHW_to_NHWC_output_shape) != "true";
237 convert_nchw_to_nhwc(g, preserve_input, preserve_output);
240 /* TRANSFORM DECLARATION BEGIN */
241 phase.emplace_back(std::make_unique<logo::RemoveDeadNodeWithQueryPass>());
243 // Following passes are needed everytime when other passes create new node or modify some nodes.
244 phase.emplace_back(std::make_unique<luci::CircleShapeInferencePass>());
245 phase.emplace_back(std::make_unique<luci::CircleTypeInferencePass>());
247 if (_options->query(Options::Algorithm::ResolveCustomOpAdd))
249 phase.emplace_back(std::make_unique<luci::ResolveCustomOpAddPass>());
251 if (_options->query(Options::Algorithm::ResolveCustomOpBatchMatMul))
253 phase.emplace_back(std::make_unique<luci::ResolveCustomOpBatchMatMulPass>());
255 if (_options->query(Options::Algorithm::ResolveCustomOpMatMul))
257 phase.emplace_back(std::make_unique<luci::ResolveCustomOpMatMulPass>());
259 if (_options->query(Options::Algorithm::FuseMeanWithMean))
261 phase.emplace_back(std::make_unique<FuseMeanWithMeanPass>());
263 if (_options->query(Options::Algorithm::ResolveCustomOpMaxPoolWithArgmax))
265 phase.emplace_back(std::make_unique<luci::ResolveCustomOpMaxPoolWithArgmaxPass>());
267 if (_options->query(Options::Algorithm::FuseInstanceNorm))
269 phase.emplace_back(std::make_unique<FuseInstanceNormPass>());
271 if (_options->query(Options::Algorithm::FuseBatchNormWithConv))
273 phase.emplace_back(std::make_unique<FuseBatchNormWithConvPass>());
275 if (_options->query(Options::Algorithm::FuseBatchNormWithDwConv))
277 phase.emplace_back(std::make_unique<FuseBatchNormWithDwConvPass>());
279 if (_options->query(Options::Algorithm::FuseBatchNormWithTConv))
281 phase.emplace_back(std::make_unique<FuseBatchNormWithTConvPass>());
283 if (_options->query(Options::Algorithm::FuseAddWithFullyConnected))
285 phase.emplace_back(std::make_unique<FuseAddWithFullyConnectedPass>());
287 if (_options->query(Options::Algorithm::FuseAddWithTConv))
289 phase.emplace_back(std::make_unique<FuseAddWithTConvPass>());
291 if (_options->query(Options::Algorithm::FuseActivationFunction))
293 phase.emplace_back(std::make_unique<FuseActivationFunctionPass>());
295 if (_options->query(Options::Algorithm::FuseTransposeWithMean))
297 phase.emplace_back(std::make_unique<FuseTransposeWithMeanPass>());
299 if (_options->query(Options::Algorithm::FoldAddV2))
301 phase.emplace_back(std::make_unique<luci::FoldAddV2Pass>());
303 if (_options->query(Options::Algorithm::FoldCast))
305 phase.emplace_back(std::make_unique<luci::FoldCastPass>());
307 if (_options->query(Options::Algorithm::FoldDepthwiseConv2D))
309 phase.emplace_back(std::make_unique<luci::FoldDepthwiseConv2DPass>());
311 if (_options->query(Options::Algorithm::FoldDequantize))
313 phase.emplace_back(std::make_unique<luci::FoldDequantizePass>());
315 if (_options->query(Options::Algorithm::FoldSparseToDense))
317 phase.emplace_back(std::make_unique<luci::FoldSparseToDensePass>());
319 if (_options->query(Options::Algorithm::ForwardReshapeToUnaryOp))
321 phase.emplace_back(std::make_unique<luci::ForwardReshapeToUnaryOpPass>());
323 if (_options->query(Options::Algorithm::FusePreActivationBatchNorm))
325 phase.emplace_back(std::make_unique<luci::FusePreActivationBatchNormPass>());
327 if (_options->query(Options::Algorithm::MakeBatchNormGammaPositive))
329 phase.emplace_back(std::make_unique<luci::MakeBatchNormGammaPositivePass>());
331 if (_options->query(Options::Algorithm::ShuffleWeightTo16x1Float32))
333 phase.emplace_back(std::make_unique<luci::ShuffleWeightTo16x1Float32Pass>());
335 if (_options->query(Options::Algorithm::ExpandBroadcastConst))
337 phase.emplace_back(std::make_unique<luci::ExpandBroadcastConstPass>());
339 if (_options->query(Options::Algorithm::RemoveFakeQuant))
341 phase.emplace_back(std::make_unique<luci::RemoveFakeQuantPass>());
343 if (_options->query(Options::Algorithm::RemoveQuantDequantSeq))
345 phase.emplace_back(std::make_unique<luci::RemoveQuantDequantSeqPass>());
347 if (_options->query(Options::Algorithm::RemoveUnnecessaryReshape))
349 phase.emplace_back(std::make_unique<luci::RemoveUnnecessaryReshapePass>());
351 if (_options->query(Options::Algorithm::RemoveUnnecessarySlice))
353 phase.emplace_back(std::make_unique<luci::RemoveUnnecessarySlicePass>());
355 if (_options->query(Options::Algorithm::RemoveUnnecessaryStridedSlice))
357 phase.emplace_back(std::make_unique<luci::RemoveUnnecessaryStridedSlicePass>());
359 if (_options->query(Options::Algorithm::RemoveUnnecessarySplit))
361 phase.emplace_back(std::make_unique<luci::RemoveUnnecessarySplitPass>());
363 if (_options->query(Options::Algorithm::RemoveRedundantReshape))
365 phase.emplace_back(std::make_unique<luci::RemoveRedundantReshapePass>());
367 if (_options->query(Options::Algorithm::RemoveRedundantTranspose))
369 phase.emplace_back(std::make_unique<luci::RemoveRedundantTransposePass>());
371 if (_options->query(Options::Algorithm::ReplaceMulAddWithDepthwiseConv))
373 phase.emplace_back(std::make_unique<luci::ReplaceMulAddWithDepthwiseConvPass>());
375 if (_options->query(Options::Algorithm::ReplaceSubWithAdd))
377 phase.emplace_back(std::make_unique<luci::ReplaceSubWithAddPass>());
379 if (_options->query(Options::Algorithm::SubstitutePackToReshape))
381 phase.emplace_back(std::make_unique<luci::SubstitutePackToReshapePass>());
383 if (_options->query(Options::Algorithm::SubstitutePadV2ToPad))
385 phase.emplace_back(std::make_unique<luci::SubstitutePadV2ToPadPass>());
387 if (_options->query(Options::Algorithm::SubstituteSplitVToSplit))
389 phase.emplace_back(std::make_unique<luci::SubstituteSplitVToSplitPass>());
391 if (_options->query(Options::Algorithm::SubstituteSqueezeToReshape))
393 phase.emplace_back(std::make_unique<luci::SubstituteSqueezeToReshapePass>());
395 if (_options->query(Options::Algorithm::SubstituteStridedSliceToReshape))
397 phase.emplace_back(std::make_unique<luci::SubstituteStridedSliceToReshapePass>());
399 if (_options->query(Options::Algorithm::SubstituteTransposeToReshape))
401 phase.emplace_back(std::make_unique<luci::SubstituteTransposeToReshapePass>());
403 if (_options->query(Options::Algorithm::TransformMinMaxToRelu6Pass))
405 phase.emplace_back(std::make_unique<luci::TransformMinMaxToRelu6Pass>());
407 if (_options->query(Options::Algorithm::TransformMinReluToRelu6Pass))
409 phase.emplace_back(std::make_unique<luci::TransformMinReluToRelu6Pass>());
412 /* TRANSFORM DECLARATION END */
414 ProgressReporter prog(g, logo::PhaseStrategy::Restart);
415 logo::PhaseRunner<logo::PhaseStrategy::Restart> phase_runner{g};
416 phase_runner.attach(&prog);
417 phase_runner.run(phase);
420 void CircleOptimizer::quantize(loco::Graph *g) const
422 // Fake quantization of weights
423 if (_options->query(Options::Algorithm::QuantizeDequantizeWeights))
425 static const std::vector<std::string> fakeq_supported_input_model_dtype{"float32"};
426 static const std::vector<std::string> fakeq_supported_output_model_dtype{"uint8", "int16"};
427 static const std::vector<std::string> fakeq_supported_granularity{"layer", "channel"};
429 auto input_model_dtype =
430 _options->param(Options::AlgorithmParameters::Quantize_input_model_dtype);
431 auto output_model_dtype =
432 _options->param(Options::AlgorithmParameters::Quantize_output_model_dtype);
433 auto granularity = _options->param(Options::AlgorithmParameters::Quantize_granularity);
435 if (!in_array(to_lower_case(input_model_dtype), fakeq_supported_input_model_dtype))
436 throw std::runtime_error("Unsupported input type. List of supported input type: " +
437 to_string(fakeq_supported_input_model_dtype));
439 if (!in_array(to_lower_case(output_model_dtype), fakeq_supported_output_model_dtype))
440 throw std::runtime_error("Unsupported output type. List of supported output type: " +
441 to_string(fakeq_supported_output_model_dtype));
443 if (!in_array(to_lower_case(granularity), fakeq_supported_granularity))
444 throw std::runtime_error("Unsupported granularity. List of supported granularity: " +
445 to_string(fakeq_supported_granularity));
447 if (str_to_granularity(granularity) == QuantizationGranularity::LayerWise &&
448 str_to_dtype(output_model_dtype) != loco::DataType::U8)
449 throw std::runtime_error("Layer-wise quantization only supports uint8 dtype.");
451 // Clear existing quantparams before doing fake quantization
452 for (auto node : loco::active_nodes(loco::output_nodes(g)))
454 auto circle_node = loco::must_cast<luci::CircleNode *>(node);
455 if (circle_node->quantparam() != nullptr)
456 circle_node->quantparam(nullptr);
459 luci::QuantizeDequantizeWeightsPass fake_quantizer(str_to_dtype(input_model_dtype),
460 str_to_dtype(output_model_dtype),
461 str_to_granularity(granularity));
462 fake_quantizer.run(g);
465 // Actual quantization of weights, bias, and activation
466 if (_options->query(Options::Algorithm::QuantizeWithMinMax))
468 static const std::vector<std::string> qwmm_supported_input_model_dtype{"float32"};
469 static const std::vector<std::string> qwmm_supported_output_model_dtype{"uint8", "int16"};
470 static const std::vector<std::string> qwmm_supported_granularity{"layer", "channel"};
472 auto input_model_dtype =
473 _options->param(Options::AlgorithmParameters::Quantize_input_model_dtype);
474 auto output_model_dtype =
475 _options->param(Options::AlgorithmParameters::Quantize_output_model_dtype);
476 auto granularity = _options->param(Options::AlgorithmParameters::Quantize_granularity);
478 if (!in_array(to_lower_case(input_model_dtype), qwmm_supported_input_model_dtype))
479 throw std::runtime_error("Unsupported input type. List of supported input types: " +
480 to_string(qwmm_supported_input_model_dtype));
482 if (!in_array(to_lower_case(output_model_dtype), qwmm_supported_output_model_dtype))
483 throw std::runtime_error("Unsupported output type. List of supported output types: " +
484 to_string(qwmm_supported_output_model_dtype));
486 if (!in_array(to_lower_case(granularity), qwmm_supported_granularity))
487 throw std::runtime_error("Unsupported granularity. List of supported granularity: " +
488 to_string(qwmm_supported_granularity));
490 if (str_to_granularity(granularity) == QuantizationGranularity::LayerWise &&
491 str_to_dtype(output_model_dtype) != loco::DataType::U8)
492 throw std::runtime_error("Layer-wise quantization only supports uint8 dtype.");
494 luci::QuantizeWithMinMaxPass quantizer(str_to_dtype(input_model_dtype),
495 str_to_dtype(output_model_dtype),
496 str_to_granularity(granularity));
499 // Post-quantization optimizations
502 phase.emplace_back(std::make_unique<luci::PropagateQuantParamPass>());
504 phase.emplace_back(std::make_unique<luci::CircleShapeInferencePass>());
505 phase.emplace_back(std::make_unique<luci::CircleTypeInferencePass>());
506 phase.emplace_back(std::make_unique<logo::RemoveDeadNodeWithQueryPass>());
508 ProgressReporter prog(g, logo::PhaseStrategy::Saturate);
509 logo::PhaseRunner<logo::PhaseStrategy::Saturate> phase_runner{g};
510 phase_runner.attach(&prog);
511 phase_runner.run(phase);
513 // Verify the type/granularity of the quantized model
514 luci::QuantizedModelVerifier verifier(str_to_dtype(output_model_dtype),
515 str_to_granularity(granularity));
520 if (_options->query(Options::Algorithm::Requantize))
522 static const std::vector<std::string> rq_supported_input_model_dtype{"int8"};
523 static const std::vector<std::string> rq_supported_output_model_dtype{"uint8"};
525 auto input_model_dtype =
526 _options->param(Options::AlgorithmParameters::Quantize_input_model_dtype);
527 auto output_model_dtype =
528 _options->param(Options::AlgorithmParameters::Quantize_output_model_dtype);
530 if (!in_array(to_lower_case(input_model_dtype), rq_supported_input_model_dtype))
531 throw std::runtime_error("Unsupported input type. List of supported input types: " +
532 to_string(rq_supported_input_model_dtype));
534 if (!in_array(to_lower_case(output_model_dtype), rq_supported_output_model_dtype))
535 throw std::runtime_error("Unsupported output type. List of supported output types: " +
536 to_string(rq_supported_output_model_dtype));
538 luci::RequantizePass requantizer(str_to_dtype(input_model_dtype),
539 str_to_dtype(output_model_dtype));
543 // Force to write quantparam to specified tensors
544 // NOTE Only per-tensor (not per-channel) qparam can be written
545 if (_options->query(Options::Algorithm::ForceQuantParam))
547 ForceQuantParamPass::TensorVector tensors =
548 _options->params(Options::AlgorithmParameters::Quantize_tensor_names);
549 auto str_scales = _options->params(Options::AlgorithmParameters::Quantize_scales);
550 auto str_zero_points = _options->params(Options::AlgorithmParameters::Quantize_zero_points);
552 // Cast scales/zero_points to proper types
553 ForceQuantParamPass::ScaleVector scales = lexical_cast<float>(str_scales);
554 ForceQuantParamPass::ZPVector zero_points = lexical_cast<int64_t>(str_zero_points);
556 ForceQuantParamPass fq(tensors, scales, zero_points);
562 // Do Shape/Type inference
563 phase.emplace_back(std::make_unique<luci::CircleShapeInferencePass>());
564 phase.emplace_back(std::make_unique<luci::CircleTypeInferencePass>());
566 ProgressReporter prog(g, logo::PhaseStrategy::Saturate);
567 logo::PhaseRunner<logo::PhaseStrategy::Saturate> phase_runner{g};
568 phase_runner.attach(&prog);
569 phase_runner.run(phase);
572 void CircleOptimizer::sparsify(loco::Graph *g) const
574 if (_options->query(Options::Algorithm::SparsifyTensorPass))
576 std::string tensor_name = _options->param(Options::AlgorithmParameters::Sparsify_tensor_name);
577 std::string str_tarversal_order =
578 _options->param(Options::AlgorithmParameters::Sparsify_traversal_order);
579 std::string str_format = _options->param(Options::AlgorithmParameters::Sparsify_format);
580 std::string str_block_size = _options->param(Options::AlgorithmParameters::Sparsify_block_size);
581 std::string str_block_map = _options->param(Options::AlgorithmParameters::Sparsify_block_map);
584 std::vector<int32_t> traversal_order = pepper::csv_to_vector<int32_t>(str_tarversal_order);
586 std::vector<DimensionType> format;
587 std::istringstream is(str_format);
588 for (char c; is >> c;)
592 format.push_back(DimensionType::DENSE);
594 format.push_back(DimensionType::SPARSE_CSR);
595 if (is.peek() == ',')
599 std::vector<int32_t> block_size = pepper::csv_to_vector<int32_t>(str_block_size);
601 std::vector<int32_t> block_map = pepper::csv_to_vector<int32_t>(str_block_map);
603 luci::SparsifyTensorPass sparsifier{tensor_name, traversal_order, format, block_size,