c34e83678bd25159552cd03562e183fba6bec298
[platform/core/ml/nnfw.git] / tests / scripts / test_scheduler_with_profiling.sh
#!/bin/bash

# Benchmarks the onert scheduler: profiles each backend for every model,
# then compares scheduled Parallel/Linear executor runs against
# single-backend runs without the scheduler.

MY_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Quoted so a checkout path containing spaces does not break sourcing.
source "$MY_PATH/common.sh"

BACKEND_CNT=3
# Run profiler BACKEND_CNT+1 times: on each run of the first BACKEND_CNT runs it will
#     collect metrics for one unmeasured backend. On the last run metrics for data transfer
PROFILING_RUN_CNT=$((BACKEND_CNT+1))
# Use the indexed form of BASH_SOURCE consistently (same as MY_PATH above).
TEST_DRIVER_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ARTIFACT_PATH="$TEST_DRIVER_DIR/../.."
BENCHMARK_DRIVER_BIN="$ARTIFACT_PATH/Product/out/bin/tflite_run"
REPORT_DIR="$ARTIFACT_PATH/report"
RUN_TEST_SH="$ARTIFACT_PATH/tests/scripts/models/run_test.sh"
BENCHMARK_MODEL_LIST="MODELS/inception_nonslim MODELS/inception_slim MODELS/mobilenet"

# Bail out early if the per-model runner script is missing.
if [ ! -e "$RUN_TEST_SH" ]; then
    echo "Cannot find $RUN_TEST_SH"
    exit 1
fi


BENCHMARK_REPORT_DIR="$REPORT_DIR/benchmark"
BENCHMARK_MODELS_FILE="$BENCHMARK_REPORT_DIR/benchmark_models.txt"
25
# Run one benchmark on a single backend without the scheduler and report how
# much faster the scheduled parallel run was.
#
# Arguments:
#   $1 - RESULT_SCH_INT:   integer runtime (ms) of the scheduled Parallel run
#   $2 - REPORT_MODEL_DIR: directory where the per-run log is written
#   $3 - MODEL:            model name handed to the benchmark driver
#   $4 - EXECUTOR:         "Linear" or "Parallel"
#   $5 - BACKEND:          backend forced for all operations (e.g. "acl_cl")
# Outputs: one summary line on stdout; log goes to the report directory.
function run_without_sched()
{
    local RESULT_SCH_INT=$1
    local REPORT_MODEL_DIR=$2
    local MODEL=$3
    local EXECUTOR=$4
    local BACKEND=$5

    # ${EXECUTOR,,} lowercases the executor name for the log file name.
    LOG_FILE="$REPORT_MODEL_DIR/tflite_${EXECUTOR,,}_$BACKEND.txt"
    export OP_BACKEND_ALLOPS=$BACKEND
    export EXECUTOR=$EXECUTOR

    print_with_dots "$EXECUTOR $BACKEND without scheduler"

    RESULT=$(get_result_of_benchmark_test "$BENCHMARK_DRIVER_BIN" "$MODEL" "$LOG_FILE")

    # Coerce to an integer; on a non-numeric result printf sets 0 and errors.
    local RESULT_INT=0
    printf -v RESULT_INT '%d' "$RESULT" 2>/dev/null
    if [[ $RESULT_INT -gt 0 ]]; then
        PERCENTAGE=$((100-RESULT_SCH_INT*100/RESULT_INT))
        echo "$RESULT ms. Parallel scheduler is $PERCENTAGE% faster"
    else
        # Guard against division by zero when the benchmark failed to report
        # a usable number (previously this crashed with a bash arith error).
        echo "$RESULT ms. (could not compute speedup)"
    fi
}
46
# Full benchmark flow for every model in BENCHMARK_MODEL_LIST:
#   1. run the profiler PROFILING_RUN_CNT times to collect per-backend and
#      data-transfer metrics (exec_time.json),
#   2. run Parallel and Linear executors with the scheduler,
#   3. run Linear/Parallel on each single backend without the scheduler,
#   4. render the dumped .dot graphs to .png when graphviz is available.
# Writes logs, metrics and graphs under $BENCHMARK_REPORT_DIR.
function run_benchmark_test()
{
    local LOG_FILE=
    local RESULT=
    local REPORT_MODEL_DIR=
    local MODEL_CNT=
    local RET=

    export COUNT=5
    echo "============================================"
    local i=0
    # Computed once; used to suppress the blank line after the last model.
    MODEL_CNT=$(echo $BENCHMARK_MODEL_LIST | wc -w)
    export USE_NNAPI=1
    export BACKENDS="acl_cl;acl_neon;cpu"
    # Remove metrics so that profiler can get metrics for operations
    #      with input&output sizes the same as the model
    rm -f "exec_time.json"
    for MODEL in $BENCHMARK_MODEL_LIST; do

        echo "Benchmark test with $(basename "$BENCHMARK_DRIVER_BIN") & $MODEL"
        echo "$MODEL" >> "$BENCHMARK_MODELS_FILE"

        REPORT_MODEL_DIR=$BENCHMARK_REPORT_DIR/scheduler_benchmark/$MODEL
        mkdir -p "$REPORT_MODEL_DIR"

##################################################################################
        # Get metrics by running profiler
##################################################################################
        export USE_SCHEDULER=1
        export PROFILING_MODE=1
        export EXECUTOR="Dataflow"
        export ONERT_LOG_ENABLE=1
        for ((j = 1 ; j <= PROFILING_RUN_CNT ; j++)); do
            # Save the verbose log of each run
            LOG_FILE=$REPORT_MODEL_DIR/tflite_profiling_$j.txt

            print_with_dots "Profiling run #$j out of $PROFILING_RUN_CNT"

            $RUN_TEST_SH --driverbin="$BENCHMARK_DRIVER_BIN" "$MODEL" > "$LOG_FILE" 2>&1
            RET=$?
            if [[ $RET -ne 0 ]]; then
                # (fix: a stray "xX" used to be appended after this message)
                echo "Profiling $MODEL aborted in run#$j... exit code: $RET"
                exit $RET
            fi
            echo "finished"
            # Save the exec_time.json of each run
            cp "exec_time.json" "$REPORT_MODEL_DIR/exec_time_$j.json"
        done
        unset ONERT_LOG_ENABLE


##################################################################################
        # Turn off profiling
##################################################################################
        export PROFILING_MODE=0

##################################################################################
        # Run ParallelExecutor with scheduler
##################################################################################
        LOG_FILE=$REPORT_MODEL_DIR/tflite_parallel_with_scheduler.txt
        export EXECUTOR="Parallel"
        export GRAPH_DOT_DUMP=1
        print_with_dots "Parallel with scheduler"

        RESULT=$(get_result_of_benchmark_test "$BENCHMARK_DRIVER_BIN" "$MODEL" "$LOG_FILE")
        echo "$RESULT ms"

        # Baseline integer runtime used for all speedup percentages below.
        printf -v RESULT_SCH_INT '%d' "$RESULT" 2>/dev/null

        mv "after_lower_subg-0.dot" "$REPORT_MODEL_DIR/after_lower_subg-0_parallel.dot"

##################################################################################
        # Run Linear executor with scheduler
##################################################################################
        LOG_FILE=$REPORT_MODEL_DIR/tflite_linear_with_scheduler.txt
        export EXECUTOR="Linear"
        export GRAPH_DOT_DUMP=1
        print_with_dots "Linear with scheduler"

        RESULT=$(get_result_of_benchmark_test "$BENCHMARK_DRIVER_BIN" "$MODEL" "$LOG_FILE")

        printf -v RESULT_INT '%d' "$RESULT" 2>/dev/null
        PERCENTAGE=$((100-RESULT_SCH_INT*100/RESULT_INT))
        echo "$RESULT ms. Parallel scheduler is $PERCENTAGE% faster"

        # Remove metrics so that for next model in profiler can get metrics
        #   for operations with input&output sizes the same as the model
        mv "exec_time.json" "$REPORT_MODEL_DIR"
        # Save the dot graph
        mv "after_lower_subg-0.dot" "$REPORT_MODEL_DIR/after_lower_subg-0_linear.dot"

##################################################################################
        # Turn off scheduler
##################################################################################
        export USE_SCHEDULER=0

        # Run LinearExecutor on acl_cl without scheduler
        run_without_sched $RESULT_SCH_INT "$REPORT_MODEL_DIR" "$MODEL" "Linear" "acl_cl"
        mv "after_lower_subg-0.dot" "$REPORT_MODEL_DIR/after_lower_subg-0_linear_acl_cl.dot"

        # Run LinearExecutor on acl_neon without scheduler
        run_without_sched $RESULT_SCH_INT "$REPORT_MODEL_DIR" "$MODEL" "Linear" "acl_neon"
        mv "after_lower_subg-0.dot" "$REPORT_MODEL_DIR/after_lower_subg-0_linear_acl_neon.dot"

        # Run ParallelExecutor on acl_cl without scheduler
        run_without_sched $RESULT_SCH_INT "$REPORT_MODEL_DIR" "$MODEL" "Parallel" "acl_cl"
        mv "after_lower_subg-0.dot" "$REPORT_MODEL_DIR/after_lower_subg-0_parallel_acl_cl.dot"

        # Run ParallelExecutor on acl_neon without scheduler
        run_without_sched $RESULT_SCH_INT "$REPORT_MODEL_DIR" "$MODEL" "Parallel" "acl_neon"
        mv "after_lower_subg-0.dot" "$REPORT_MODEL_DIR/after_lower_subg-0_parallel_acl_neon.dot"

        unset GRAPH_DOT_DUMP

        # Render the saved graphs if graphviz is installed; redirect the
        # probe's stdout so the tool path does not leak into the report.
        if command -v dot >/dev/null;
        then
            dot -Tpng "$REPORT_MODEL_DIR/after_lower_subg-0_parallel.dot" -o "$REPORT_MODEL_DIR/parallel.png"
            dot -Tpng "$REPORT_MODEL_DIR/after_lower_subg-0_linear.dot" -o "$REPORT_MODEL_DIR/linear.png"
            dot -Tpng "$REPORT_MODEL_DIR/after_lower_subg-0_linear_acl_cl.dot" -o "$REPORT_MODEL_DIR/linear_acl_cl.png"
            dot -Tpng "$REPORT_MODEL_DIR/after_lower_subg-0_linear_acl_neon.dot" -o "$REPORT_MODEL_DIR/linear_acl_neon.png"
            # (fix: output name used to be misspelled "paralle_acl_cl.png")
            dot -Tpng "$REPORT_MODEL_DIR/after_lower_subg-0_parallel_acl_cl.dot" -o "$REPORT_MODEL_DIR/parallel_acl_cl.png"
            dot -Tpng "$REPORT_MODEL_DIR/after_lower_subg-0_parallel_acl_neon.dot" -o "$REPORT_MODEL_DIR/parallel_acl_neon.png"
        fi

        # Blank separator line between models, but not after the last one.
        if [[ $i -ne $((MODEL_CNT-1)) ]]; then
            echo ""
        fi
        i=$((i+1))

        unset USE_SCHEDULER
        unset PROFILING_MODE
        unset EXECUTOR
        unset OP_BACKEND_ALLOPS
    done
    unset BACKENDS
    echo "============================================"
    unset COUNT
    unset USE_NNAPI
}
183
# Entry point: frame the benchmark output with blank lines for readability.
printf '\n'
run_benchmark_test
printf '\n'