3 # Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
9 # http://www.apache.org/licenses/LICENSE-2.0
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
17 MY_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
19 source $MY_PATH/common.sh
23 BENCHMARK_MODELS_FILE=
26 BENCHMARK_MODEL_LIST="MODELS/inception_nonslim MODELS/inception_slim MODELS/mobilenet"
27 BACKEND_LIST="acl_cl acl_neon cpu" #TODO: accept this list as argument
28 EXECUTORS="Linear Parallel" #TODO: accept this list as argument
32 echo "Usage: ./$0 --reportdir=. --driverbin=Product/out/bin/tflite_run"
46 BENCHMARK_DRIVER_BIN=${i#*=}
49 BENCHMARK_REPORT_DIR=${i#*=}
50 BENCHMARK_MODELS_FILE=$BENCHMARK_REPORT_DIR/benchmark_models.txt
53 TEST_LIST_PATH=${i#*=}
54 MODEL_TEST_ROOT_PATH=$TEST_LIST_PATH/tests
# Populate BENCHMARK_MODEL_LIST with per-operation tests: every directory
# under MODEL_TEST_ROOT_PATH that contains a config.sh, excluding the
# MODELS/ tree. With no arguments all of TEST_DIRS is scanned (how
# TEST_DIRS is set when arguments are given is not visible in this chunk,
# nor are the closing fi/done/popd/brace lines — TODO confirm in full file).
function get_benchmark_op_list()
    if [[ $# -eq 0 ]]; then
    pushd $MODEL_TEST_ROOT_PATH > /dev/null
    for DIR in $TEST_DIRS; do
        if [ -d "$DIR" ]; then
            # A test is any directory holding a config.sh; strip the leading
            # './', drop MODELS/ entries (those are whole-model benchmarks,
            # not op tests), and sort for a stable run order.
            TESTS_FOUND=$(find "$DIR" -type f -name 'config.sh' -exec dirname {} \;| sed 's|^./||' | grep -v '^MODELS/' | sort)
            TESTS_TO_RUN="$TESTS_TO_RUN $TESTS_FOUND"
    # Collapse the accumulated whitespace-separated list into the global
    # consumed by run_benchmark_test.
    BENCHMARK_MODEL_LIST=$(echo "${TESTS_TO_RUN}")
# Run the driver PROFILING_RUN_CNT times in profiling mode with the Dataflow
# executor so the HEScheduler can later consume the collected timings.
# Arguments (visible): $1 report dir, $3 driver binary, $5 run count
#   ($2/$4 are presumably RUN_TEST_SH and MODEL — the local lines for them
#   are not visible in this chunk; TODO confirm).
# Outputs: per-run verbose logs and exec_time_<j>.json under $1.
# NOTE(review): the line capturing $RET from the driver invocation and the
# closing fi/done/brace lines are not visible in this chunk.
function profile_for_he_shed()
    local REPORT_MODEL_DIR=$1
    local BENCHMARK_DRIVER_BIN=$3
    local PROFILING_RUN_CNT=$5
    # Runtime env knobs: enable the scheduler and profiling, force the
    # Dataflow executor, and turn on verbose onert logging for the log files.
    export USE_SCHEDULER=1
    export PROFILING_MODE=1
    export EXECUTOR="Dataflow"
    export ONERT_LOG_ENABLE=1
    # Start from a clean slate; the runtime rewrites exec_time.json each run.
    rm "exec_time.json" 2>/dev/null
    for ((j = 1 ; j <= $PROFILING_RUN_CNT ; j++)); do
        # Save the verbose log of each run
        LOG_FILE=$REPORT_MODEL_DIR/tflite_profiling_$j.txt
        print_with_dots "Profiling run #$j out of $PROFILING_RUN_CNT"
        $RUN_TEST_SH --driverbin=$BENCHMARK_DRIVER_BIN $MODEL > $LOG_FILE 2>&1
        if [[ $RET -ne 0 ]]; then
            echo "Profiling $MODEL aborted in run#$j... exit code: $RET"
        # Save the exec_time.json of each run
        cp "exec_time.json" $REPORT_MODEL_DIR/"exec_time_$j.json"
    # Restore the environment so later benchmark runs are unaffected.
    unset USE_SCHEDULER PROFILING_MODE EXECUTOR ONERT_LOG_ENABLE
# Benchmark one model with the HEScheduler enabled for a given executor and
# archive the lowered-graph dot dump alongside the log.
# Arguments (visible): $1 report dir, $3 driver binary
#   ($2/$4 presumably RUN_TEST_SH / MODEL, and EXECUTOR is presumably a
#   local set from another positional arg — those lines are not visible in
#   this chunk; TODO confirm).
# NOTE(review): the opening/closing braces and any use of $RESULT are not
# visible in this chunk.
function run_with_he_scheduler()
    local REPORT_MODEL_DIR=$1
    local BENCHMARK_DRIVER_BIN=$3
    LOG_FILE=$REPORT_MODEL_DIR/tflite_onert_with_he_scheduler_in_$EXECUTOR.txt
    # Enable scheduler + dot dump + verbose logging for this measured run.
    export EXECUTOR=$EXECUTOR
    export GRAPH_DOT_DUMP=1
    export USE_SCHEDULER=1
    export ONERT_LOG_ENABLE=1
    print_with_dots "TFLite onert $EXECUTOR with HEScheduler"
    RESULT=$(get_result_of_benchmark_test $RUN_TEST_SH $BENCHMARK_DRIVER_BIN $MODEL $LOG_FILE)
    # Keep the per-executor lowered graph for inspection.
    mv "after_lower.dot" $REPORT_MODEL_DIR/"after_lower_$EXECUTOR.dot"
    # Restore the environment so later benchmark runs are unaffected.
    unset EXECUTOR GRAPH_DOT_DUMP USE_SCHEDULER ONERT_LOG_ENABLE
# Benchmark one model across every executor in $5 and every backend in $6,
# optionally (when TEST_OP == false) preceded by HEScheduler profiling runs.
# Arguments (visible): $2 report dir, $3 pause seconds, $4 driver binary,
#   $6 backend list ($1 is presumably MODEL and $5 the executor list — the
#   local lines for them are not visible in this chunk; TODO confirm).
# NOTE(review): several done/else/fi closers of the loops/conditionals
# below are not visible in this chunk.
function run_onert_with_all_config()
    local REPORT_MODEL_DIR=$2
    local PAUSE_TIME_IN_SEC=$3
    local BENCHMARK_DRIVER_BIN=$4
    local BACKEND_LIST=$6
    # Run profiler BACKEND_CNT+1 times: on each run of the first BACKEND_CNT runs it will
    # collect metrics for one unmeasured backend. On the last run metrics for data transfer
    for backend in $BACKEND_LIST; do
        # BACKENDS is a ';'-separated list consumed by the runtime.
        BACKENDS_TO_USE+=$backend';'
        ((++PROFILING_RUN_CNT))
    export BACKENDS=$BACKENDS_TO_USE
    # Whole-model mode: collect HEScheduler profiling data first.
    if [ "$TEST_OP" == "false" ]; then
        profile_for_he_shed $REPORT_MODEL_DIR $BENCHMARK_DRIVER_BIN $MODEL $PROFILING_RUN_CNT
    for executor in $EXECUTORS; do
        export EXECUTOR=$executor
        if [ "$TEST_OP" == "false" ]; then
            run_with_he_scheduler $REPORT_MODEL_DIR $BENCHMARK_DRIVER_BIN $MODEL $executor
        # Measure each backend individually by forcing all ops onto it.
        for backend in $BACKEND_LIST; do
            export OP_BACKEND_ALLOPS=$backend
            run_benchmark_and_print "tflite_onert_"$executor"_executor_$backend" "TFLite onert $executor Executor $backend"\
                $MODEL $REPORT_MODEL_DIR 0 $BENCHMARK_DRIVER_BIN
    # Restore the environment so later benchmark runs are unaffected.
    unset USE_NNAPI EXECUTOR OP_BACKEND_ALLOPS BACKENDS
# Top-level benchmark loop: for each model in BENCHMARK_MODEL_LIST, record
# it in BENCHMARK_MODELS_FILE, create its report directory, run the TFLite
# CPU baseline, then run the full onert executor/backend matrix.
# Reads globals: BENCHMARK_MODEL_LIST, BENCHMARK_DRIVER_BIN,
# BENCHMARK_REPORT_DIR, BENCHMARK_MODELS_FILE, TEST_OP, EXECUTORS,
# BACKEND_LIST, MODEL_TEST_ROOT_PATH.
# NOTE(review): the opening brace, the 'continue' for disabled tests, and
# the loop/conditional closers are not visible in this chunk; where $STATUS
# and $i come from is also not shown — TODO confirm in full file.
function run_benchmark_test()
    local REPORT_MODEL_DIR=
    export ONERT_LOG_ENABLE=1
    echo "============================================"
    date +'%Y-%m-%d %H:%M:%S %s'
    for MODEL in $BENCHMARK_MODEL_LIST; do
        # Per-operation tests carry their own config.sh (presumably sets
        # $STATUS among others — TODO confirm).
        if [ "$TEST_OP" == "true" ]; then
            source $MODEL_TEST_ROOT_PATH/$MODEL/config.sh
        # Skip 'disabled' tests
        if [ $(tr '[:upper:]' '[:lower:]' <<< "$STATUS") == "disabled" ]; then
        echo "Benchmark test with `basename $BENCHMARK_DRIVER_BIN` & `echo $MODEL`"
        # Record the model so report tooling knows what was measured.
        echo $MODEL >> $BENCHMARK_MODELS_FILE
        REPORT_MODEL_DIR=$BENCHMARK_REPORT_DIR/$MODEL
        mkdir -p $REPORT_MODEL_DIR
        # Plain TFLite CPU run as the comparison baseline.
        run_benchmark_and_print "tflite_cpu" "TFLite CPU" $MODEL $REPORT_MODEL_DIR 0 $BENCHMARK_DRIVER_BIN
        if [ "$TEST_OP" == "true" ]; then
            # Operation test don't need to test each scheduler
            run_onert_with_all_config $MODEL $REPORT_MODEL_DIR 0 $BENCHMARK_DRIVER_BIN "Linear" "$BACKEND_LIST"
            run_onert_with_all_config $MODEL $REPORT_MODEL_DIR 0 $BENCHMARK_DRIVER_BIN "$EXECUTORS" "$BACKEND_LIST"
        # Presumably a pause between models except after the last one.
        if [[ $i -ne $(echo $BENCHMARK_MODEL_LIST | wc -w)-1 ]]; then
    echo "============================================"
# --- Script entry sequence ---
# NOTE(review): the closing fi lines of the two conditionals below are not
# visible in this chunk.
# Ensure the report directory exists before any results are written into it.
if [ ! -e "$BENCHMARK_REPORT_DIR" ]; then
    mkdir -p $BENCHMARK_REPORT_DIR
# Per-operation mode: replace the default model list with the discovered
# operation-test list.
if [ "$TEST_OP" == "true" ]; then
    get_benchmark_op_list
# Start from a clean model-list file; run_benchmark_test appends to it.
rm -rf $BENCHMARK_MODELS_FILE
# print the result AND append to log file
run_benchmark_test 2>&1 | tee -a onert_benchmarks.txt