+++ /dev/null
-# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import os.path
-
-mypath = os.path.abspath(os.path.dirname(__file__))
-
-
-def switch_nnfw_pacl_kernel_env(mode):
- # mode : "acl" or "neon" or ""
-
- # TODO: Handle whether there is nnfw_kernel_env_list.txt or not
- # FIXME: Now nnfw_kernel_env_list.txt is parent dir of current dir
- filename = "nnfw_kernel_env_list.txt"
-    envfilename = os.path.join(mypath, "..", filename)
-
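-    # nnfw_kernel_env_list.txt is expected to hold one environment variable
-    # name per line; each named variable is set to `mode` below.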
- with open(envfilename) as envfile:
- for env in envfile:
-            env = env.rstrip('\n')  # strip the trailing newline, if any
- os.environ[env] = mode
-
-
-if __name__ == "__main__":
- # for test
- switch_nnfw_pacl_kernel_env("acl")
- switch_nnfw_pacl_kernel_env("neon")
- switch_nnfw_pacl_kernel_env("")
+++ /dev/null
-#!/usr/bin/env python
-
-# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-import argparse
-
-
-def get_parsed_options():
- parser = argparse.ArgumentParser(
- prog='run_frameworktest.py', usage='%(prog)s [options]')
-
- parser.add_argument(
- "--runtestsh",
- action="store",
- type=str,
- dest="fwtest_runtestsh",
- required=True,
- help="(Usually : tests/framework/run_test.sh) run test shell for framework test")
-
- parser.add_argument(
- "--driverbin",
- action="store",
- type=str,
- dest="fwtest_driverbin",
- required=True,
- help="(Usually in Product/out/bin/) driver bin for framework test")
-
- parser.add_argument(
- "--tapname",
- action="store",
- type=str,
- dest="fwtest_tapname",
- help="tap name for framework test")
-
- parser.add_argument(
- "--logname",
- action="store",
- type=str,
- dest="fwtest_logname",
- help="log name for framework test")
-
- parser.add_argument(
- "--testname",
- action="store",
- type=str,
- dest="fwtest_testname",
- help="test name of framework test")
-
- parser.add_argument(
- "--frameworktest_list_file",
- action="store",
- type=str,
- dest="frameworktest_list_file",
- help="list of files to run framework test")
-
- parser.add_argument(
- "--reportdir",
- action="store",
- type=str,
- dest="fwtest_reportdir",
- default="report",
- help="(default=report) directory that each test result will be stored")
-
- parser.add_argument(
- "--ldlibrarypath",
- action="store",
- type=str,
- dest="ldlibrarypath",
-        help=
-        "(usually ARTIFACT_PATH/Product/out/lib) library directory to export as LD_LIBRARY_PATH"
-    )
-
- options = parser.parse_args()
- return options
-
-
-# Check that each parameter is valid
-def check_params(fwtest_runtestsh, fwtest_driverbin, fwtest_reportdir, fwtest_tapname,
- fwtest_logname, fwtest_testname, frameworktest_list_file,
- ldlibrary_path):
-    if not fwtest_runtestsh:
-        print("Fail: runtestsh is not given")
-        print("(Usually runtestsh for framework test is tests/framework/run_test.sh)")
-        sys.exit(1)
-
-    if not os.path.isfile(fwtest_runtestsh):
-        print("Fail: runtestsh is not valid")
-        sys.exit(1)
-
-    if not fwtest_driverbin:
-        print("Fail: driverbin is not given")
-        print("(Usually driverbin for framework test is in Product/out/bin/)")
-        sys.exit(1)
-
-    if not os.path.isfile(fwtest_driverbin):
-        print("Fail: driverbin is not valid")
-        sys.exit(1)
-
-    if not fwtest_testname:
-        print("Fail: testname is not given")
-        sys.exit(1)
-
-    if not fwtest_tapname:
-        print("Fail: tapname is not given")
-        sys.exit(1)
-
-    if not fwtest_logname:
-        print("Fail: logname is not given")
-        sys.exit(1)
-
-    if not fwtest_reportdir:
-        print("Fail: report directory is not given")
-        sys.exit(1)
-
-    if ldlibrary_path:
-        os.environ["LD_LIBRARY_PATH"] = ldlibrary_path
-
-
-# Entry point for the framework test; call this function from test_driver.py
-def run_frameworktest(fwtest_runtestsh, fwtest_driverbin, fwtest_reportdir,
- fwtest_tapname, fwtest_logname, fwtest_testname,
- frameworktest_list_file, ldlibrary_path):
-
- # Handling exceptions for parameters
- check_params(fwtest_runtestsh, fwtest_driverbin, fwtest_reportdir, fwtest_tapname,
- fwtest_logname, fwtest_testname, frameworktest_list_file, ldlibrary_path)
-
- os.makedirs(fwtest_reportdir, exist_ok=True)
-
- print("")
- print("============================================")
- print("{fwtest_testname} with {fwtest_driverbin_name} ...".format(
- fwtest_testname=fwtest_testname,
- fwtest_driverbin_name=fwtest_driverbin[fwtest_driverbin.rfind('/') + 1:]))
-
- # Run framework test using models in model_list
- model_list = ""
- if frameworktest_list_file != None and frameworktest_list_file != "":
- fwtest_list_file = open(frameworktest_list_file, "r")
- for line in fwtest_list_file:
- model_list += (line[:-1] + " ")
- fwtest_list_file.close()
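-    # The collected model names are appended to the run_test.sh command line
-    # below as positional arguments.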
-
- # If model_list is empty, all possible models will be found automatically by fwtest_runtestsh
- cmd = "{fwtest_runtestsh} --driverbin={fwtest_driverbin} \
- --reportdir={fwtest_reportdir} \
- --tapname={fwtest_tapname} \
- {model_list} \
- > {fwtest_reportdir}/{fwtest_logname} 2>&1".format(
- fwtest_runtestsh=fwtest_runtestsh,
- fwtest_driverbin=fwtest_driverbin,
- fwtest_reportdir=fwtest_reportdir,
- fwtest_tapname=fwtest_tapname,
- model_list=model_list,
- fwtest_logname=fwtest_logname)
- fwtest_result = os.system(cmd)
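-    # On POSIX, os.system() returns the raw wait status (the exit code is in
-    # the high byte), so a nonzero value here just signals a failed run.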
-
- print("")
- tap_file_path = "{fwtest_reportdir}/{fwtest_tapname}".format(
- fwtest_reportdir=fwtest_reportdir, fwtest_tapname=fwtest_tapname)
-    with open(tap_file_path, "r") as tap_file:
-        print(tap_file.read())
-
- if fwtest_result != 0:
- print("")
- print("{fwtest_testname} failed... exit code: {fwtest_result}".format(
- fwtest_testname=fwtest_testname, fwtest_result=fwtest_result))
- print("============================================")
- print("")
- sys.exit(1)
-
- print("============================================")
- print("")
- sys.exit(0)
-
-
-if __name__ == "__main__":
- options = get_parsed_options()
- sys.exit(
- run_frameworktest(options.fwtest_runtestsh, options.fwtest_driverbin,
- options.fwtest_reportdir, options.fwtest_tapname,
- options.fwtest_logname, options.fwtest_testname,
- options.frameworktest_list_file, options.ldlibrarypath))
+++ /dev/null
-#!/usr/bin/env python
-
-# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-import argparse
-import subprocess
-
-
-def get_parsed_options():
- parser = argparse.ArgumentParser(prog='run_unittest.py', usage='%(prog)s [options]')
-
- parser.add_argument(
- "--reportdir",
- action="store",
- type=str,
- dest="reportdir",
- default="report",
- help="(default=report) directory that each test result will be stored")
-
- parser.add_argument(
- "--unittestdir",
- action="store",
- type=str,
- dest="unittestdir",
- required=True,
- help="directory that unittests are included")
-
- parser.add_argument(
- "--ldlibrarypath",
- action="store",
- type=str,
- dest="ldlibrarypath",
-        help=
-        "(usually ARTIFACT_PATH/Product/out/lib) library directory to export as LD_LIBRARY_PATH"
-    )
-
- parser.add_argument(
- "--runall",
- action="store_true",
- dest="runall",
- default=False,
- help="run all unittest and ignore skiplist")
-
- options = parser.parse_args()
- return options
-
-
-def get_gtest_option(report_dir, test_bin, unittest_dir=None):
- # Set path to save test result
- output_option = "--gtest_output=xml:{report_dir}/{test_bin}.xml".format(
- report_dir=report_dir, test_bin=test_bin)
-
-    # When test_bin is a single "Suite.Test" item (--runall mode), filter to run just that test
- if '.' in test_bin:
- return output_option + " " + "--gtest_filter={test_list_item}".format(
- test_list_item=test_bin)
-
- # Set filter not to run *.skip unit tests
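-    # A "<test_bin>.skip" file lists one gtest name per line ('#' starts a
-    # comment); the names are joined with ':' into a negative gtest filter.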
- filter_option = ""
- skiplist_path = "{unittest_dir}/{test_bin}.skip".format(
- unittest_dir=unittest_dir, test_bin=test_bin)
-    if os.path.exists(skiplist_path):
-        with open(skiplist_path, "r") as skiplist_file:
-            skip_items = [
-                line.strip() for line in skiplist_file
-                if line.strip() and not line.startswith('#')
-            ]
-        filter_option = "--gtest_filter=-" + ':'.join(skip_items)
-
- return output_option + " " + filter_option
-
-
-def get_test_list_items(unittest_dir, test_bin):
- cmd_output = subprocess.check_output(
- "{unittestdir}/{testbin} --gtest_list_tests".format(
- unittestdir=unittest_dir, testbin=test_bin),
- shell=True)
-    all_test_list = cmd_output.decode().split()
-
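-    # --gtest_list_tests prints each suite name ending in '.' followed by its
-    # test names; recombine them into fully qualified "Suite.Test" items.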
- category = ""
- item = ""
- test_list_items = []
- for verbose_line in all_test_list:
- if verbose_line[-1] == '.':
- category = verbose_line
- else:
- item = "{category}{verbose_line}".format(
- category=category, verbose_line=verbose_line)
- test_list_items.append(item)
-
- return test_list_items
-
-
-# Entry point for the unit tests; call this function from test_driver.py
-def run_unittest(unittest_dir, report_dir, ldlibrary_path, runall):
- if unittest_dir == "" or unittest_dir == None:
- print("Fail : unittestdir is not given")
- print("(Usually unit test directory is Product/out/unittest)")
- sys.exit(1)
-
- if report_dir == "" or report_dir == None:
- print("Info : 'report' folder of current path will be used as report directory")
- report_dir = "report"
-
-    if ldlibrary_path:
-        os.environ["LD_LIBRARY_PATH"] = ldlibrary_path
-
- print("")
- print("============================================")
- print("Unittest start")
- print("============================================")
-
- # Run all unit tests in unittest_dir
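-    # (every file in unittest_dir except the *.skip filter lists is treated
-    # as a test binary)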
-    unittest_result = 0
-    num_unittest = 0  # keeps the final count valid even if unittest_dir is empty
-    all_test_bin = (t for t in os.listdir(unittest_dir) if not t.endswith(".skip"))
-
- for idx, test_bin in enumerate(all_test_bin):
- num_unittest = idx + 1
- print("============================================")
- print("Starting set {num_unittest}: {test_bin}...".format(
- num_unittest=num_unittest, test_bin=test_bin))
- print("============================================")
-
- ret = 0
-
- # Run all unit tests ignoring skip list
- if runall:
- test_list_items = get_test_list_items(unittest_dir, test_bin)
- for test_item in test_list_items:
- cmd = "{unittest_dir}/{test_bin} {gtest_option}".format(
- unittest_dir=unittest_dir,
- test_bin=test_bin,
- gtest_option=get_gtest_option(report_dir, test_item))
-                ret |= os.system(cmd)  # accumulate failures across test items
- # Run all unit tests except skip list
- else:
- cmd = "{unittest_dir}/{test_bin} {gtest_option}".format(
- unittest_dir=unittest_dir,
- test_bin=test_bin,
- gtest_option=get_gtest_option(report_dir, test_bin, unittest_dir))
- ret = os.system(cmd)
-
- if ret != 0:
- unittest_result = ret
- print("{test_bin} failed... return code: {unittest_result}".format(
- test_bin=test_bin, unittest_result=unittest_result))
-
- print("============================================")
- print("Finishing set {num_unittest}: {test_bin}...".format(
- num_unittest=num_unittest, test_bin=test_bin))
- print("============================================")
-
- if unittest_result != 0:
- print("============================================")
- print("Failed unit test... exit code: {unittest_result}".format(
- unittest_result=unittest_result))
- print("============================================")
- sys.exit(1)
-
- print("============================================")
- print("Completed total {num_unittest} set of unittest".format(
- num_unittest=num_unittest))
- print("Unittest end")
- print("============================================")
- sys.exit(0)
-
-
-if __name__ == "__main__":
- options = get_parsed_options()
- sys.exit(
- run_unittest(options.unittestdir, options.reportdir, options.ldlibrarypath,
- options.runall))
+++ /dev/null
-#!/usr/bin/env python
-
-# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# TODO: Remove this script; it is outdated and no longer needed
-import os
-import shutil
-import argparse
-import common
-import subprocess
-import sys
-
-mypath = os.path.abspath(os.path.dirname(__file__))
-
-
-def get_parsed_options():
- parser = argparse.ArgumentParser(prog='test_driver.py', usage='%(prog)s [options]')
-
- # artifactpath
- parser.add_argument(
- "--artifactpath",
- action="store",
- type=str,
- dest="artifactpath",
- default=".",
- help="(should be passed) path that has tests/ and Product/")
-
- # test
- parser.add_argument(
- "--unittest",
- action="store_true",
- dest="unittest_on",
- default=False,
- help="(default=on) run unit test")
- parser.add_argument(
- "--unittestall",
- action="store_true",
- dest="unittestall_on",
- default=False,
- help="((default=off) run all unit test without skip, overrite --unittest option")
- parser.add_argument(
- "--verification",
- action="store_true",
- dest="verification_on",
- default=False,
- help="(default=on) run verification")
- parser.add_argument(
- "--frameworktest",
- action="store_true",
- dest="frameworktest_on",
- default=False,
- help="(default=off)run framework test")
-
- # benchmark
- parser.add_argument(
- "--benchmark",
- action="store_true",
- dest="benchmark_on",
- default=False,
- help="(default=off) run benchmark")
- # TODO Remove deprecated --benchmark_acl option
- parser.add_argument(
- "--benchmark_acl",
- action="store_true",
- dest="benchmarkacl_on",
- default=False,
- help="(default=off) run benchmark-acl")
- parser.add_argument(
- "--benchmark_op",
- action="store_true",
- dest="benchmarkop_on",
- default=False,
- help="(default=off) run benchmark per operation")
-
- # profile
- parser.add_argument(
- "--profile",
- action="store_true",
- dest="profile_on",
- default=False,
- help="(default=off) run profiling")
-
- # driverbin
- parser.add_argument(
- "--framework_driverbin",
- action="store",
- type=str,
- dest="framework_driverbin",
- help=
- "(default=../../Product/out/bin/tflite_run) runner for runnning framework tests")
- parser.add_argument(
- "--verification_driverbin",
- action="store",
- type=str,
- dest="verification_driverbin",
- help=
- "(default=../../Product/out/bin/nnapi_test) runner for runnning verification tests"
- )
- parser.add_argument(
- "--benchmark_driverbin",
- action="store",
- type=str,
- dest="benchmark_driverbin",
- help=
- "(default=../../Product/out/bin/tflite_benchmark) runner for runnning benchmark")
-
- # etc.
- parser.add_argument(
- "--runtestsh",
- action="store",
- type=str,
- dest="runtestsh",
- help=
- "(default=ARTIFACT_PATH/tests/framework/run_test.sh) run_test.sh with path where it is for framework test and verification"
- )
- parser.add_argument(
- "--unittestdir",
- action="store",
- type=str,
- dest="unittestdir",
- help=
- "(default=ARTIFACT_PATH/Product/out/unittest) directory that has unittest binaries for unit test"
- )
- parser.add_argument(
- "--ldlibrarypath",
- action="store",
- type=str,
- dest="ldlibrarypath",
- help=
- "(default=ARTIFACT_PATH/Product/out/lib) path that you want to include libraries")
- parser.add_argument(
- "--frameworktest_list_file",
- action="store",
- type=str,
- dest="frameworktest_list_file",
- help=
- "(default=ARTIFACT_PATH/tests/scripts/pureacl_frameworktest_list.txt) filepath of model list for test"
- )
- parser.add_argument(
- "--reportdir",
- action="store",
- type=str,
- dest="reportdir",
- help="(default=ARTIFACT_PATH/report) directory to save report")
-
- # env
- parser.add_argument(
- "--usennapi",
- action="store_true",
- dest="usennapi_on",
- default=True,
- help="(default=on) declare USE_NNAPI=1")
- parser.add_argument(
- "--nousennapi",
- action="store_false",
- dest="usennapi_on",
- help="(default=off) declare nothing about USE_NNAPI")
- parser.add_argument(
- "--acl_envon",
- action="store_true",
- dest="aclenv_on",
- default=False,
- help="(default=off) declare envs for ACL")
-
- options = parser.parse_args()
- return options
-
-
-def run_unittest(options):
- cmd = "{artifactpath}/tests/scripts/run_unittest.sh \
- --reportdir={reportdir} \
- --unittestdir={unittestdir}".format(
- artifactpath=options.artifactpath,
- reportdir=options.reportdir,
- unittestdir=options.unittestdir)
- if options.unittestall_on:
- cmd += " --runall"
- os.system(cmd)
-
-
-def run_frameworktest(options):
-    if options.framework_driverbin is None:
-        options.framework_driverbin = options.artifactpath + "/Product/out/bin/tflite_run"
-    if not os.path.exists(options.framework_driverbin):
- print("Cannot find {driverbin}".format(driverbin=options.framework_driverbin))
- sys.exit(1)
-
- cmd = "{artifactpath}/tests/scripts/run_frameworktest.sh \
- --runtestsh={runtestsh} \
- --driverbin={driverbin} \
- --reportdir={reportdir} \
- --tapname=framework_test.tap \
- --logname=framework_test.log \
- --testname='Frameworktest'".format(
- runtestsh=options.runtestsh,
- driverbin=options.framework_driverbin,
- reportdir=options.reportdir,
- artifactpath=options.artifactpath)
- os.system(cmd)
-
-
-def run_verification(options):
-    if options.verification_driverbin is None:
-        options.verification_driverbin = options.artifactpath + "/Product/out/bin/nnapi_test"
-    if not os.path.exists(options.verification_driverbin):
- print("Cannot find {driverbin}".format(
- driverbin=options.verification_driverbin))
- sys.exit(1)
-
- cmd = "{artifactpath}/tests/scripts/run_frameworktest.sh \
- --runtestsh={runtestsh} \
- --driverbin={driverbin} \
- --reportdir={reportdir} \
- --tapname=verification_test.tap \
- --logname=verification_test.log \
- --testname='Verification'".format(
- runtestsh=options.runtestsh,
- driverbin=options.verification_driverbin,
- reportdir=options.reportdir,
- artifactpath=options.artifactpath)
- os.system(cmd)
-
-
-def run_benchmark(options):
-    if options.benchmark_driverbin is None:
-        options.benchmark_driverbin = options.artifactpath + "/Product/out/bin/tflite_benchmark"
-    if not os.path.exists(options.benchmark_driverbin):
- print("Cannot find {driverbin}".format(driverbin=options.benchmark_driverbin))
- sys.exit(1)
-
- cmd = "{artifactpath}/tests/scripts/run_benchmark.sh \
- --runtestsh={runtestsh} \
- --driverbin={driverbin} \
- --reportdir={reportdir}/benchmark".format(
- runtestsh=options.runtestsh,
- driverbin=options.benchmark_driverbin,
- reportdir=options.reportdir,
- artifactpath=options.artifactpath)
- os.system(cmd)
-
-
-def run_benchmarkop(options):
-    if options.benchmark_driverbin is None:
-        options.benchmark_driverbin = options.artifactpath + "/Product/out/bin/tflite_benchmark"
-    if not os.path.exists(options.benchmark_driverbin):
- print("Cannot find {driverbin}".format(driverbin=options.benchmark_driverbin))
- sys.exit(1)
-
- cmd = "{artifactpath}/tests/scripts/run_benchmark_op.sh \
- --runtestsh={runtestsh} \
- --driverbin={driverbin} \
- --reportdir={reportdir}/benchmark_op \
- --modelfilepath={artifactpath}/tests/framework \
- --frameworktest_list_file={frameworktest_list_file}".format(
- runtestsh=options.runtestsh,
- driverbin=options.benchmark_driverbin,
- artifactpath=options.artifactpath,
- reportdir=options.reportdir,
- frameworktest_list_file=options.frameworktest_list_file)
- os.system(cmd)
-
-
-def make_json_for_benchmark_result(options):
- cmd = "source {artifactpath}/tests/scripts/print_to_json.sh && ".format(
- artifactpath=options.artifactpath)
- if options.benchmarkop_on:
- cmd += "print_to_json {artifactpath}/report/benchmark_op \
- {reportdir} \"benchmark_op_result.json\"".format(
- reportdir=options.reportdir, artifactpath=options.artifactpath)
- else:
- cmd += "print_to_json {artifactpath}/report/benchmark \
- {reportdir} \"benchmark_result.json\"".format(
- reportdir=options.reportdir, artifactpath=options.artifactpath)
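-    # print_to_json.sh only defines a shell function, so source it and call
-    # print_to_json inside a single bash invocation.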
- sp = subprocess.Popen(["/bin/bash", "-i", "-c", cmd])
- sp.communicate()
-
-
-def run_profile(options):
- # FIXME: These driver and tflite test are set temporarily. Fix these to support flexibility
- driver_bin = options.artifactpath + "/Product/out/bin/tflite_run"
- tflite_test = options.artifactpath + "/tests/framework/cache/inceptionv3/inception_module/inception_test.tflite"
-
- # TODO: Enable operf to set directory where sample data puts on
- shutil.rmtree("oprofile_data", ignore_errors=True)
-
- print("")
- print("============================================")
- cmd = "operf -g {driver_bin} {tflite_test}".format(
- driver_bin=driver_bin, tflite_test=tflite_test)
- os.system(cmd)
- print("============================================")
- print("")
-
-
-def main():
- options = get_parsed_options()
-
-    # Run the default set of tests when no specific test option is given
-    alltest_on = not any([
-        options.unittest_on, options.frameworktest_on, options.verification_on,
-        options.benchmark_on, options.benchmarkacl_on, options.benchmarkop_on,
-        options.profile_on
-    ])
-
- # artifactpath
- if os.path.isdir(options.artifactpath) and os.path.isdir(
- options.artifactpath + "/tests") and os.path.isdir(options.artifactpath +
- "/Product"):
- options.artifactpath = os.path.abspath(options.artifactpath)
- else:
- print("Pass on with proper arifactpath")
- sys.exit(1)
-
- # run_test.sh
-    if not options.runtestsh:
- options.runtestsh = options.artifactpath + "/tests/framework/run_test.sh"
-
-    if not os.path.exists(options.runtestsh):
- print("Cannot find {runtestsh}".format(runtestsh=options.runtestsh))
- sys.exit(1)
-
- # unittest dir
-    if not options.unittestdir:
- options.unittestdir = options.artifactpath + "/Product/out/unittest"
-
- # LD_LIBRARY_PATH
-    if not options.ldlibrarypath:
- options.ldlibrarypath = options.artifactpath + "/Product/out/lib"
-
- # report dir
-    if not options.reportdir:
- options.reportdir = options.artifactpath + "/report"
-
- # set LD_LIBRARY_PATH
- os.environ["LD_LIBRARY_PATH"] = options.ldlibrarypath
-
- # set USE_NNAPI
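-    # (USE_NNAPI is presumably read by the driver binaries, e.g. tflite_run,
-    # to route execution through the NN API)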
-    if options.usennapi_on:
- os.environ["USE_NNAPI"] = "1"
-
- # set acl
- if options.aclenv_on:
- common.switch_nnfw_pacl_kernel_env("acl")
-
- # unittest
- if alltest_on or options.unittest_on:
- run_unittest(options)
-
- # frameworktest
- if options.frameworktest_on:
- run_frameworktest(options)
-
- # verification
- if alltest_on or options.verification_on:
- run_verification(options)
-
- # benchmark
- if options.benchmark_on:
- run_benchmark(options)
-
- # benchmark_op
- if options.benchmarkop_on:
- run_benchmarkop(options)
-
- # make json file for benchmark result on ci
- if options.benchmark_on or options.benchmarkacl_on or options.benchmarkop_on:
- make_json_for_benchmark_result(options)
-
- # run profile
- if options.profile_on:
- run_profile(options)
-
-
-if __name__ == "__main__":
- main()