# limitations under the License.
import os
-import os.path
import shutil
-import optparse
+import argparse
import common
import subprocess
+import sys
mypath = os.path.abspath(os.path.dirname(__file__))
def get_parsed_options():
- parser = optparse.OptionParser()
+ parser = argparse.ArgumentParser(prog='test_driver.py', usage='%(prog)s [options]')
# artifactpath
- parser.add_option(
+ parser.add_argument(
"--artifactpath",
action="store",
- type="string",
+ type=str,
dest="artifactpath",
default=".",
help="(should be passed) path that has tests/ and Product/")
# test
- parser.add_option(
+ parser.add_argument(
"--unittest",
action="store_true",
dest="unittest_on",
default=False,
help="(default=on) run unit test")
- parser.add_option(
+ parser.add_argument(
"--verification",
action="store_true",
dest="verification_on",
default=False,
help="(default=on) run verification")
- parser.add_option(
+ parser.add_argument(
"--frameworktest",
action="store_true",
dest="frameworktest_on",
help="(default=off)run framework test")
# benchmark
- parser.add_option(
+ parser.add_argument(
"--benchmark",
action="store_true",
dest="benchmark_on",
default=False,
help="(default=off) run benchmark")
- parser.add_option(
+ parser.add_argument(
"--benchmark_acl",
action="store_true",
dest="benchmarkacl_on",
default=False,
help="(default=off) run benchmark-acl")
- parser.add_option(
+ parser.add_argument(
"--benchmark_op",
action="store_true",
dest="benchmarkop_on",
help="(default=off) run benchmark per operation")
# profile
- parser.add_option(
+ parser.add_argument(
"--profile",
action="store_true",
dest="profile_on",
help="(default=off) run profiling")
# driverbin
- parser.add_option(
+ parser.add_argument(
"--framework_driverbin",
action="store",
- type="string",
+ type=str,
dest="framework_driverbin",
help=
"(default=../../Product/out/bin/tflite_run) runner for runnning framework tests")
- parser.add_option(
+ parser.add_argument(
"--verification_driverbin",
action="store",
- type="string",
+ type=str,
dest="verification_driverbin",
help=
"(default=../../Product/out/bin/nnapi_test) runner for runnning verification tests"
)
- parser.add_option(
+ parser.add_argument(
"--benchmark_driverbin",
action="store",
- type="string",
+ type=str,
dest="benchmark_driverbin",
help=
"(default=../../Product/out/bin/tflite_benchmark) runner for runnning benchmark")
# etc.
- parser.add_option(
+ parser.add_argument(
"--runtestsh",
action="store",
- type="string",
+ type=str,
dest="runtestsh",
help=
"(default=ARTIFACT_PATH/tests/framework/run_test.sh) run_test.sh with path where it is for framework test and verification"
)
- parser.add_option(
+ parser.add_argument(
"--unittestdir",
action="store",
- type="string",
+ type=str,
dest="unittestdir",
help=
"(default=ARTIFACT_PATH/Product/out/unittest) directory that has unittest binaries for unit test"
)
- parser.add_option(
+ parser.add_argument(
"--ldlibrarypath",
action="store",
- type="string",
+ type=str,
dest="ldlibrarypath",
help=
"(default=ARTIFACT_PATH/Product/out/lib) path that you want to include libraries")
- parser.add_option(
+ parser.add_argument(
"--frameworktest_list_file",
action="store",
- type="string",
+ type=str,
dest="frameworktest_list_file",
help=
"(default=ARTIFACT_PATH/tools/test_driver/pureacl_frameworktest_list.txt) filepath of model list for test"
)
+ parser.add_argument(
+ "--reportdir",
+ action="store",
+ type=str,
+ dest="reportdir",
+ help="(default=ARTIFACT_PATH/report) directory to save report")
# env
- parser.add_option(
+ parser.add_argument(
"--usennapi",
action="store_true",
dest="usennapi_on",
default=True,
help="(default=on) declare USE_NNAPI=1")
- parser.add_option(
+ parser.add_argument(
"--nousennapi",
action="store_false",
dest="usennapi_on",
help="(default=off) declare nothing about USE_NNAPI")
- parser.add_option(
+ parser.add_argument(
"--acl_envon",
action="store_true",
dest="aclenv_on",
default=False,
help="(default=off) declare envs for ACL")
- (options, args) = parser.parse_args()
+ options = parser.parse_args()
return options
def run_unittest(options):
cmd = "{artifactpath}/tools/test_driver/run_unittest.sh \
- --reportdir={artifactpath}/report \
+ --reportdir={reportdir} \
--unittestdir={unittestdir}".format(
- artifactpath=options.artifactpath, unittestdir=options.unittestdir)
- ret = os.system(cmd)
- # TODO: handle return value
+ artifactpath=options.artifactpath,
+ reportdir=options.reportdir,
+ unittestdir=options.unittestdir)
+ os.system(cmd)
def run_frameworktest(options):
options.framework_driverbin = options.artifactpath + "/Product/out/bin/tflite_run"
if (os.path.exists(options.framework_driverbin) == False):
print("Cannot find {driverbin}".format(driverbin=options.framework_driverbin))
- os._exit(1)
+ sys.exit(1)
cmd = "{artifactpath}/tools/test_driver/run_frameworktest.sh \
--runtestsh={runtestsh} \
--driverbin={driverbin} \
- --reportdir={artifactpath}/report \
+ --reportdir={reportdir} \
--tapname=framework_test.tap \
--logname=framework_test.log \
--testname='Frameworktest'".format(
runtestsh=options.runtestsh,
driverbin=options.framework_driverbin,
+ reportdir=options.reportdir,
artifactpath=options.artifactpath)
- ret = os.system(cmd)
- # TODO: handle return value
+ os.system(cmd)
def run_verification(options):
if (os.path.exists(options.verification_driverbin) == False):
print("Cannot find {driverbin}".format(
driverbin=options.verification_driverbin))
- os._exit(1)
+ sys.exit(1)
cmd = "{artifactpath}/tools/test_driver/run_frameworktest.sh \
--runtestsh={runtestsh} \
--driverbin={driverbin} \
- --reportdir={artifactpath}/report \
+ --reportdir={reportdir} \
--tapname=verification_test.tap \
--logname=verification_test.log \
--testname='Verification'".format(
runtestsh=options.runtestsh,
driverbin=options.verification_driverbin,
+ reportdir=options.reportdir,
artifactpath=options.artifactpath)
- ret = os.system(cmd)
- # TODO: handle return value
+ os.system(cmd)
def run_benchmark(options):
options.benchmark_driverbin = options.artifactpath + "/Product/out/bin/tflite_benchmark"
if (os.path.exists(options.benchmark_driverbin) == False):
print("Cannot find {driverbin}".format(driverbin=options.benchmark_driverbin))
- os._exit(1)
+ sys.exit(1)
cmd = "{artifactpath}/tools/test_driver/run_benchmark.sh \
--runtestsh={runtestsh} \
--driverbin={driverbin} \
- --reportdir={artifactpath}/report/benchmark".format(
+ --reportdir={reportdir}/benchmark".format(
runtestsh=options.runtestsh,
driverbin=options.benchmark_driverbin,
+ reportdir=options.reportdir,
artifactpath=options.artifactpath)
- ret = os.system(cmd)
- # TODO: handle return value
+ os.system(cmd)
def run_benchmarkop(options):
options.benchmark_driverbin = options.artifactpath + "/Product/out/bin/tflite_benchmark"
if (os.path.exists(options.benchmark_driverbin) == False):
print("Cannot find {driverbin}".format(driverbin=options.benchmark_driverbin))
- os._exit(1)
+ sys.exit(1)
cmd = "{artifactpath}/tools/test_driver/run_benchmark_op.sh \
--runtestsh={runtestsh} \
--driverbin={driverbin} \
- --reportdir={artifactpath}/report/benchmark_op \
+ --reportdir={reportdir}/benchmark_op \
--modelfilepath={artifactpath}/tests/framework \
--frameworktest_list_file={frameworktest_list_file}".format(
runtestsh=options.runtestsh,
driverbin=options.benchmark_driverbin,
artifactpath=options.artifactpath,
+ reportdir=options.reportdir,
frameworktest_list_file=options.frameworktest_list_file)
- ret = os.system(cmd)
- # TODO: handle return value
+ os.system(cmd)
def run_benchmarkacl(options):
cmd = "{artifactpath}/tools/test_driver/run_benchmark_acl.sh \
- --reportdir={artifactpath}/report/benchmark \
+ --reportdir={reportdir}/benchmark \
--bindir={artifactpath}/Product/out/bin".format(
- artifactpath=options.artifactpath)
- ret = os.system(cmd)
- # TODO: handle return value
+ reportdir=options.reportdir, artifactpath=options.artifactpath)
+ os.system(cmd)
def make_json_for_benchmark_result(options):
artifactpath=options.artifactpath)
if options.benchmarkop_on:
cmd += "print_to_json {artifactpath}/report/benchmark_op \
- {artifactpath}/report \"benchmark_op_result.json\"".format(
- artifactpath=options.artifactpath)
+ {reportdir} \"benchmark_op_result.json\"".format(
+ reportdir=options.reportdir, artifactpath=options.artifactpath)
else:
cmd += "print_to_json {artifactpath}/report/benchmark \
- {artifactpath}/report \"benchmark_result.json\"".format(
- artifactpath=options.artifactpath)
+ {reportdir} \"benchmark_result.json\"".format(
+ reportdir=options.reportdir, artifactpath=options.artifactpath)
sp = subprocess.Popen(["/bin/bash", "-i", "-c", cmd])
sp.communicate()
alltest_on = False
# artifactpath
- options.artifactpath = os.path.abspath(options.artifactpath)
+ if os.path.isdir(options.artifactpath) and os.path.isdir(
+ options.artifactpath + "/tests") and os.path.isdir(options.artifactpath +
+ "/Product"):
+ options.artifactpath = os.path.abspath(options.artifactpath)
+ else:
+ print("Pass a proper artifactpath")
+ sys.exit(1)
# run_test.sh
- if type(options.runtestsh) is not str:
+ if type(options.runtestsh) is not str or options.runtestsh == "":
options.runtestsh = options.artifactpath + "/tests/framework/run_test.sh"
if (os.path.exists(options.runtestsh) == False):
print("Cannot find {runtestsh}".format(runtestsh=options.runtestsh))
- os._exit(1)
+ sys.exit(1)
# unittest dir
- if type(options.unittestdir) is not str:
+ if type(options.unittestdir) is not str or options.unittestdir == "":
options.unittestdir = options.artifactpath + "/Product/out/unittest"
# LD_LIBRARY_PATH
- if type(options.ldlibrarypath) is not str:
+ if type(options.ldlibrarypath) is not str or options.ldlibrarypath == "":
options.ldlibrarypath = options.artifactpath + "/Product/out/lib"
+ # report dir
+ if type(options.reportdir) is not str or options.reportdir == "":
+ options.reportdir = options.artifactpath + "/report"
+
# set LD_LIBRARY_PATH
- os.environ["LD_LIBRARY_PATH"] = "{path}".format(path=options.ldlibrarypath)
+ os.environ["LD_LIBRARY_PATH"] = options.ldlibrarypath
# set USE_NNAPI
if options.usennapi_on == True: