From b30c3de07bba44ef1e345a89711c8f95fa4c2cdd Mon Sep 17 00:00:00 2001
From: Matthew Brookhart
Date: Wed, 16 Sep 2020 16:37:36 -0600
Subject: [PATCH] black format master (#6494)

---
 python/tvm/contrib/cc.py                      |   1 +
 python/tvm/micro/artifact.py                  |  61 +++---
 python/tvm/micro/build.py                     |  63 +++---
 python/tvm/micro/class_factory.py             |  31 +--
 python/tvm/micro/compiler.py                  |  78 ++++---
 python/tvm/micro/debugger.py                  |  53 +++--
 python/tvm/micro/micro_binary.py              |  27 ++-
 python/tvm/micro/micro_library.py             |  40 ++--
 python/tvm/micro/session.py                   |  18 +-
 python/tvm/micro/transport.py                 |  51 +++--
 python/tvm/relay/frontend/pytorch.py          |   8 +-
 python/tvm/relay/op/_transform.py             |   2 +
 python/tvm/relay/op/nn/_nn.py                 |  11 +-
 python/tvm/relay/op/vision/_vision.py         |   4 +-
 python/tvm/rpc/client.py                      |   3 +-
 python/tvm/target/target.py                   |   2 +
 python/tvm/topi/x86/conv2d.py                 |   1 -
 tests/lint/add_asf_header.py                  |  40 ++--
 tests/lint/check_file_type.py                 |  20 +-
 tests/python/frontend/pytorch/test_forward.py |   2 +-
 tests/python/relay/test_any.py                |  30 +--
 tests/python/topi/python/test_topi_pooling.py | 300 +++++++++++++++-----------
 tests/python/unittest/test_crt.py             | 145 +++++++------
 tests/python/unittest/test_runtime_rpc.py     |   3 +
 tutorials/micro/micro_tflite.py               |  24 ++-
 25 files changed, 588 insertions(+), 430 deletions(-)

diff --git a/python/tvm/contrib/cc.py b/python/tvm/contrib/cc.py
index ecefe56..1b6a62f 100644
--- a/python/tvm/contrib/cc.py
+++ b/python/tvm/contrib/cc.py
@@ -204,6 +204,7 @@ def _linux_compile(output, objects, options, compile_cmd="g++", compile_shared=F
         msg += "\nCommand line: " + " ".join(cmd)
         raise RuntimeError(msg)
 
+
 def _windows_shared(output, objects, options):
     cmd = ["clang"]
     cmd += ["-O2", "-flto=full", "-fuse-ld=lld-link"]
diff --git a/python/tvm/micro/artifact.py b/python/tvm/micro/artifact.py
index 5f887db..7893976 100644
--- a/python/tvm/micro/artifact.py
+++ b/python/tvm/micro/artifact.py
@@ -63,10 +63,10 @@ class Artifact:
         The unarchived artifact.
""" if os.path.exists(base_dir): - raise ValueError(f'base_dir exists: {base_dir}') + raise ValueError(f"base_dir exists: {base_dir}") base_dir_parent, base_dir_name = os.path.split(base_dir) - temp_dir = os.path.join(base_dir_parent, f'__tvm__{base_dir_name}') + temp_dir = os.path.join(base_dir_parent, f"__tvm__{base_dir_name}") os.mkdir(temp_dir) try: with tarfile.open(archive_path) as tar_f: @@ -75,32 +75,36 @@ class Artifact: temp_dir_contents = os.listdir(temp_dir) if len(temp_dir_contents) != 1: raise ArtifactBadArchiveError( - 'Expected exactly 1 subdirectory at root of archive, got ' - f'{temp_dir_contents!r}') + "Expected exactly 1 subdirectory at root of archive, got " + f"{temp_dir_contents!r}" + ) - metadata_path = os.path.join(temp_dir, temp_dir_contents[0], 'metadata.json') + metadata_path = os.path.join(temp_dir, temp_dir_contents[0], "metadata.json") if not metadata_path: - raise ArtifactBadArchiveError('No metadata.json found in archive') + raise ArtifactBadArchiveError("No metadata.json found in archive") with open(metadata_path) as metadata_f: metadata = json.load(metadata_f) - version = metadata.get('version') + version = metadata.get("version") if version != cls.ENCODING_VERSION: raise ArtifactBadArchiveError( - f'archive version: expect {cls.EXPECTED_VERSION}, found {version}') + f"archive version: expect {cls.EXPECTED_VERSION}, found {version}" + ) os.rename(os.path.join(temp_dir, temp_dir_contents[0]), base_dir) artifact_cls = cls for sub_cls in cls.__subclasses__(): - if (sub_cls.ARTIFACT_TYPE is not None and - sub_cls.ARTIFACT_TYPE == metadata.get('artifact_type')): + if sub_cls.ARTIFACT_TYPE is not None and sub_cls.ARTIFACT_TYPE == metadata.get( + "artifact_type" + ): artifact_cls = sub_cls break return artifact_cls.from_unarchived( - base_dir, metadata['labelled_files'], metadata['metadata']) + base_dir, metadata["labelled_files"], metadata["metadata"] + ) finally: shutil.rmtree(temp_dir) @@ -128,7 +132,7 @@ class Artifact: for f in files: f_path = os.path.join(self.base_dir, f) if not os.path.lexists(f_path): - raise ArtifactFileNotFoundError(f'{f} (label {label}): not found at {f_path}') + raise ArtifactFileNotFoundError(f"{f} (label {label}): not found at {f_path}") if os.path.islink(f_path): link_path = os.path.readlink(f_path) @@ -140,7 +144,8 @@ class Artifact: link_fullpath = os.path.realpath(link_fullpath) if not link_fullpath.startswith(self.base_dir): raise ArtifactBadSymlinkError( - f'{f} (label {label}): symlink points outside artifact tree') + f"{f} (label {label}): symlink points outside artifact tree" + ) def abspath(self, rel_path): """Return absolute path to the member with the given relative path.""" @@ -168,29 +173,37 @@ class Artifact: The value of archive_path, after potentially making the computation describe above. 
""" if os.path.isdir(archive_path): - archive_path = os.path.join(archive_path, f'{os.path.basename(self.base_dir)}.tar') + archive_path = os.path.join(archive_path, f"{os.path.basename(self.base_dir)}.tar") archive_name = os.path.splitext(os.path.basename(archive_path))[0] - with tarfile.open(archive_path, 'w') as tar_f: + with tarfile.open(archive_path, "w") as tar_f: + def _add_file(name, data, f_type): tar_info = tarfile.TarInfo(name=name) tar_info.type = f_type - data_bytes = bytes(data, 'utf-8') + data_bytes = bytes(data, "utf-8") tar_info.size = len(data) tar_f.addfile(tar_info, io.BytesIO(data_bytes)) - _add_file(f'{archive_name}/metadata.json', - json.dumps({'version': self.ENCODING_VERSION, - 'labelled_files': self.labelled_files, - 'metadata': self.metadata}, - indent=2, - sort_keys=True), - tarfile.REGTYPE) + _add_file( + f"{archive_name}/metadata.json", + json.dumps( + { + "version": self.ENCODING_VERSION, + "labelled_files": self.labelled_files, + "metadata": self.metadata, + }, + indent=2, + sort_keys=True, + ), + tarfile.REGTYPE, + ) for dir_path, _, files in os.walk(self.base_dir): for f in files: file_path = os.path.join(dir_path, f) archive_file_path = os.path.join( - archive_name, os.path.relpath(file_path, self.base_dir)) + archive_name, os.path.relpath(file_path, self.base_dir) + ) if not os.path.islink(file_path): tar_f.add(file_path, archive_file_path, recursive=False) continue diff --git a/python/tvm/micro/build.py b/python/tvm/micro/build.py index 203b396..7b881db 100644 --- a/python/tvm/micro/build.py +++ b/python/tvm/micro/build.py @@ -34,7 +34,7 @@ class Workspace: if debug or root is not None: with util.TempDirectory.set_keep_for_debug(): self.tempdir = util.tempdir(custom_path=root) - _LOG.info('Created debug mode workspace at: %s', self.tempdir.temp_dir) + _LOG.info("Created debug mode workspace at: %s", self.tempdir.temp_dir) else: self.tempdir = util.tempdir() @@ -50,50 +50,49 @@ class Workspace: # Required C runtime libraries, in link order. 
-CRT_RUNTIME_LIB_NAMES = ['utvm_rpc_server', 'utvm_rpc_common', 'common'] +CRT_RUNTIME_LIB_NAMES = ["utvm_rpc_server", "utvm_rpc_common", "common"] -TVM_ROOT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..', '..')) +TVM_ROOT_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", "..")) -CRT_ROOT_DIR = os.path.join(TVM_ROOT_DIR, 'src', 'runtime', 'crt') +CRT_ROOT_DIR = os.path.join(TVM_ROOT_DIR, "src", "runtime", "crt") -RUNTIME_LIB_SRC_DIRS = ( - [os.path.join(CRT_ROOT_DIR, n) for n in CRT_RUNTIME_LIB_NAMES] + - [os.path.join(TVM_ROOT_DIR, - '3rdparty/mbed-os/targets/TARGET_NORDIC/TARGET_NRF5x/TARGET_SDK_11/' - 'libraries/crc16')]) +RUNTIME_LIB_SRC_DIRS = [os.path.join(CRT_ROOT_DIR, n) for n in CRT_RUNTIME_LIB_NAMES] + [ + os.path.join( + TVM_ROOT_DIR, + "3rdparty/mbed-os/targets/TARGET_NORDIC/TARGET_NRF5x/TARGET_SDK_11/" "libraries/crc16", + ) +] -RUNTIME_SRC_REGEX = re.compile(r'^.*\.cc?$', re.IGNORECASE) +RUNTIME_SRC_REGEX = re.compile(r"^.*\.cc?$", re.IGNORECASE) _CRT_DEFAULT_OPTIONS = { - 'ccflags': ['-std=c++11'], - 'ldflags': ['-std=gnu++14'], - 'include_dirs': [ - f'{TVM_ROOT_DIR}/include', - f'{TVM_ROOT_DIR}/3rdparty/dlpack/include', - f'{TVM_ROOT_DIR}/3rdparty/mbed-os/targets/TARGET_NORDIC/TARGET_NRF5x/' - 'TARGET_SDK_11/libraries/crc16/', - f'{TVM_ROOT_DIR}/3rdparty/dmlc-core/include', - f'{CRT_ROOT_DIR}/include' + "ccflags": ["-std=c++11"], + "ldflags": ["-std=gnu++14"], + "include_dirs": [ + f"{TVM_ROOT_DIR}/include", + f"{TVM_ROOT_DIR}/3rdparty/dlpack/include", + f"{TVM_ROOT_DIR}/3rdparty/mbed-os/targets/TARGET_NORDIC/TARGET_NRF5x/" + "TARGET_SDK_11/libraries/crc16/", + f"{TVM_ROOT_DIR}/3rdparty/dmlc-core/include", + f"{CRT_ROOT_DIR}/include", ], - 'profile': { - 'common': ['-Wno-unused-variable'] - } + "profile": {"common": ["-Wno-unused-variable"]}, } def default_options(target_include_dir): """Return default opts passed to Compile commands.""" bin_opts = copy.deepcopy(_CRT_DEFAULT_OPTIONS) - bin_opts['include_dirs'].append(target_include_dir) + bin_opts["include_dirs"].append(target_include_dir) lib_opts = copy.deepcopy(bin_opts) - lib_opts['profile']['common'].append('-Werror') - lib_opts['cflags'] = ['-Wno-error=incompatible-pointer-types'] - return {'bin_opts': bin_opts, 'lib_opts': lib_opts} + lib_opts["profile"]["common"].append("-Werror") + lib_opts["cflags"] = ["-Wno-error=incompatible-pointer-types"] + return {"bin_opts": bin_opts, "lib_opts": lib_opts} def build_static_runtime(workspace, compiler, module, lib_opts=None, bin_opts=None): @@ -121,17 +120,17 @@ def build_static_runtime(workspace, compiler, module, lib_opts=None, bin_opts=No lib_opts = _CRT_DEFAULT_OPTIONS if lib_opts is None else lib_opts bin_opts = _CRT_DEFAULT_OPTIONS if bin_opts is None else bin_opts - mod_build_dir = workspace.relpath(os.path.join('build', 'module')) + mod_build_dir = workspace.relpath(os.path.join("build", "module")) os.makedirs(mod_build_dir) - mod_src_dir = workspace.relpath(os.path.join('src', 'module')) + mod_src_dir = workspace.relpath(os.path.join("src", "module")) os.makedirs(mod_src_dir) - mod_src_path = os.path.join(mod_src_dir, 'module.c') - module.save(mod_src_path, 'cc') + mod_src_path = os.path.join(mod_src_dir, "module.c") + module.save(mod_src_path, "cc") libs = [] for lib_src_dir in RUNTIME_LIB_SRC_DIRS: lib_name = os.path.basename(lib_src_dir) - lib_build_dir = workspace.relpath(f'build/{lib_name}') + lib_build_dir = workspace.relpath(f"build/{lib_name}") os.makedirs(lib_build_dir) lib_srcs = [] @@ -143,6 +142,6 @@ def 
build_static_runtime(workspace, compiler, module, lib_opts=None, bin_opts=No libs.append(compiler.library(mod_build_dir, [mod_src_path], lib_opts)) - runtime_build_dir = workspace.relpath(f'build/runtime') + runtime_build_dir = workspace.relpath(f"build/runtime") os.makedirs(runtime_build_dir) return compiler.binary(runtime_build_dir, libs, bin_opts) diff --git a/python/tvm/micro/class_factory.py b/python/tvm/micro/class_factory.py index 3d00636..9fac63c 100644 --- a/python/tvm/micro/class_factory.py +++ b/python/tvm/micro/class_factory.py @@ -35,8 +35,12 @@ class ClassFactory: # When not None, the superclass from which all cls must derive. SUPERCLASS = None - def __init__(self, cls: typing.Callable, init_args: typing.List[JsonSerializable], - init_kw: typing.Dict[str, JsonSerializable]): + def __init__( + self, + cls: typing.Callable, + init_args: typing.List[JsonSerializable], + init_kw: typing.Dict[str, JsonSerializable], + ): self.cls = cls self.init_args = init_args self.init_kw = init_kw @@ -55,13 +59,15 @@ class ClassFactory: @property def to_json(self): - return json.dumps({ - 'cls': '.'.join([self.cls.__module__, self.cls.__name__]), - 'init_args': self.init_args, - 'init_kw': self.init_kw, - }) + return json.dumps( + { + "cls": ".".join([self.cls.__module__, self.cls.__name__]), + "init_args": self.init_args, + "init_kw": self.init_kw, + } + ) - EXPECTED_KEYS = ('cls', 'init_args', 'init_kw') + EXPECTED_KEYS = ("cls", "init_args", "init_kw") @classmethod def from_json(cls, data): @@ -84,14 +90,15 @@ class ClassFactory: """ obj = json.loads(data) if not isinstance(obj, dict): - raise SerializedFactoryError(f'deserialized json payload: want dict, got: {obj!r}') + raise SerializedFactoryError(f"deserialized json payload: want dict, got: {obj!r}") for key in cls.EXPECTED_KEYS: if key not in obj: raise SerializedFactoryError( - f'deserialized json payload: expect key {key}, got: {obj!r}') + f"deserialized json payload: expect key {key}, got: {obj!r}" + ) - cls_package_name, cls_name = obj['cls'].rsplit('.', 1) + cls_package_name, cls_name = obj["cls"].rsplit(".", 1) cls_package = importlib.import_module(cls_package_name) cls_obj = getattr(cls_package, cls_name) - return cls(cls_obj, obj['init_args'], obj['init_kw']) + return cls(cls_obj, obj["init_args"], obj["init_kw"]) diff --git a/python/tvm/micro/compiler.py b/python/tvm/micro/compiler.py index f29925a..fa5887c 100644 --- a/python/tvm/micro/compiler.py +++ b/python/tvm/micro/compiler.py @@ -41,7 +41,7 @@ class NoDefaultToolchainMatchedError(Exception): class Compiler(metaclass=abc.ABCMeta): """The compiler abstraction used with micro TVM.""" - TVM_TARGET_RE = re.compile(r'^// tvm target: (.*)$') + TVM_TARGET_RE = re.compile(r"^// tvm target: (.*)$") @classmethod def _target_from_sources(cls, sources): @@ -68,36 +68,39 @@ class Compiler(metaclass=abc.ABCMeta): if len(target_strs) != 1: raise DetectTargetError( - 'autodetecting cross-compiler: could not extract TVM target from C source; regex ' - f'{cls.TVM_TARGET_RE.pattern} does not match any line in sources: ' - f'{", ".join(sources)}') + "autodetecting cross-compiler: could not extract TVM target from C source; regex " + f"{cls.TVM_TARGET_RE.pattern} does not match any line in sources: " + f'{", ".join(sources)}' + ) target_str = next(iter(target_strs)) return tvm.target.create(target_str) # Maps regexes identifying CPUs to the default toolchain prefix for that CPU. 
    TOOLCHAIN_PREFIX_BY_CPU_REGEX = {
-        r'cortex-[am].*': 'arm-none-eabi-',
-        'x86[_-]64': '',
-        'native': '',
+        r"cortex-[am].*": "arm-none-eabi-",
+        "x86[_-]64": "",
+        "native": "",
     }
 
     def _autodetect_toolchain_prefix(self, target):
         matches = []
         for regex, prefix in self.TOOLCHAIN_PREFIX_BY_CPU_REGEX.items():
-            if re.match(regex, target.attrs['mcpu']):
+            if re.match(regex, target.attrs["mcpu"]):
                 matches.append(prefix)
 
         if matches:
             if len(matches) != 1:
                 raise NoDefaultToolchainMatchedError(
-                    f'{opt} matched more than 1 default toolchain prefix: {", ".join(matches)}. '
-                    'Specify cc.cross_compiler to create_micro_library()')
+                    f'{target.attrs["mcpu"]} matched more than 1 default toolchain prefix: '
+                    f'{", ".join(matches)}. Specify cc.cross_compiler to create_micro_library()'
+                )
             return matches[0]
 
         raise NoDefaultToolchainMatchedError(
-            f'target {str(target)} did not match any default toolchains')
+            f"target {str(target)} did not match any default toolchains"
+        )
 
     def _defaults_from_target(self, target):
         """Determine the default compiler options from the target specified.
@@ -113,9 +116,9 @@ class Compiler(metaclass=abc.ABCMeta):
         """
         opts = []
         # TODO use march for arm(https://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html)?
-        if target.attrs.get('mcpu'):
+        if target.attrs.get("mcpu"):
             opts.append(f'-march={target.attrs["mcpu"]}')
-        if target.attrs.get('mfpu'):
+        if target.attrs.get("mfpu"):
             opts.append(f'-mfpu={target.attrs["mfpu"]}')
 
         return opts
@@ -203,61 +206,64 @@ class DefaultCompiler(Compiler):
         except DetectTargetError:
             assert self.target is not None, (
                 "Must specify target= to constructor when compiling sources which don't specify a "
-                "target")
+                "target"
+            )
 
             target = self.target
 
         if self.target is not None and str(self.target) != str(target):
             raise IncompatibleTargetError(
-                f'auto-detected target {target} differs from configured {self.target}')
+                f"auto-detected target {target} differs from configured {self.target}"
+            )
 
         prefix = self._autodetect_toolchain_prefix(target)
         outputs = []
         for src in sources:
             src_base, src_ext = os.path.splitext(os.path.basename(src))
 
-            compiler_name = {'.c': 'gcc', '.cc': 'g++', '.cpp': 'g++'}[src_ext]
-            args = [prefix + compiler_name, '-g']
+            compiler_name = {".c": "gcc", ".cc": "g++", ".cpp": "g++"}[src_ext]
+            args = [prefix + compiler_name, "-g"]
             args.extend(self._defaults_from_target(target))
 
-            args.extend(options.get(f'{src_ext[1:]}flags', []))
+            args.extend(options.get(f"{src_ext[1:]}flags", []))
 
-            for include_dir in options.get('include_dirs', []):
-                args.extend(['-I', include_dir])
+            for include_dir in options.get("include_dirs", []):
+                args.extend(["-I", include_dir])
 
-            output_filename = f'{src_base}.o'
+            output_filename = f"{src_base}.o"
             output_abspath = os.path.join(output, output_filename)
-            binutil.run_cmd(args + ['-c', '-o', output_abspath, src])
+            binutil.run_cmd(args + ["-c", "-o", output_abspath, src])
             outputs.append(output_abspath)
 
-        output_filename = f'{os.path.basename(output)}.a'
+        output_filename = f"{os.path.basename(output)}.a"
         output_abspath = os.path.join(output, output_filename)
-        binutil.run_cmd([prefix + 'ar', '-r', output_abspath] + outputs)
-        binutil.run_cmd([prefix + 'ranlib', output_abspath])
+        binutil.run_cmd([prefix + "ar", "-r", output_abspath] + outputs)
+        binutil.run_cmd([prefix + "ranlib", output_abspath])
 
         return tvm.micro.MicroLibrary(output, [output_filename])
 
     def binary(self, output, objects, options=None, link_main=True, main_options=None):
         assert self.target is not None, (
-            'must specify target= to constructor, or compile sources which specify the target '
-            'first')
+            "must specify target= to constructor, or compile sources which specify the target "
+            "first"
+        )
 
-        args = [self._autodetect_toolchain_prefix(self.target) + 'g++']
+        args = [self._autodetect_toolchain_prefix(self.target) + "g++"]
         args.extend(self._defaults_from_target(self.target))
         if options is not None:
-            args.extend(options.get('ldflags', []))
+            args.extend(options.get("ldflags", []))
 
-            for include_dir in options.get('include_dirs', []):
-                args.extend(['-I', include_dir])
+            for include_dir in options.get("include_dirs", []):
+                args.extend(["-I", include_dir])
 
         output_filename = os.path.basename(output)
         output_abspath = os.path.join(output, output_filename)
-        args.extend(['-g', '-o', output_abspath])
+        args.extend(["-g", "-o", output_abspath])
 
         if link_main:
-            host_main_srcs = glob.glob(os.path.join(build.CRT_ROOT_DIR, 'host', '*.cc'))
+            host_main_srcs = glob.glob(os.path.join(build.CRT_ROOT_DIR, "host", "*.cc"))
             if main_options:
-                main_lib = self.library(os.path.join(output, 'host'), host_main_srcs, main_options)
+                main_lib = self.library(os.path.join(output, "host"), host_main_srcs, main_options)
                 for lib_name in main_lib.library_files:
                     args.append(main_lib.abspath(lib_name))
             else:
@@ -311,8 +317,10 @@ class HostFlasher(Flasher):
     def flash(self, micro_binary):
         if self.debug:
             gdb_wrapper = debugger.GdbTransportDebugger(
-                [micro_binary.abspath(micro_binary.binary_file)])
+                [micro_binary.abspath(micro_binary.binary_file)]
+            )
             return transport.DebugWrapperTransport(
-                debugger=gdb_wrapper, transport=gdb_wrapper.Transport())
+                debugger=gdb_wrapper, transport=gdb_wrapper.Transport()
+            )
 
         return transport.SubprocessTransport([micro_binary.abspath(micro_binary.binary_file)])
diff --git a/python/tvm/micro/debugger.py b/python/tvm/micro/debugger.py
index 06e7c1c..33db554 100644
--- a/python/tvm/micro/debugger.py
+++ b/python/tvm/micro/debugger.py
@@ -60,7 +60,7 @@ class GdbDebugger(Debugger):
             try:
                 callback()
             except Exception:  # pylint: disable=broad-except
-                logging.warn('on_terminate_callback raised exception', exc_info=True)
+                logging.warning("on_terminate_callback raised exception", exc_info=True)
 
     def start(self):
         kwargs = self.popen_kwargs()
@@ -95,27 +95,33 @@ class GdbTransportDebugger(GdbDebugger):
         os.set_inheritable(stdout_write, True)
 
         sysname = os.uname()[0]
-        if sysname == 'Darwin':
-            args = ['lldb',
-                    '-O', f'target create {self.args[0]}',
-                    '-O', f'settings set target.input-path /dev/fd/{stdin_read}',
-                    '-O', f'settings set target.output-path /dev/fd/{stdout_write}']
+        if sysname == "Darwin":
+            args = [
+                "lldb",
+                "-O",
+                f"target create {self.args[0]}",
+                "-O",
+                f"settings set target.input-path /dev/fd/{stdin_read}",
+                "-O",
+                f"settings set target.output-path /dev/fd/{stdout_write}",
+            ]
             if len(self.args) > 1:
                 args.extend(
-                    ['-O', 'settings set target.run-args {}'.format(' '.join(self.args[1:]))])
-        elif sysname == 'Linux':
-            args = (['gdb', '--args'] +
-                    self.args +
-                    ['/dev/fd/{stdout_write}'])
+                    ["-O", "settings set target.run-args {}".format(" ".join(self.args[1:]))]
+                )
+        elif sysname == "Linux":
+            args = ["gdb", "--args"] + self.args + [f"/dev/fd/{stdout_write}"]
         else:
-            raise NotImplementedError(f'System {sysname} is not yet supported')
+            raise NotImplementedError(f"System {sysname} is not yet supported")
 
-        self.stdin = os.fdopen(stdin_write, 'wb', buffering=0)
-        self.stdout = os.fdopen(stdout_read, 'rb', buffering=0)
+        self.stdin = os.fdopen(stdin_write, "wb", buffering=0)
+        self.stdout = os.fdopen(stdout_read, "rb", buffering=0)
 
         return {
-            'args': args,
-            'pass_fds': [stdin_read, stdout_write],
+            "args": 
args, + "pass_fds": [stdin_read, stdout_write], } def _wait_for_process_death(self): @@ -156,8 +162,9 @@ class GdbTransportDebugger(GdbDebugger): class GdbRemoteDebugger(GdbDebugger): """A Debugger that invokes GDB and attaches to a remote GDBserver-based target.""" - def __init__(self, gdb_binary, remote_hostport, debug_binary, wrapping_context_manager=None, - **popen_kw): + def __init__( + self, gdb_binary, remote_hostport, debug_binary, wrapping_context_manager=None, **popen_kw + ): super(GdbRemoteDebugger, self).__init__() self.gdb_binary = gdb_binary self.remote_hostport = remote_hostport @@ -167,9 +174,13 @@ class GdbRemoteDebugger(GdbDebugger): def popen_kwargs(self): kwargs = { - 'args': [self.gdb_binary, - '-iex', f'file {self.debug_binary}', - '-iex', f'target remote {self.remote_hostport}'], + "args": [ + self.gdb_binary, + "-iex", + f"file {self.debug_binary}", + "-iex", + f"target remote {self.remote_hostport}", + ], } kwargs.update(self.popen_kw) diff --git a/python/tvm/micro/micro_binary.py b/python/tvm/micro/micro_binary.py index 8de144e..9d411a1 100644 --- a/python/tvm/micro/micro_binary.py +++ b/python/tvm/micro/micro_binary.py @@ -23,27 +23,32 @@ from . import artifact class MicroBinary(artifact.Artifact): """An Artifact that describes a compiled binary.""" - ARTIFACT_TYPE = 'micro_binary' + ARTIFACT_TYPE = "micro_binary" @classmethod def from_unarchived(cls, base_dir, labelled_files, metadata): - binary_file = labelled_files['binary_file'][0] - del labelled_files['binary_file'] + binary_file = labelled_files["binary_file"][0] + del labelled_files["binary_file"] debug_files = None - if 'debug_files' in labelled_files: - debug_files = labelled_files['debug_files'] - del labelled_files['debug_files'] - - return cls(base_dir, binary_file, debug_files=debug_files, labelled_files=labelled_files, - metadata=metadata) + if "debug_files" in labelled_files: + debug_files = labelled_files["debug_files"] + del labelled_files["debug_files"] + + return cls( + base_dir, + binary_file, + debug_files=debug_files, + labelled_files=labelled_files, + metadata=metadata, + ) def __init__(self, base_dir, binary_file, debug_files=None, labelled_files=None, metadata=None): labelled_files = {} if labelled_files is None else dict(labelled_files) metadata = {} if metadata is None else dict(metadata) - labelled_files['binary_file'] = [binary_file] + labelled_files["binary_file"] = [binary_file] if debug_files is not None: - labelled_files['debug_files'] = debug_files + labelled_files["debug_files"] = debug_files super(MicroBinary, self).__init__(base_dir, labelled_files, metadata) diff --git a/python/tvm/micro/micro_library.py b/python/tvm/micro/micro_library.py index 7ca82e8..52c8cf2 100644 --- a/python/tvm/micro/micro_library.py +++ b/python/tvm/micro/micro_library.py @@ -25,28 +25,34 @@ from . 
import compiler class MicroLibrary(artifact.Artifact): """An Artifact that describes a compiled static library.""" - ARTIFACT_TYPE = 'micro_library' + ARTIFACT_TYPE = "micro_library" @classmethod def from_unarchived(cls, base_dir, labelled_files, metadata): - library_files = labelled_files['library_files'] - del labelled_files['library_files'] + library_files = labelled_files["library_files"] + del labelled_files["library_files"] debug_files = None - if 'debug_files' in labelled_files: - debug_files = labelled_files['debug_files'] - del labelled_files['debug_files'] - - return cls(base_dir, library_files, debug_files=debug_files, labelled_files=labelled_files, - metadata=metadata) - - def __init__(self, base_dir, library_files, debug_files=None, labelled_files=None, - metadata=None): + if "debug_files" in labelled_files: + debug_files = labelled_files["debug_files"] + del labelled_files["debug_files"] + + return cls( + base_dir, + library_files, + debug_files=debug_files, + labelled_files=labelled_files, + metadata=metadata, + ) + + def __init__( + self, base_dir, library_files, debug_files=None, labelled_files=None, metadata=None + ): labelled_files = {} if labelled_files is None else dict(labelled_files) metadata = {} if metadata is None else dict(metadata) - labelled_files['library_files'] = library_files + labelled_files["library_files"] = library_files if debug_files is not None: - labelled_files['debug_files'] = debug_files + labelled_files["debug_files"] = debug_files super(MicroLibrary, self).__init__(base_dir, labelled_files, metadata) @@ -68,13 +74,13 @@ def create_micro_library(output, objects, options=None): """ temp_dir = util.tempdir() comp = compiler.DefaultCompiler() - output = temp_dir.relpath('micro-library.o') + output = temp_dir.relpath("micro-library.o") comp.library(output, objects, options=options) - with open(output, 'rb') as output_f: + with open(output, "rb") as output_f: elf_data = output_f.read() # TODO(areusch): Define a mechanism to determine compiler and linker flags for each lib # enabled by the target str, and embed here. - micro_lib = MicroLibrary('', elf_data, {'target': comp.target.str()}) + micro_lib = MicroLibrary("", elf_data, {"target": comp.target.str()}) micro_lib.save(output) diff --git a/python/tvm/micro/session.py b/python/tvm/micro/session.py index 000e8e9..084f467 100644 --- a/python/tvm/micro/session.py +++ b/python/tvm/micro/session.py @@ -46,8 +46,9 @@ class Session: micro_mod = sess.create_micro_mod(c_mod) """ - def __init__(self, binary=None, flasher=None, transport_context_manager=None, - session_name='micro-rpc'): + def __init__( + self, binary=None, flasher=None, transport_context_manager=None, session_name="micro-rpc" + ): """Configure a new session. Parameters @@ -73,7 +74,7 @@ class Session: self._graph_runtime = None def get_system_lib(self): - return self._rpc.get_function('runtime.SystemLib')() + return self._rpc.get_function("runtime.SystemLib")() def __enter__(self): """Initialize this session and establish an RPC session with the on-device RPC server. 
@@ -88,9 +89,11 @@ class Session: time.sleep(3.0) self.transport = TransportLogger( - self.session_name, self.transport_context_manager, level=logging.INFO).__enter__() - self._rpc = RPCSession(_rpc_connect( - self.session_name, self.transport.write, self.transport.read)) + self.session_name, self.transport_context_manager, level=logging.INFO + ).__enter__() + self._rpc = RPCSession( + _rpc_connect(self.session_name, self.transport.write, self.transport.read) + ) self.context = self._rpc.cpu(0) return self @@ -120,5 +123,4 @@ def create_local_graph_runtime(graph_json_str, mod, ctx): """ device_type_id = [ctx.device_type, ctx.device_id] fcreate = get_global_func("tvm.graph_runtime.create") - return graph_runtime.GraphModule(fcreate( - graph_json_str, mod, *device_type_id)) + return graph_runtime.GraphModule(fcreate(graph_json_str, mod, *device_type_id)) diff --git a/python/tvm/micro/transport.py b/python/tvm/micro/transport.py index 52617ab..f9b41a4 100644 --- a/python/tvm/micro/transport.py +++ b/python/tvm/micro/transport.py @@ -106,20 +106,20 @@ class TransportLogger(Transport): self.level = level # Construct PRINTABLE to exclude whitespace from string.printable. - PRINTABLE = (string.digits + string.ascii_letters + string.punctuation) + PRINTABLE = string.digits + string.ascii_letters + string.punctuation @classmethod def _to_hex(cls, data): lines = [] if not data: - lines.append('') + lines.append("") return lines for i in range(0, (len(data) + 15) // 16): - chunk = data[i * 16:(i + 1) * 16] - hex_chunk = ' '.join(f'{c:02x}' for c in chunk) - ascii_chunk = ''.join((chr(c) if chr(c) in cls.PRINTABLE else '.') for c in chunk) - lines.append(f'{i * 16:04x} {hex_chunk:47} {ascii_chunk}') + chunk = data[i * 16 : (i + 1) * 16] + hex_chunk = " ".join(f"{c:02x}" for c in chunk) + ascii_chunk = "".join((chr(c) if chr(c) in cls.PRINTABLE else ".") for c in chunk) + lines.append(f"{i * 16:04x} {hex_chunk:47} {ascii_chunk}") if len(lines) == 1: lines[0] = lines[0][6:] @@ -127,22 +127,29 @@ class TransportLogger(Transport): return lines def open(self): - self.logger.log(self.level, 'opening transport') + self.logger.log(self.level, "opening transport") self.child.open() def close(self): - self.logger.log(self.level, 'closing transport') + self.logger.log(self.level, "closing transport") return self.child.close() def read(self, n): data = self.child.read(n) hex_lines = self._to_hex(data) if len(hex_lines) > 1: - self.logger.log(self.level, '%s read %4d B -> [%d B]:\n%s', - self.name, n, len(data), '\n'.join(hex_lines)) + self.logger.log( + self.level, + "%s read %4d B -> [%d B]:\n%s", + self.name, + n, + len(data), + "\n".join(hex_lines), + ) else: - self.logger.log(self.level, '%s read %4d B -> [%d B]: %s', - self.name, n, len(data), hex_lines[0]) + self.logger.log( + self.level, "%s read %4d B -> [%d B]: %s", self.name, n, len(data), hex_lines[0] + ) return data @@ -150,11 +157,17 @@ class TransportLogger(Transport): bytes_written = self.child.write(data) hex_lines = self._to_hex(data[:bytes_written]) if len(hex_lines) > 1: - self.logger.log(self.level, '%s write <- [%d B]:\n%s', - self.name, bytes_written, '\n'.join(hex_lines)) + self.logger.log( + self.level, + "%s write <- [%d B]:\n%s", + self.name, + bytes_written, + "\n".join(hex_lines), + ) else: - self.logger.log(self.level, '%s write <- [%d B]: %s', - self.name, bytes_written, hex_lines[0]) + self.logger.log( + self.level, "%s write <- [%d B]: %s", self.name, bytes_written, hex_lines[0] + ) return bytes_written @@ -168,9 +181,9 @@ class 
SubprocessTransport(Transport): self.popen = None def open(self): - self.kwargs['stdout'] = subprocess.PIPE - self.kwargs['stdin'] = subprocess.PIPE - self.kwargs['bufsize'] = 0 + self.kwargs["stdout"] = subprocess.PIPE + self.kwargs["stdin"] = subprocess.PIPE + self.kwargs["bufsize"] = 0 self.popen = subprocess.Popen(self.args, **self.kwargs) self.stdin = self.popen.stdin self.stdout = self.popen.stdout diff --git a/python/tvm/relay/frontend/pytorch.py b/python/tvm/relay/frontend/pytorch.py index 2eff415..886729b 100644 --- a/python/tvm/relay/frontend/pytorch.py +++ b/python/tvm/relay/frontend/pytorch.py @@ -1230,14 +1230,16 @@ def _reshape(): return _impl + def _pixel_shuffle(prelude): def _impl(inputs, input_types): data = inputs[0] upscale_factor = inputs[1] upscale_squared = upscale_factor * upscale_factor b, c, h, w = _infer_shape(data) - assert c % upscale_squared == 0, \ - "input channel should be divisible by square of upscale_factor" + assert ( + c % upscale_squared == 0 + ), "input channel should be divisible by square of upscale_factor" ndims = len(_infer_shape(data, prelude.mod)) axes = list(range(ndims)) @@ -1256,8 +1258,10 @@ def _pixel_shuffle(prelude): axes = [0, 1, 4, 2, 5, 3] data = _op.transform.transpose(data, axes) return _op.transform.reshape(data, out_shape) + return _impl + def _clone(): def _impl(inputs, input_types): data = inputs[0] diff --git a/python/tvm/relay/op/_transform.py b/python/tvm/relay/op/_transform.py index d0a0bd2..a2fab24 100644 --- a/python/tvm/relay/op/_transform.py +++ b/python/tvm/relay/op/_transform.py @@ -759,6 +759,7 @@ def _repeat_shape_func(data_shape, repeats, axis): return out + @_reg.register_shape_func("repeat", False) def repeat_shape_func(attrs, inputs, _): """ @@ -789,6 +790,7 @@ def _stack_shape_func(data_shape, axis, num_inputs): return out + @_reg.register_shape_func("stack", False) def stack_shape_func(attrs, inputs, _): axis = get_const_int(attrs.axis) diff --git a/python/tvm/relay/op/nn/_nn.py b/python/tvm/relay/op/nn/_nn.py index 00cc94c..8a6ef7a 100644 --- a/python/tvm/relay/op/nn/_nn.py +++ b/python/tvm/relay/op/nn/_nn.py @@ -774,18 +774,15 @@ def conv2d_NCHWc_shape_func(attrs, inputs, _): @script -def _conv2d_transpose_nchw_shape_func(dshape, kshape, strides, - padding, dilation, output_padding): +def _conv2d_transpose_nchw_shape_func(dshape, kshape, strides, padding, dilation, output_padding): out = output_tensor((dshape.shape[0],), "int64") kheight = kshape[2] kwidth = kshape[3] dilated_kh = (kheight - 1) * dilation[0] + 1 dilated_kw = (kwidth - 1) * dilation[1] + 1 - out_height = strides[0] * (dshape[2] - 1) + dilated_kh - \ - 2 * padding[0] + output_padding[0] - out_width = strides[1] * (dshape[3] - 1) + dilated_kw - \ - 2 * padding[1] + output_padding[1] + out_height = strides[0] * (dshape[2] - 1) + dilated_kh - 2 * padding[0] + output_padding[0] + out_width = strides[1] * (dshape[3] - 1) + dilated_kw - 2 * padding[1] + output_padding[1] out[0] = dshape[0] out[1] = kshape[1] @@ -811,7 +808,7 @@ def conv2d_transpose_nchw_shape_func(attrs, inputs, _): convert(strides), convert(padding), convert(dilation), - convert(output_padding) + convert(output_padding), ) ] diff --git a/python/tvm/relay/op/vision/_vision.py b/python/tvm/relay/op/vision/_vision.py index 28e21e9..04676e2 100644 --- a/python/tvm/relay/op/vision/_vision.py +++ b/python/tvm/relay/op/vision/_vision.py @@ -94,7 +94,7 @@ def _roi_align_shape_func(data_shape, rois_shape, pooled_size): out[3] = int64(pooled_size[1]) return out + 
 @reg.register_shape_func("vision.roi_align", False)
 def roi_align_shape_func(attrs, inputs, _):
-    return [_roi_align_shape_func(inputs[0], inputs[1],
-                                  convert(attrs.pooled_size))]
+    return [_roi_align_shape_func(inputs[0], inputs[1], convert(attrs.pooled_size))]
diff --git a/python/tvm/rpc/client.py b/python/tvm/rpc/client.py
index 34859ec..b9ad94d 100644
--- a/python/tvm/rpc/client.py
+++ b/python/tvm/rpc/client.py
@@ -193,7 +193,8 @@ class RPCSession(object):
         """
         if "download_linked_module" not in self._remote_funcs:
             self._remote_funcs["download_linked_module"] = self.get_function(
-                "tvm.rpc.server.download_linked_module")
+                "tvm.rpc.server.download_linked_module"
+            )
         return self._remote_funcs["download_linked_module"](path)
 
     def cpu(self, dev_id=0):
diff --git a/python/tvm/target/target.py b/python/tvm/target/target.py
index a9d4ede..7549f28 100644
--- a/python/tvm/target/target.py
+++ b/python/tvm/target/target.py
@@ -219,6 +219,7 @@ def intel_graphics(model="unknown", options=None):
     opts = _merge_opts(opts, options)
     return Target(" ".join(["opencl"] + opts))
 
+
 def micro(hardware="unknown", options=None):
     """Returns a microTVM target.
 
@@ -239,6 +240,7 @@ def micro(hardware="unknown", options=None):
     # external dependencies are present.
     return Target(" ".join(["c"] + opts))
 
+
 def arm_cpu(model="unknown", options=None):
     """Returns an ARM CPU target.
     This function will also download pre-tuned op parameters when there are none.
diff --git a/python/tvm/topi/x86/conv2d.py b/python/tvm/topi/x86/conv2d.py
index 2723a1c..a52e27a 100644
--- a/python/tvm/topi/x86/conv2d.py
+++ b/python/tvm/topi/x86/conv2d.py
@@ -150,7 +150,6 @@ def _pack_data(cfg, data, kernel):
     if isinstance(ic, tvm.tir.Any):
         raise RuntimeError("Dynamic input channel is not supported for conv2d.")
 
-
     data = te.compute(
         (n, ic_chunk, ih, iw, ic_bn),
         lambda bs, c, h, w, vc: data[bs, c * ic_bn + vc, h, w],
diff --git a/tests/lint/add_asf_header.py b/tests/lint/add_asf_header.py
index 21d25c2..a83373c 100644
--- a/tests/lint/add_asf_header.py
+++ b/tests/lint/add_asf_header.py
@@ -116,24 +116,24 @@ header_groovystyle = """
 """.strip()
 
 FMT_MAP = {
-    "sh" : header_pystyle,
-    "cc" : header_cstyle,
-    "c" : header_cstyle,
-    "mm" : header_cstyle,
-    "m" : header_cstyle,
-    "go" : header_cstyle,
-    "java" : header_cstyle,
-    "h" : header_cstyle,
-    "py" : header_pystyle,
-    "toml" : header_pystyle,
+    "sh": header_pystyle,
+    "cc": header_cstyle,
+    "c": header_cstyle,
+    "mm": header_cstyle,
+    "m": header_cstyle,
+    "go": header_cstyle,
+    "java": header_cstyle,
+    "h": header_cstyle,
+    "py": header_pystyle,
+    "toml": header_pystyle,
     "yml": header_pystyle,
     "yaml": header_pystyle,
-    "rs" : header_cstyle,
-    "md" : header_mdstyle,
-    "cmake" : header_pystyle,
-    "mk" : header_pystyle,
-    "rst" : header_rststyle,
-    "gradle" : header_groovystyle,
+    "rs": header_cstyle,
+    "md": header_mdstyle,
+    "cmake": header_pystyle,
+    "mk": header_pystyle,
+    "rst": header_rststyle,
+    "gradle": header_groovystyle,
     "tcl": header_pystyle,
     "xml": header_mdstyle,
     "storyboard": header_mdstyle,
@@ -149,8 +149,9 @@ def copyright_line(line):
     # so that the copyright detector won't detect the file itself.
if line.find("Copyright " + "(c)") != -1: return True - if (line.find("Copyright") != -1 and - line.find(" by") != -1): + # break pattern into two lines to avoid false-negative check + spattern1 = "Copyright" + if line.find(spattern1) != -1 and line.find("by") != -1: return True return False @@ -190,7 +191,7 @@ def add_header(fname, header): elif lines[0].startswith(""): skipline = True elif lines[0].startswith("// !$"): - skipline =True + skipline = True if skipline: outfile.write(lines[0]) @@ -206,6 +207,7 @@ def add_header(fname, header): if has_copyright: print("Removed copyright line from %s" % fname) + def main(args): if len(args) != 2: print("Usage: python add_asf_header.py ") diff --git a/tests/lint/check_file_type.py b/tests/lint/check_file_type.py index 9c0a607..60aa732 100644 --- a/tests/lint/check_file_type.py +++ b/tests/lint/check_file_type.py @@ -164,8 +164,9 @@ def copyright_line(line): # so that the copyright detector won't detect the file itself. if line.find("Copyright " + "(c)") != -1: return True - if (line.find("Copyright") != -1 and - line.find(" by") != -1): + # break pattern into two lines to avoid false-negative check + spattern1 = "Copyright" + if line.find(spattern1) != -1 and line.find("by") != -1: return True return False @@ -192,8 +193,7 @@ def check_asf_copyright(fname): def main(): cmd = ["git", "ls-files"] - proc = subprocess.Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) (out, _) = proc.communicate() assert proc.returncode == 0, f'{" ".join(cmd)} errored: {out}' res = out.decode("utf-8") @@ -208,9 +208,11 @@ def main(): report = "------File type check report----\n" report += "\n".join(error_list) report += "\nFound %d files that are now allowed\n" % len(error_list) - report += ("We do not check in binary files into the repo.\n" - "If necessary, please discuss with committers and" - "modify tests/lint/check_file_type.py to enable the file you need.\n") + report += ( + "We do not check in binary files into the repo.\n" + "If necessary, please discuss with committers and" + "modify tests/lint/check_file_type.py to enable the file you need.\n" + ) sys.stderr.write(report) sys.stderr.flush() sys.exit(-1) @@ -224,7 +226,9 @@ def main(): if asf_copyright_list: report = "------File type check report----\n" report += "\n".join(asf_copyright_list) + "\n" - report += "------Found %d files that has ASF header with copyright message----\n" % len(asf_copyright_list) + report += "------Found %d files that has ASF header with copyright message----\n" % len( + asf_copyright_list + ) report += "--- Files with ASF header do not need Copyright lines.\n" report += "--- Contributors retain copyright to their contribution by default.\n" report += "--- If a file comes with a different license, consider put it under the 3rdparty folder instead.\n" diff --git a/tests/python/frontend/pytorch/test_forward.py b/tests/python/frontend/pytorch/test_forward.py index 4192cf4..fe7be5b 100644 --- a/tests/python/frontend/pytorch/test_forward.py +++ b/tests/python/frontend/pytorch/test_forward.py @@ -182,7 +182,7 @@ def verify_model(model_name, input_data=[], custom_convert_map={}, rtol=1e-5, at with torch.no_grad(): baseline_outputs = baseline_model(*baseline_input) - + if isinstance(baseline_outputs, tuple): baseline_outputs = tuple(out.cpu().numpy() for out in baseline_outputs) else: diff --git a/tests/python/relay/test_any.py b/tests/python/relay/test_any.py index 2dde8f9..06a9420 100644 --- 
a/tests/python/relay/test_any.py +++ b/tests/python/relay/test_any.py @@ -36,7 +36,9 @@ def any_dims(ndim): return tuple(shape) -def check_result(args, mod, expected, flatten=False, assert_shape=False, only_vm=False, targets=None): +def check_result( + args, mod, expected, flatten=False, assert_shape=False, only_vm=False, targets=None +): for kind in ["debug", "vm"]: targets = targets or tvm.testing.enabled_targets() for tgt, ctx in targets: @@ -484,15 +486,15 @@ def test_any_conv2d_NCHWc(): def verify_any_conv2d_transpose_nchw( - data_shape, - kernel_shape, - strides, - padding, - dilation, - groups, - static_data_shape, - ref_out_shape, - output_padding, + data_shape, + kernel_shape, + strides, + padding, + dilation, + groups, + static_data_shape, + ref_out_shape, + output_padding, ): mod = tvm.IRModule() dtype = "float32" @@ -511,8 +513,9 @@ def verify_any_conv2d_transpose_nchw( mod["main"] = relay.Function([data, kernel], y) data_np = np.random.uniform(size=static_data_shape).astype(dtype) kernel_np = np.random.uniform(size=kernel_shape).astype(dtype) - check_result([data_np, kernel_np], mod, ref_out_shape, assert_shape=True, - targets=[('llvm', tvm.cpu())]) + check_result( + [data_np, kernel_np], mod, ref_out_shape, assert_shape=True, targets=[("llvm", tvm.cpu())] + ) # TODO(@kevinthesun): Support dynamic input height and width. @@ -537,7 +540,7 @@ def test_any_conv2d_transpose_nchw(): 1, (1, 32, 224, 224), (1, 64, 448, 448), - (1, 1) + (1, 1), ) @@ -1152,6 +1155,7 @@ def verify_any_repeat(data_shape, np_dshape, repeats, axis): ref_res = np.repeat(np_data, repeats, axis) check_result([np_data], mod, ref_res) + @tvm.testing.uses_gpu def test_any_repeat(): verify_any_repeat(any_dims(2), (1, 2), 2, 0) diff --git a/tests/python/topi/python/test_topi_pooling.py b/tests/python/topi/python/test_topi_pooling.py index 30b532e..251172f 100644 --- a/tests/python/topi/python/test_topi_pooling.py +++ b/tests/python/topi/python/test_topi_pooling.py @@ -45,6 +45,7 @@ _pool_grad_schedule = { "gpu": topi.cuda.schedule_pool_grad, } + def verify_pool(n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_include_pad=True): """verify function of pool""" iw = ih @@ -52,10 +53,17 @@ def verify_pool(n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_include_ sw = sh pt, pl, pb, pr = padding layout = "NCHW" - A = te.placeholder((n, ic, ih, iw), name='A') - B = topi.nn.pool(A, kernel=[kh, kw], stride=[sh, sw], padding=padding, - pool_type=pool_type, ceil_mode=ceil_mode, - layout="NCHW", count_include_pad=count_include_pad) + A = te.placeholder((n, ic, ih, iw), name="A") + B = topi.nn.pool( + A, + kernel=[kh, kw], + stride=[sh, sw], + padding=padding, + pool_type=pool_type, + ceil_mode=ceil_mode, + layout="NCHW", + count_include_pad=count_include_pad, + ) B = topi.nn.relu(B) dtype = A.dtype @@ -69,27 +77,33 @@ def verify_pool(n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_include_ assert bshape[3] == int(math.floor(float(ashape[3] - kw + pl + pr) / sw) + 1) a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype) - pad_np = np.zeros(shape=(n, ic, ih+pt+pb, iw+pl+pr)).astype(dtype) - no_zero = (range(n), range(ic), (range(pt, ih+pt)), (range(pl, iw+pl))) + pad_np = np.zeros(shape=(n, ic, ih + pt + pb, iw + pl + pr)).astype(dtype) + no_zero = (range(n), range(ic), (range(pt, ih + pt)), (range(pl, iw + pl))) pad_np[np.ix_(*no_zero)] = a_np _, oc, oh, ow = get_const_tuple(B.shape) b_np = np.zeros(shape=(n, oc, oh, ow)).astype(dtype) - if pool_type == 'avg': + if pool_type == "avg": 
for i in range(oh): for j in range(ow): if count_include_pad: - b_np[:, :, i, j] = \ - np.mean(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2, 3)) + b_np[:, :, i, j] = np.mean( + pad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw], axis=(2, 3) + ) else: - pad_count = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw] > 0, axis=(2, 3)) - b_np[:, :, i, j] = np.sum(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2, 3)) \ - / np.maximum(pad_count, 1) - - elif pool_type == 'max': + pad_count = np.sum( + pad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw] > 0, axis=(2, 3) + ) + b_np[:, :, i, j] = np.sum( + pad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw], axis=(2, 3) + ) / np.maximum(pad_count, 1) + + elif pool_type == "max": for i in range(oh): for j in range(ow): - b_np[:, :, i, j] = np.max(pad_np[:, :, i*sh:i*sh+kh, j*sw:j*sw+kw], axis=(2, 3)) + b_np[:, :, i, j] = np.max( + pad_np[:, :, i * sh : i * sh + kh, j * sw : j * sw + kw], axis=(2, 3) + ) b_np = np.maximum(b_np, 0.0) def check_device(device, ctx): @@ -107,17 +121,26 @@ def verify_pool(n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_include_ for device, ctx in tvm.testing.enabled_targets(): check_device(device, ctx) -def verify_pool_grad(n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_include_pad=True, - add_relu=False): + +def verify_pool_grad( + n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_include_pad=True, add_relu=False +): """verify function of pool_grad""" iw = ih kw = kh sw = sh pt, pl, pb, pr = padding - A = te.placeholder((n, ic, ih, iw), name='A') - B = topi.nn.pool(A, kernel=[kh, kw], stride=[sh, sw], padding=padding, - pool_type=pool_type, ceil_mode=ceil_mode, - layout="NCHW", count_include_pad=count_include_pad) + A = te.placeholder((n, ic, ih, iw), name="A") + B = topi.nn.pool( + A, + kernel=[kh, kw], + stride=[sh, sw], + padding=padding, + pool_type=pool_type, + ceil_mode=ceil_mode, + layout="NCHW", + count_include_pad=count_include_pad, + ) dtype = A.dtype bshape = get_const_tuple(B.shape) @@ -128,21 +151,35 @@ def verify_pool_grad(n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_inc else: assert bshape[2] == int(math.floor(float(ashape[2] - kh + pt + pb) / sh) + 1) assert bshape[3] == int(math.floor(float(ashape[3] - kw + pl + pr) / sw) + 1) - OutGrad = te.placeholder(bshape, name='OutGrad') - PoolGrad = topi.nn.pool_grad(OutGrad, A, kernel=[kh, kw], stride=[sh, sw], padding=padding, - pool_type=pool_type, ceil_mode=ceil_mode, - layout="NCHW", count_include_pad=count_include_pad) + OutGrad = te.placeholder(bshape, name="OutGrad") + PoolGrad = topi.nn.pool_grad( + OutGrad, + A, + kernel=[kh, kw], + stride=[sh, sw], + padding=padding, + pool_type=pool_type, + ceil_mode=ceil_mode, + layout="NCHW", + count_include_pad=count_include_pad, + ) if add_relu: PoolGrad = topi.nn.relu(PoolGrad) a_np = np.random.uniform(low=0.001, size=(n, ic, ih, iw)).astype(dtype) out_grad_np = np.random.uniform(low=0.001, size=bshape).astype(dtype) - pool_grad_np = tvm.topi.testing.pool_grad_nchw(a_np, out_grad_np, pool_size=(kh, kw), - strides=(sh, sw), padding=padding, - pool_type=pool_type, ceil_mode=ceil_mode, - count_include_pad=count_include_pad) + pool_grad_np = tvm.topi.testing.pool_grad_nchw( + a_np, + out_grad_np, + pool_size=(kh, kw), + strides=(sh, sw), + padding=padding, + pool_type=pool_type, + ceil_mode=ceil_mode, + count_include_pad=count_include_pad, + ) if add_relu: - pool_grad_np = np.maximum(pool_grad_np, 0.) 
+ pool_grad_np = np.maximum(pool_grad_np, 0.0) def check_device(device, ctx): print("Running on target: %s" % device) @@ -160,60 +197,62 @@ def verify_pool_grad(n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_inc for device, ctx in tvm.testing.enabled_targets(): check_device(device, ctx) + @tvm.testing.uses_gpu def test_pool(): """test cases of pool""" - verify_pool(1, 256, 32, 2, 2, [0, 0, 0, 0], 'avg', False, True) - verify_pool(1, 256, 31, 3, 3, [1, 2, 1, 2], 'avg', False, True) - verify_pool(1, 256, 32, 2, 2, [1, 2, 1, 2], 'avg', False, False) - verify_pool(1, 256, 31, 4, 4, [3, 3, 3, 3], 'avg', False, False) - verify_pool(1, 256, 31, 4, 4, [0, 0, 0, 0], 'avg', False, False) - verify_pool(1, 256, 32, 2, 2, [0, 0, 0, 0], 'max', False) - verify_pool(1, 256, 31, 3, 3, [2, 1, 2, 1], 'max', False) - verify_pool(1, 256, 31, 3, 3, [2, 1, 2, 1], 'max', True) - - verify_pool(1, 256, 31, 3, 3, [2, 1, 0, 3], 'avg', False, True) - verify_pool(1, 256, 32, 2, 2, [0, 3, 2, 1], 'avg', False, False) - verify_pool(1, 256, 31, 3, 3, [1, 0, 3, 2], 'max', False) - verify_pool(1, 256, 31, 3, 3, [3, 2, 1, 0], 'max', True) + verify_pool(1, 256, 32, 2, 2, [0, 0, 0, 0], "avg", False, True) + verify_pool(1, 256, 31, 3, 3, [1, 2, 1, 2], "avg", False, True) + verify_pool(1, 256, 32, 2, 2, [1, 2, 1, 2], "avg", False, False) + verify_pool(1, 256, 31, 4, 4, [3, 3, 3, 3], "avg", False, False) + verify_pool(1, 256, 31, 4, 4, [0, 0, 0, 0], "avg", False, False) + verify_pool(1, 256, 32, 2, 2, [0, 0, 0, 0], "max", False) + verify_pool(1, 256, 31, 3, 3, [2, 1, 2, 1], "max", False) + verify_pool(1, 256, 31, 3, 3, [2, 1, 2, 1], "max", True) + + verify_pool(1, 256, 31, 3, 3, [2, 1, 0, 3], "avg", False, True) + verify_pool(1, 256, 32, 2, 2, [0, 3, 2, 1], "avg", False, False) + verify_pool(1, 256, 31, 3, 3, [1, 0, 3, 2], "max", False) + verify_pool(1, 256, 31, 3, 3, [3, 2, 1, 0], "max", True) + @tvm.testing.uses_gpu def test_pool_grad(): """test cases of pool_grad""" - verify_pool_grad(1, 256, 32, 3, 2, [1, 1, 1, 1], 'avg', False, False) - verify_pool_grad(1, 256, 32, 2, 2, [0, 0, 0, 0], 'avg', False, True) - verify_pool_grad(1, 256, 31, 3, 3, [1, 2, 1, 2], 'avg', False, True) - verify_pool_grad(1, 256, 32, 2, 2, [1, 2, 1, 2], 'avg', False, False) - verify_pool_grad(1, 256, 31, 4, 4, [2, 2, 2, 2], 'avg', False, False) - verify_pool_grad(1, 256, 31, 4, 4, [0, 0, 0, 0], 'avg', False, False) - verify_pool_grad(1, 256, 32, 2, 2, [0, 0, 0, 0], 'max', False) - verify_pool_grad(1, 256, 31, 3, 3, [2, 1, 2, 1], 'max', False) - verify_pool_grad(1, 256, 31, 3, 3, [2, 1, 2, 1], 'max', True) - - verify_pool_grad(1, 256, 31, 3, 3, [2, 1, 0, 3], 'avg', False, True) - verify_pool_grad(1, 256, 32, 2, 2, [0, 3, 2, 1], 'avg', False, False) - verify_pool_grad(1, 256, 31, 3, 3, [1, 0, 3, 2], 'max', False) - verify_pool_grad(1, 256, 31, 3, 3, [3, 2, 1, 0], 'max', True) - verify_pool_grad(1, 256, 32, 3, 2, [1, 1, 1, 1], 'max', False) - verify_pool_grad(1, 256, 32, 1, 2, [1, 1, 1, 1], 'avg', False, False) - - verify_pool_grad(1, 256, 31, 4, 4, [0, 0, 0, 0], 'avg', False, False, add_relu=True) - verify_pool_grad(1, 256, 32, 2, 2, [0, 0, 0, 0], 'max', False, add_relu=True) - - -def verify_global_pool(dshape, pool_type, layout='NCHW'): + verify_pool_grad(1, 256, 32, 3, 2, [1, 1, 1, 1], "avg", False, False) + verify_pool_grad(1, 256, 32, 2, 2, [0, 0, 0, 0], "avg", False, True) + verify_pool_grad(1, 256, 31, 3, 3, [1, 2, 1, 2], "avg", False, True) + verify_pool_grad(1, 256, 32, 2, 2, [1, 2, 1, 2], "avg", False, False) + verify_pool_grad(1, 256, 31, 
4, 4, [2, 2, 2, 2], "avg", False, False) + verify_pool_grad(1, 256, 31, 4, 4, [0, 0, 0, 0], "avg", False, False) + verify_pool_grad(1, 256, 32, 2, 2, [0, 0, 0, 0], "max", False) + verify_pool_grad(1, 256, 31, 3, 3, [2, 1, 2, 1], "max", False) + verify_pool_grad(1, 256, 31, 3, 3, [2, 1, 2, 1], "max", True) + + verify_pool_grad(1, 256, 31, 3, 3, [2, 1, 0, 3], "avg", False, True) + verify_pool_grad(1, 256, 32, 2, 2, [0, 3, 2, 1], "avg", False, False) + verify_pool_grad(1, 256, 31, 3, 3, [1, 0, 3, 2], "max", False) + verify_pool_grad(1, 256, 31, 3, 3, [3, 2, 1, 0], "max", True) + verify_pool_grad(1, 256, 32, 3, 2, [1, 1, 1, 1], "max", False) + verify_pool_grad(1, 256, 32, 1, 2, [1, 1, 1, 1], "avg", False, False) + + verify_pool_grad(1, 256, 31, 4, 4, [0, 0, 0, 0], "avg", False, False, add_relu=True) + verify_pool_grad(1, 256, 32, 2, 2, [0, 0, 0, 0], "max", False, add_relu=True) + + +def verify_global_pool(dshape, pool_type, layout="NCHW"): """verify function of global_pool""" assert layout in ["NCHW", "NHWC"] - A = te.placeholder(shape=dshape, name='A') + A = te.placeholder(shape=dshape, name="A") B = topi.nn.global_pool(A, pool_type=pool_type, layout=layout) B = topi.nn.relu(B) a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype) - axis = (layout.find('H'), layout.find('W')) - if pool_type == 'avg': + axis = (layout.find("H"), layout.find("W")) + if pool_type == "avg": b_np = np.mean(a_np, axis=axis, keepdims=True) - elif pool_type == 'max': + elif pool_type == "max": b_np = np.max(a_np, axis=axis, keepdims=True) b_np = np.maximum(b_np, 0.0) @@ -234,17 +273,18 @@ def verify_global_pool(dshape, pool_type, layout='NCHW'): for device, ctx in tvm.testing.enabled_targets(): check_device(device, ctx) + @tvm.testing.uses_gpu def test_global_pool(): """test cases of global_pool""" - verify_global_pool((1, 1024, 7, 7), 'avg') - verify_global_pool((4, 1024, 7, 7), 'avg') - verify_global_pool((1, 1024, 7, 7), 'max') - verify_global_pool((4, 1024, 7, 7), 'max') - verify_global_pool((1, 7, 7, 1024), 'avg', 'NHWC') - verify_global_pool((4, 7, 7, 1024), 'avg', 'NHWC') - verify_global_pool((1, 7, 7, 1024), 'max', 'NHWC') - verify_global_pool((4, 7, 7, 1024), 'max', 'NHWC') + verify_global_pool((1, 1024, 7, 7), "avg") + verify_global_pool((4, 1024, 7, 7), "avg") + verify_global_pool((1, 1024, 7, 7), "max") + verify_global_pool((4, 1024, 7, 7), "max") + verify_global_pool((1, 7, 7, 1024), "avg", "NHWC") + verify_global_pool((4, 7, 7, 1024), "avg", "NHWC") + verify_global_pool((1, 7, 7, 1024), "max", "NHWC") + verify_global_pool((4, 7, 7, 1024), "max", "NHWC") def verify_adaptive_pool(dshape, out_size, pool_type, layout="NCHW", dtype="float32"): @@ -297,8 +337,9 @@ def test_adaptive_pool(): verify_adaptive_pool((1, 16, 32, 32, 32), (2, 4, 4), "max", layout="NDHWC") -def verify_pool3d(n, ic, ih, kh, sh, padding, pool_type, - ceil_mode, count_include_pad=True, layout='NCDHW'): +def verify_pool3d( + n, ic, ih, kh, sh, padding, pool_type, ceil_mode, count_include_pad=True, layout="NCDHW" +): """verify function of pool3d""" id = iw = ih kd = kw = kh @@ -306,17 +347,25 @@ def verify_pool3d(n, ic, ih, kh, sh, padding, pool_type, input_shape = (n, ic, id, ih, iw) kernel = [kd, kh, kw] stride = [sd, sh, sw] - A = te.placeholder(input_shape, name='A') - B = topi.nn.pool3d(A, kernel=kernel, stride=stride, padding=padding, - pool_type=pool_type, ceil_mode=ceil_mode, - layout=layout, count_include_pad=count_include_pad) + A = te.placeholder(input_shape, name="A") + B = topi.nn.pool3d( + A, + 
kernel=kernel, + stride=stride, + padding=padding, + pool_type=pool_type, + ceil_mode=ceil_mode, + layout=layout, + count_include_pad=count_include_pad, + ) B = topi.nn.relu(B) dtype = A.dtype output_shape = [int(i) for i in B.shape] input_np = np.random.uniform(low=0.001, size=input_shape).astype(dtype) - ref_np = tvm.topi.testing.pool3d_ncdhw_python(input_np, kernel, stride, padding, - output_shape, pool_type, count_include_pad, ceil_mode) + ref_np = tvm.topi.testing.pool3d_ncdhw_python( + input_np, kernel, stride, padding, output_shape, pool_type, count_include_pad, ceil_mode + ) def check_device(device, ctx): print("Running on target: %s" % device) @@ -337,38 +386,47 @@ def verify_pool3d(n, ic, ih, kh, sh, padding, pool_type, @tvm.testing.uses_gpu def test_pool3d(): """test cases of pool3d""" - verify_pool3d(1, 256, 32, 2, 2, [0, 0, 0, 0, 0, 0], 'avg', False, True) - verify_pool3d(1, 256, 31, 3, 3, [1, 1, 2, 2, 2, 1], 'avg', False, True) - verify_pool3d(1, 256, 32, 2, 2, [1, 1, 2, 2, 2, 1], 'avg', False, False) - verify_pool3d(1, 256, 31, 4, 4, [3, 3, 3, 3, 3, 3], 'avg', False, False) - verify_pool3d(1, 256, 31, 4, 4, [0, 0, 0, 0, 0, 0], 'avg', False, False) - verify_pool3d(1, 256, 32, 2, 2, [0, 0, 0, 0, 0, 0], 'max', False) - verify_pool3d(1, 256, 31, 3, 3, [2, 2, 1, 1, 1, 2], 'max', False) - verify_pool3d(1, 256, 31, 3, 3, [2, 2, 1, 1, 1, 2], 'max', True) - - verify_pool3d(1, 256, 31, 3, 3, [2, 1, 0, 5, 4, 3], 'avg', False, True) - verify_pool3d(1, 256, 32, 2, 2, [0, 5, 4, 3, 2, 1], 'avg', False, False) - verify_pool3d(1, 256, 31, 3, 3, [1, 0, 5, 4, 3, 2], 'max', False) - verify_pool3d(1, 256, 31, 3, 3, [3, 2, 1, 0, 5, 4], 'max', True) - - -def verify_pool1d(n, ic, iw, kw, sw, padding, pool_type, - ceil_mode, count_include_pad=True, layout='NCW'): + verify_pool3d(1, 256, 32, 2, 2, [0, 0, 0, 0, 0, 0], "avg", False, True) + verify_pool3d(1, 256, 31, 3, 3, [1, 1, 2, 2, 2, 1], "avg", False, True) + verify_pool3d(1, 256, 32, 2, 2, [1, 1, 2, 2, 2, 1], "avg", False, False) + verify_pool3d(1, 256, 31, 4, 4, [3, 3, 3, 3, 3, 3], "avg", False, False) + verify_pool3d(1, 256, 31, 4, 4, [0, 0, 0, 0, 0, 0], "avg", False, False) + verify_pool3d(1, 256, 32, 2, 2, [0, 0, 0, 0, 0, 0], "max", False) + verify_pool3d(1, 256, 31, 3, 3, [2, 2, 1, 1, 1, 2], "max", False) + verify_pool3d(1, 256, 31, 3, 3, [2, 2, 1, 1, 1, 2], "max", True) + + verify_pool3d(1, 256, 31, 3, 3, [2, 1, 0, 5, 4, 3], "avg", False, True) + verify_pool3d(1, 256, 32, 2, 2, [0, 5, 4, 3, 2, 1], "avg", False, False) + verify_pool3d(1, 256, 31, 3, 3, [1, 0, 5, 4, 3, 2], "max", False) + verify_pool3d(1, 256, 31, 3, 3, [3, 2, 1, 0, 5, 4], "max", True) + + +def verify_pool1d( + n, ic, iw, kw, sw, padding, pool_type, ceil_mode, count_include_pad=True, layout="NCW" +): """verify function of pool1d""" input_shape = (n, ic, iw) kernel = [kw] stride = [sw] - A = te.placeholder(input_shape, name='A') - B = topi.nn.pool1d(A, kernel=kernel, stride=stride, padding=padding, - pool_type=pool_type, ceil_mode=ceil_mode, - layout=layout, count_include_pad=count_include_pad) + A = te.placeholder(input_shape, name="A") + B = topi.nn.pool1d( + A, + kernel=kernel, + stride=stride, + padding=padding, + pool_type=pool_type, + ceil_mode=ceil_mode, + layout=layout, + count_include_pad=count_include_pad, + ) B = topi.nn.relu(B) dtype = A.dtype output_shape = [int(i) for i in B.shape] input_np = np.random.uniform(low=0.001, size=input_shape).astype(dtype) - ref_np = tvm.topi.testing.pool1d_ncw_python(input_np, kernel, stride, padding, - output_shape, pool_type, 
count_include_pad, ceil_mode) + ref_np = tvm.topi.testing.pool1d_ncw_python( + input_np, kernel, stride, padding, output_shape, pool_type, count_include_pad, ceil_mode + ) def check_device(device, ctx): print("Running on target: %s" % device) @@ -389,19 +447,19 @@ def verify_pool1d(n, ic, iw, kw, sw, padding, pool_type, @tvm.testing.uses_gpu def test_pool1d(): """test cases of pool1d""" - verify_pool1d(1, 256, 32, 2, 2, [0, 0], 'avg', False, True) - verify_pool1d(1, 256, 31, 3, 3, [1, 2], 'avg', False, True) - verify_pool1d(1, 256, 32, 2, 2, [1, 2], 'avg', False, False) - verify_pool1d(1, 256, 31, 4, 4, [3, 3], 'avg', False, False) - verify_pool1d(1, 256, 31, 4, 4, [0, 0], 'avg', False, False) - verify_pool1d(1, 256, 32, 2, 2, [0, 0], 'max', False) - verify_pool1d(1, 256, 31, 3, 3, [2, 1], 'max', False) - verify_pool1d(1, 256, 31, 3, 3, [2, 1], 'max', True) - - verify_pool1d(1, 256, 31, 3, 3, [2, 5], 'avg', False, True) - verify_pool1d(1, 256, 32, 2, 2, [0, 3], 'avg', False, False) - verify_pool1d(1, 256, 31, 3, 3, [1, 4], 'max', False) - verify_pool1d(1, 256, 31, 3, 3, [3, 0], 'max', True) + verify_pool1d(1, 256, 32, 2, 2, [0, 0], "avg", False, True) + verify_pool1d(1, 256, 31, 3, 3, [1, 2], "avg", False, True) + verify_pool1d(1, 256, 32, 2, 2, [1, 2], "avg", False, False) + verify_pool1d(1, 256, 31, 4, 4, [3, 3], "avg", False, False) + verify_pool1d(1, 256, 31, 4, 4, [0, 0], "avg", False, False) + verify_pool1d(1, 256, 32, 2, 2, [0, 0], "max", False) + verify_pool1d(1, 256, 31, 3, 3, [2, 1], "max", False) + verify_pool1d(1, 256, 31, 3, 3, [2, 1], "max", True) + + verify_pool1d(1, 256, 31, 3, 3, [2, 5], "avg", False, True) + verify_pool1d(1, 256, 32, 2, 2, [0, 3], "avg", False, False) + verify_pool1d(1, 256, 31, 3, 3, [1, 4], "max", False) + verify_pool1d(1, 256, 31, 3, 3, [3, 0], "max", True) if __name__ == "__main__": diff --git a/tests/python/unittest/test_crt.py b/tests/python/unittest/test_crt.py index fe6b03b..3fc2e04 100644 --- a/tests/python/unittest/test_crt.py +++ b/tests/python/unittest/test_crt.py @@ -37,105 +37,114 @@ from tvm.topi.testing import conv2d_nchw_python BUILD = True DEBUG = False -TARGET = tvm.target.target.micro('host') +TARGET = tvm.target.target.micro("host") + def _make_sess_from_op(workspace, op_name, sched, arg_bufs): - with tvm.transform.PassContext(opt_level=3, config={'tir.disable_vectorize': True}): - mod = tvm.build(sched, arg_bufs, TARGET, target_host=TARGET, name=op_name) + with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): + mod = tvm.build(sched, arg_bufs, TARGET, target_host=TARGET, name=op_name) - return _make_session(workspace, mod) + return _make_session(workspace, mod) def _make_session(workspace, mod): - compiler = tvm.micro.DefaultCompiler(target=TARGET) - opts = tvm.micro.default_options(os.path.join(tvm.micro.CRT_ROOT_DIR, 'host')) - - micro_binary = tvm.micro.build_static_runtime( - # the x86 compiler *expects* you to give the exact same dictionary for both - # lib_opts and bin_opts. so the library compiler is mutating lib_opts and - # the binary compiler is expecting those mutations to be in bin_opts. 
- # TODO(weberlo) fix this very bizarre behavior - workspace, compiler, mod, lib_opts=opts['bin_opts'], bin_opts=opts['bin_opts']) - - flasher_kw = { - 'debug': DEBUG, - } - flasher = compiler.flasher(**flasher_kw) - return tvm.micro.Session(binary=micro_binary, flasher=flasher) + compiler = tvm.micro.DefaultCompiler(target=TARGET) + opts = tvm.micro.default_options(os.path.join(tvm.micro.CRT_ROOT_DIR, "host")) + + micro_binary = tvm.micro.build_static_runtime( + # the x86 compiler *expects* you to give the exact same dictionary for both + # lib_opts and bin_opts. so the library compiler is mutating lib_opts and + # the binary compiler is expecting those mutations to be in bin_opts. + # TODO(weberlo) fix this very bizarre behavior + workspace, + compiler, + mod, + lib_opts=opts["bin_opts"], + bin_opts=opts["bin_opts"], + ) + + flasher_kw = { + "debug": DEBUG, + } + flasher = compiler.flasher(**flasher_kw) + return tvm.micro.Session(binary=micro_binary, flasher=flasher) def _make_add_sess(workspace): - A = tvm.te.placeholder((2,), dtype='int8') - B = tvm.te.placeholder((1,), dtype='int8') - C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name='C') - sched = tvm.te.create_schedule(C.op) - return _make_sess_from_op(workspace, 'add', sched, [A, B, C]) + A = tvm.te.placeholder((2,), dtype="int8") + B = tvm.te.placeholder((1,), dtype="int8") + C = tvm.te.compute(A.shape, lambda i: A[i] + B[0], name="C") + sched = tvm.te.create_schedule(C.op) + return _make_sess_from_op(workspace, "add", sched, [A, B, C]) def _make_ident_sess(workspace): - A = tvm.te.placeholder((2,), dtype='int8') - B = tvm.te.compute(A.shape, lambda i: A[i], name='B') - sched = tvm.te.create_schedule(B.op) - return _make_sess_from_op(workspace, 'ident', sched, [A, B]) + A = tvm.te.placeholder((2,), dtype="int8") + B = tvm.te.compute(A.shape, lambda i: A[i], name="B") + sched = tvm.te.create_schedule(B.op) + return _make_sess_from_op(workspace, "ident", sched, [A, B]) def test_compile_runtime(): - """Test compiling the on-device runtime.""" - workspace = tvm.micro.Workspace() + """Test compiling the on-device runtime.""" + workspace = tvm.micro.Workspace() - with _make_add_sess(workspace) as sess: - A_data = tvm.nd.array(np.array([2, 3], dtype='int8'), ctx=sess.context) - assert (A_data.asnumpy() == np.array([2, 3])).all() - B_data = tvm.nd.array(np.array([4], dtype='int8'), ctx=sess.context) - assert (B_data.asnumpy() == np.array([4])).all() - C_data = tvm.nd.array(np.array([0, 0], dtype='int8'), ctx=sess.context) - assert (C_data.asnumpy() == np.array([0, 0])).all() + with _make_add_sess(workspace) as sess: + A_data = tvm.nd.array(np.array([2, 3], dtype="int8"), ctx=sess.context) + assert (A_data.asnumpy() == np.array([2, 3])).all() + B_data = tvm.nd.array(np.array([4], dtype="int8"), ctx=sess.context) + assert (B_data.asnumpy() == np.array([4])).all() + C_data = tvm.nd.array(np.array([0, 0], dtype="int8"), ctx=sess.context) + assert (C_data.asnumpy() == np.array([0, 0])).all() - system_lib = sess.get_system_lib() - system_lib.get_function('add')(A_data, B_data, C_data) - assert (C_data.asnumpy() == np.array([6, 7])).all() + system_lib = sess.get_system_lib() + system_lib.get_function("add")(A_data, B_data, C_data) + assert (C_data.asnumpy() == np.array([6, 7])).all() def test_reset(): - """Test when the remote end resets during a session.""" - workspace = tvm.micro.Workspace() + """Test when the remote end resets during a session.""" + workspace = tvm.micro.Workspace() - with _make_add_sess(workspace) as sess: - try: - 
sess._rpc.get_function('tvm.testing.reset_server')() - assert False, 'expected to raise SessionTerminatedError; did not raise' - except transport.SessionTerminatedError: - pass + with _make_add_sess(workspace) as sess: + try: + sess._rpc.get_function("tvm.testing.reset_server")() + assert False, "expected to raise SessionTerminatedError; did not raise" + except transport.SessionTerminatedError: + pass def test_graph_runtime(): - """Test use of the graph runtime with microTVM.""" - workspace = tvm.micro.Workspace() - relay_mod = tvm.parser.fromtext( - """ + """Test use of the graph runtime with microTVM.""" + workspace = tvm.micro.Workspace() + relay_mod = tvm.parser.fromtext( + """ #[version = "0.0.5"] def @main(%a : Tensor[(1, 2), uint8], %b : Tensor[(1, 2), uint8]) { %0 = %a + %b; %0 - }""") + }""" + ) - with tvm.transform.PassContext(opt_level=3, config={'tir.disable_vectorize': True}): - factory = tvm.relay.build(relay_mod, target=TARGET) + with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}): + factory = tvm.relay.build(relay_mod, target=TARGET) - with _make_session(workspace, factory.get_lib()) as sess: - graph_mod = tvm.micro.create_local_graph_runtime(factory.get_json(), sess.get_system_lib(), sess.context) - A_data = tvm.nd.array(np.array([2, 3], dtype='uint8'), ctx=sess.context) - assert (A_data.asnumpy() == np.array([2, 3])).all() - B_data = tvm.nd.array(np.array([4, 7], dtype='uint8'), ctx=sess.context) - assert (B_data.asnumpy() == np.array([4, 7])).all() + with _make_session(workspace, factory.get_lib()) as sess: + graph_mod = tvm.micro.create_local_graph_runtime( + factory.get_json(), sess.get_system_lib(), sess.context + ) + A_data = tvm.nd.array(np.array([2, 3], dtype="uint8"), ctx=sess.context) + assert (A_data.asnumpy() == np.array([2, 3])).all() + B_data = tvm.nd.array(np.array([4, 7], dtype="uint8"), ctx=sess.context) + assert (B_data.asnumpy() == np.array([4, 7])).all() - graph_mod.run(a=A_data, b=B_data) + graph_mod.run(a=A_data, b=B_data) - out = graph_mod.get_output(0) - assert (out.asnumpy() == np.array([6, 10])).all() + out = graph_mod.get_output(0) + assert (out.asnumpy() == np.array([6, 10])).all() -if __name__ == '__main__': - test_compile_runtime() - test_reset() - test_graph_runtime() +if __name__ == "__main__": + test_compile_runtime() + test_reset() + test_graph_runtime() diff --git a/tests/python/unittest/test_runtime_rpc.py b/tests/python/unittest/test_runtime_rpc.py index bb0a4e0..d25eff2 100644 --- a/tests/python/unittest/test_runtime_rpc.py +++ b/tests/python/unittest/test_runtime_rpc.py @@ -159,6 +159,7 @@ def test_rpc_echo(): check(rpc.LocalSession()) check(client) + def check_minrpc(): if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None: return @@ -175,8 +176,10 @@ def test_rpc_echo(): session_constructor_args=["rpc.PopenSession", open(minrpc_exec, "rb").read()], ) check(client) + check_minrpc() + def test_rpc_file_exchange(): if not tvm.runtime.enabled("rpc"): return diff --git a/tutorials/micro/micro_tflite.py b/tutorials/micro/micro_tflite.py index 0cd6a4f..6fd2de1 100644 --- a/tutorials/micro/micro_tflite.py +++ b/tutorials/micro/micro_tflite.py @@ -157,7 +157,9 @@ mod, params = relay.frontend.from_tflite( # TARGET = tvm.target.target.micro("host") -with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True},disabled_pass=["FuseOps"]): +with tvm.transform.PassContext( + opt_level=3, config={"tir.disable_vectorize": True}, disabled_pass=["FuseOps"] +): graph, c_mod, 
c_params = relay.build(mod, target=TARGET, params=params) @@ -173,11 +175,16 @@ compiler = tvm.micro.DefaultCompiler(target=TARGET) opts = tvm.micro.default_options(os.path.join(tvm.micro.CRT_ROOT_DIR, "host")) micro_binary = tvm.micro.build_static_runtime( - # the x86 compiler *expects* you to give the exact same dictionary for both - # lib_opts and bin_opts. so the library compiler is mutating lib_opts and - # the binary compiler is expecting those mutations to be in bin_opts. - # TODO(weberlo) fix this very bizarre behavior - workspace, compiler, c_mod, lib_opts=opts["bin_opts"], bin_opts=opts["bin_opts"]) + # the x86 compiler *expects* you to give the exact same dictionary for both + # lib_opts and bin_opts. so the library compiler is mutating lib_opts and + # the binary compiler is expecting those mutations to be in bin_opts. + # TODO(weberlo) fix this very bizarre behavior + workspace, + compiler, + c_mod, + lib_opts=opts["bin_opts"], + bin_opts=opts["bin_opts"], +) ###################################################################### @@ -191,7 +198,8 @@ micro_binary = tvm.micro.build_static_runtime( flasher = compiler.flasher() with tvm.micro.Session(binary=micro_binary, flasher=flasher) as session: graph_mod = tvm.micro.create_local_graph_runtime( - graph, session.get_system_lib(), session.context) + graph, session.get_system_lib(), session.context + ) # Set the model parameters using the lowered parameters produced by `relay.build`. graph_mod.set_input(**c_params) @@ -203,4 +211,4 @@ with tvm.micro.Session(binary=micro_binary, flasher=flasher) as session: graph_mod.run() tvm_output = graph_mod.get_output(0).asnumpy() - print("result is: "+str(tvm_output)) + print("result is: " + str(tvm_output)) -- 2.7.4
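The TODO(weberlo) comment that this patch preserves in both test_crt.py and micro_tflite.py describes a quirk worth spelling out: the library compiler mutates the options dictionary it is given, and the binary compiler only sees those mutations because the call site passes the very same dict object for both lib_opts and bin_opts. The sketch below is not TVM code; compile_library and compile_binary are hypothetical stand-ins, and the "include_dirs" key is an invented example, used only to illustrate the in-place-mutation-plus-aliasing behavior the comment relies on.

def compile_library(lib_opts):
    # Stand-in for the library compiler: it mutates its options in place,
    # e.g. by recording an include directory discovered during the build.
    lib_opts.setdefault("include_dirs", []).append("generated/include")


def compile_binary(bin_opts):
    # Stand-in for the binary compiler: it assumes the mutations made by
    # compile_library are already present in the options it receives.
    assert "include_dirs" in bin_opts, "library-step mutations are missing"


opts = {"cflags": ["-O2"]}
compile_library(lib_opts=opts)  # mutates opts in place
compile_binary(bin_opts=opts)   # succeeds only because both names alias one dict

Passing an independent copy instead (e.g. a dict(opts) snapshot taken before the library step) would drop the mutations and trip the assertion, which is why both keyword arguments in build_static_runtime currently receive opts["bin_opts"].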