# tvm.runtime
from .runtime.object import Object
from .runtime.ndarray import context, cpu, gpu, opencl, cl, vulkan, metal, mtl
-from .runtime.ndarray import vpi, rocm, opengl, ext_dev, micro_dev, hexagon
+from .runtime.ndarray import vpi, rocm, ext_dev, micro_dev, hexagon
from .runtime import ndarray as nd
# tvm.error
8 : 'metal',
9 : 'vpi',
10: 'rocm',
- 11: 'opengl',
12: 'ext_dev',
13: 'micro_dev',
14: 'hexagon',
'metal': 8,
'vpi': 9,
'rocm': 10,
- 'opengl': 11,
'ext_dev': 12,
'micro_dev': 13,
'hexagon': 14,
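# A minimal consistency check for the two device tables above, assuming they
# are exposed as TVMContext.MASK2STR / TVMContext.STR2MASK in
# tvm/_ffi/runtime_ctypes.py: after dropping entry 11 ('opengl'), the tables
# must remain exact inverses (modulo extra aliases such as 'cl' and 'mtl').
from tvm._ffi.runtime_ctypes import TVMContext

assert 11 not in TVMContext.MASK2STR
assert 'opengl' not in TVMContext.STR2MASK
for code, name in TVMContext.MASK2STR.items():
    assert TVMContext.STR2MASK[name] == code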
from . import hls
from . import mali
from . import bifrost
-from . import opengl
from . import rocm
from . import intel_graphics
+++ /dev/null
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-"""Definition of OpenGL operator strategy."""
-# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
-import topi
-from .generic import *
-from .. import op as _op
-
-@schedule_injective.register("opengl")
-def schedule_injective_opengl(attrs, outs, target):
- """schedule injective ops for opengl"""
- with target:
- return topi.opengl.schedule_injective(outs)
-
-@schedule_concatenate.register("opengl")
-def schedule_concatenate_opengl(attrs, outs, target):
- """schedule concatenate for opengl"""
- with target:
- return topi.opengl.schedule_injective(outs)
-
-@schedule_pool.register("opengl")
-def schedule_pool_opengl(attrs, outs, target):
- """schedule pooling ops for opengl"""
- with target:
- return topi.opengl.schedule_pool(outs, attrs.layout)
-
-@schedule_adaptive_pool.register("opengl")
-def schedule_adaptive_pool_opengl(attrs, outs, target):
-    """schedule adaptive pooling ops for opengl"""
- with target:
- return topi.opengl.schedule_adaptive_pool(outs)
-
-@softmax_strategy.register("opengl")
-def softmax_strategy_opengl(attrs, inputs, out_type, target):
- """softmax opengl strategy"""
- strategy = _op.OpStrategy()
- strategy.add_implementation(
- wrap_compute_softmax(topi.nn.softmax),
- wrap_topi_schedule(topi.opengl.schedule_softmax),
- name="softmax.opengl")
- return strategy
-
-@schedule_log_softmax.register("opengl")
-def schedule_log_softmax_opengl(attrs, outs, target):
- """schedule log_softmax for opengl"""
- with target:
- return topi.opengl.schedule_softmax(outs)
-
-@conv2d_strategy.register("opengl")
-def conv2d_strategy_opengl(attrs, inputs, out_type, target):
- """conv2d opengl strategy"""
- strategy = _op.OpStrategy()
- groups = attrs.groups
- layout = attrs.data_layout
- assert groups == 1, "Don't support group conv2d on OpenGL"
- assert layout == "NCHW", "Only support conv2d layout NCHW for OpenGL"
- strategy.add_implementation(wrap_compute_conv2d(topi.nn.conv2d),
- wrap_topi_schedule(topi.opengl.schedule_conv2d_nchw),
- name="conv2d_nchw.opengl")
- return strategy
-
-@dense_strategy.register("opengl")
-def dense_strategy_opengl(attrs, inputs, out_type, target):
- """dense opengl strategy"""
- strategy = _op.OpStrategy()
- strategy.add_implementation(wrap_compute_dense(topi.nn.dense),
- wrap_topi_schedule(topi.opengl.schedule_dense),
- name="dense.opengl")
- return strategy
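# The deleted file above is one instance of relay's per-target strategy
# registration. A minimal sketch of the same pattern for a hypothetical
# target key "mydev", falling back to the generic TOPI schedules; the
# decorators and wrappers come from tvm.relay.op.strategy.generic.
import topi
from tvm.relay import op as _op
from tvm.relay.op.strategy.generic import (
    schedule_injective, softmax_strategy,
    wrap_compute_softmax, wrap_topi_schedule)

@schedule_injective.register("mydev")
def schedule_injective_mydev(attrs, outs, target):
    """Fall back to the generic injective schedule for "mydev"."""
    with target:
        return topi.generic.schedule_injective(outs)

@softmax_strategy.register("mydev")
def softmax_strategy_mydev(attrs, inputs, out_type, target):
    """Register a single generic softmax implementation for "mydev"."""
    strategy = _op.OpStrategy()
    strategy.add_implementation(
        wrap_compute_softmax(topi.nn.softmax),
        wrap_topi_schedule(topi.generic.schedule_softmax),
        name="softmax.mydev")
    return strategy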
"""Construct Metal device."""
return self.context(8, dev_id)
- def opengl(self, dev_id=0):
- """Construct OpenGL device."""
- return self.context(11, dev_id)
-
def ext_dev(self, dev_id=0):
"""Construct extension device."""
return self.context(12, dev_id)
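# A hedged usage sketch: with the opengl() helper gone from the session-style
# device constructors above, devices are still built from the remaining named
# helpers or from the string form; both resolve to the numeric codes listed
# earlier (metal=8, ext_dev=12).
import tvm

assert tvm.context("metal", 0).device_type == 8
assert tvm.context("ext_dev", 0).device_type == 12
assert tvm.metal(0).device_type == tvm.context("metal", 0).device_type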
# function exposures
from .object_generic import convert_to_object, convert, const
from .ndarray import context, cpu, gpu, opencl, cl, vulkan, metal, mtl
-from .ndarray import vpi, rocm, opengl, ext_dev, micro_dev
+from .ndarray import vpi, rocm, ext_dev, micro_dev
from .module import load_module, enabled, system_lib
from .container import String
return TVMContext(7, dev_id)
-def opengl(dev_id=0):
-    """Construct an OpenGL device
-
- Parameters
- ----------
- dev_id : int, optional
- The integer device id
-
- Returns
- -------
- ctx : TVMContext
- The created context
- """
- return TVMContext(11, dev_id)
-
-
def ext_dev(dev_id=0):
"""Construct a extension device
We can also use other specific functions in this module to create specific targets.
"""
from .target import Target, create
-from .target import cuda, rocm, mali, intel_graphics, opengl, arm_cpu, rasp, vta, bifrost, hexagon
+from .target import cuda, rocm, mali, intel_graphics, arm_cpu, rasp, vta, bifrost, hexagon
from .generic_func import GenericFunc
from .generic_func import generic_func, get_native_generic_func, override_native_generic_func
from . import datatype
return _ffi_api.TargetCreate("opencl", *opts)
-def opengl(model='unknown', options=None):
-    """Returns an OpenGL target.
-
- Parameters
- ----------
- options : str or list of str
- Additional options
- """
- opts = _merge_opts(["-model=%s" % model], options)
- return _ffi_api.TargetCreate("opengl", *opts)
-
-
def arm_cpu(model='unknown', options=None):
"""Returns a ARM CPU target.
This function will also download pre-tuned op parameters when there is none.
"""
_ffi_api.StageDoubleBuffer(self)
- def opengl(self):
- """The special OpenGL schedule
-
- Maps each output element to a pixel.
- """
- _ffi_api.StageOpenGL(self)
-
@tvm._ffi.register_object
class SpecializedCondition(Object):
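# Stage-level primitives such as double_buffer() (StageDoubleBuffer above)
# survive this removal; only the OpenGL pixel-mapping primitive is dropped.
# A minimal te sketch exercising a remaining primitive:
from tvm import te

n = te.var("n")
A = te.placeholder((n,), name="A")
B = te.compute((n,), lambda i: A[i] + 1.0, name="B")
s = te.create_schedule(B.op)
AA = s.cache_read(A, "shared", [B])
s[AA].double_buffer()  # mark the cache stage for double buffering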
from . import mali
from . import bifrost
from . import intel_graphics
-from . import opengl
from . import util
from . import rocm
from . import vision
+++ /dev/null
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# pylint: disable=redefined-builtin, wildcard-import
-"""CUDA specific declaration and schedules."""
-from __future__ import absolute_import as _abs
-
-from .conv2d_nchw import schedule_conv2d_nchw
-from .injective import schedule_injective, schedule_elemwise, schedule_broadcast
-from .softmax import schedule_softmax
-from .dense import schedule_dense
-from .pooling import schedule_pool, schedule_adaptive_pool
+++ /dev/null
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#pylint: disable=invalid-name, no-member, too-many-locals, too-many-statements, too-many-arguments, too-many-branches, line-too-long
-"""Schedule for conv2d_nchw with auto fusion"""
-import tvm
-from tvm import te
-from .. import tag
-
-def schedule_conv2d_nchw(outs):
- """Schedule for conv2d_nchw.
-
- Parameters
- ----------
- outs: Array of Tensor
- The computation graph description of conv2d_nchw
- in the format of an array of tensors.
-
- Returns
- -------
- s: Schedule
- The computation schedule for conv2d_nchw.
- """
- outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
- s = te.create_schedule([x.op for x in outs])
- scheduled_ops = []
-
- def _schedule(conv2d, data):
- if conv2d.op in s.outputs:
- Out = conv2d
- else:
- Out = outs[0].op.output(0)
- s[conv2d].opengl()
- s[Out].opengl()
- s[data].opengl()
-
- def traverse(OP):
- """Internal traverse function"""
- # inline all one-to-one-mapping operators except the last stage (output)
- if tag.is_broadcast(OP.tag):
- if OP not in s.outputs:
- s[OP].opengl()
- for tensor in OP.input_tensors:
- if isinstance(tensor.op, tvm.te.ComputeOp) and tensor.op not in scheduled_ops:
- traverse(tensor.op)
- # schedule conv2d_nchw
- elif OP.tag.startswith('conv2d_nchw'):
- conv2d = OP.output(0)
- data = OP.input_tensors[0]
- kernel = OP.input_tensors[1]
- if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
- s[kernel].compute_inline()
- _schedule(conv2d, data)
- else:
- raise RuntimeError("Unsupported operator: %s" % OP.tag)
-
- scheduled_ops.append(OP)
-
- traverse(outs[0].op)
- return s
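# The traversal idiom above (inline broadcast/elementwise stages, then apply
# the target primitive at the anchor op) is shared by all of these deleted
# files; in the surviving backends the inlining step is compute_inline()
# rather than the removed opengl(). A minimal sketch of that step:
from tvm import te
from topi import tag

A = te.placeholder((8, 8), name="A")
B = te.compute((8, 8), lambda i, j: A[i, j] * 2.0, name="B", tag=tag.ELEMWISE)
C = te.compute((8, 8), lambda i, j: B[i, j] + 1.0, name="C")
s = te.create_schedule(C.op)
s[B].compute_inline()  # fuse the elementwise stage into its consumer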
+++ /dev/null
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# pylint: disable=invalid-name, unused-variable
-"""Schedule for dense operator"""
-from tvm import te
-from .. import tag
-
-def schedule_dense(outs):
- """Schedule for dense operator.
-
- Parameters
- ----------
- outs: Array of Tensor
- The computation graph description of dense
- in the format of an array of tensors.
-
- Returns
- -------
- s: Schedule
- The computation schedule for dense.
- """
- outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
- s = te.create_schedule([x.op for x in outs])
- scheduled_ops = []
-
- def _schedule(Dense):
- if Dense.op in s.outputs:
- Out = Dense
- else:
- Out = outs[0].op.output(0)
- s[Dense].opengl()
- s[Out].opengl()
-
- def traverse(OP):
- """Internal traverse function"""
- # inline all one-to-one-mapping operators except the last stage (output)
- if tag.is_broadcast(OP.tag):
- if OP not in s.outputs:
- s[OP].compute_inline()
- for tensor in OP.input_tensors:
- if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
- traverse(tensor.op)
- # schedule dense
- elif OP.tag == 'dense':
- Dense = OP.output(0)
- _schedule(Dense)
- else:
- raise RuntimeError("Unsupported operator: %s" % OP.tag)
-
- scheduled_ops.append(OP)
-
- traverse(outs[0].op)
- return s
+++ /dev/null
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# pylint: disable=invalid-name, unused-variable,
-"""Schedule for composition of injective operator"""
-from tvm import te
-
-def schedule_injective_from_existing(sch, out):
- """Schedule for injective op from existing schedule.
-
- Parameters
- ----------
- sch: Schedule
- The schedule to update.
- out: Tensor
- The tensor representing the injective op.
-
- Returns
- -------
- sch: Schedule
- The updated schedule.
- """
- sch[out].opengl()
- return sch
-
-def schedule_injective(outs):
- """Schedule for injective op.
-
- Parameters
- ----------
- outs: Array of Tensor
- The computation graph description of injective in the format
- of an array of tensors.
-
- Returns
- -------
- sch: Schedule
- The computation schedule for the op.
- """
- outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
- s = te.create_schedule([x.op for x in outs])
-
- te.schedule.AutoInlineInjective(s)
- for out in outs:
- schedule_injective_from_existing(s, out)
- return s
-
-schedule_elemwise = schedule_injective
-schedule_broadcast = schedule_injective
+++ /dev/null
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# pylint: disable=invalid-name, unused-variable, unused-argument
-"""Schedule for pooling operators"""
-from tvm import te
-from .. import tag
-
-def schedule_adaptive_pool(outs):
- """Schedule for adaptive pool.
-
- Parameters
- ----------
- outs: Array of Tensor
- The computation graph description of global_pool
- in the format of an array of tensors.
-
- Returns
- -------
- s: Schedule
- The computation schedule for adaptive pool.
- """
- outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
- s = te.create_schedule([x.op for x in outs])
- scheduled_ops = []
-
- def _schedule(Pool):
- if Pool.op in s.outputs:
- Out = Pool
- else:
- Out = outs[0].op.output(0)
- s[Pool].opengl()
- s[Out].opengl()
-
- def traverse(OP):
- """Internal traverse function"""
- # inline all one-to-one-mapping operators except the last stage (output)
- if tag.is_broadcast(OP.tag):
- if OP not in s.outputs:
- s[OP].opengl()
- for tensor in OP.input_tensors:
- if isinstance(tensor.op, te.tensor.ComputeOp) and tensor.op not in scheduled_ops:
- traverse(tensor.op)
- # schedule global_pool
- elif OP.tag.startswith('adaptive_pool'):
- Pool = OP.output(0)
- _schedule(Pool)
- else:
- raise RuntimeError("Unsupported operator: %s" % OP.tag)
-
- scheduled_ops.append(OP)
-
- traverse(outs[0].op)
- return s
-
-
-def schedule_pool(outs, layout):
- """Schedule for pool.
-
- Parameters
- ----------
- outs: Array of Tensor
- The computation graph description of pool
- in the format of an array of tensors.
-
- layout: str
- Data layout.
-
- Returns
- -------
- s: Schedule
- The computation schedule for pool.
- """
- outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
- s = te.create_schedule([x.op for x in outs])
- scheduled_ops = []
-
- def _schedule(PaddedInput, Pool):
- if isinstance(PaddedInput.op, te.tensor.ComputeOp):
- s[PaddedInput].opengl()
- if Pool.op in s.outputs:
- Out = Pool
- else:
- Out = outs[0].op.output(0)
- s[Pool].opengl()
- s[Out].opengl()
-
- def traverse(OP):
- """Internal traverse function"""
- # inline all one-to-one-mapping operators except the last stage (output)
- if tag.is_broadcast(OP.tag):
- if OP not in s.outputs:
- s[OP].compute_inline()
- for tensor in OP.input_tensors:
- if tensor.op not in scheduled_ops and isinstance(tensor.op, te.tensor.ComputeOp):
- traverse(tensor.op)
- # schedule pool
- elif OP.tag.startswith('pool'):
- PaddedInput = OP.input_tensors[0]
- Pool = OP.output(0)
- _schedule(PaddedInput, Pool)
- else:
- raise RuntimeError("Unsupported operator: %s" % OP.tag)
-
- scheduled_ops.append(OP)
-
- traverse(outs[0].op)
- return s
+++ /dev/null
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# pylint: disable=invalid-name, unused-variable, trailing-whitespace
-"""Schedule for softmax operator"""
-from tvm import te
-
-def schedule_softmax(outs):
- """Schedule for softmax op.
-
- Parameters
- ----------
- outs: Array of Tensor
- The computation graph description of softmax in the format
- of an array of tensors.
-
- Returns
- -------
- sch: Schedule
- The computation schedule for the op.
- """
- outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs
- s = te.create_schedule([x.op for x in outs])
- softmax = outs[0]
-
- op_tag = softmax.op.tag
- if op_tag == 'softmax_output':
- expsum = softmax.op.input_tensors[1]
- exp = softmax.op.input_tensors[0]
- max_elem = s[exp].op.input_tensors[1]
- elif op_tag == 'log_softmax_output':
- exp = None
- max_elem = softmax.op.input_tensors[1]
- expsum = softmax.op.input_tensors[2]
- else:
- raise ValueError('Tag is expected to be softmax_output or log_softmax_output. \
- Got {0}'.format(op_tag))
-
- if exp is not None:
- s[exp].opengl()
-
- s[max_elem].opengl()
- s[expsum].opengl()
- s[softmax].opengl()
- return s
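# The deleted schedule relies on the tensor graph topi emits for softmax:
# max_elem -> exp -> expsum -> normalization, with 'softmax_output' tagging
# the final stage. A small probe of that structure, mirroring the indices
# the schedule used:
from tvm import te
import topi

x = te.placeholder((4, 16), name="x")
y = topi.nn.softmax(x)
assert y.op.tag == "softmax_output"
exp = y.op.input_tensors[0]         # exp(x - max_elem)
expsum = y.op.input_tensors[1]      # row-wise sum of exp
max_elem = exp.op.input_tensors[1]  # row-wise max of x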
"arm_cpu": topi.arm_cpu.schedule_injective,
"gpu": topi.cuda.schedule_injective,
"hls": topi.hls.schedule_injective,
- "opengl": topi.opengl.schedule_injective
}
_reduce_schedule = {
topi.mali.schedule_conv2d_nchw_spatial_pack),
"bifrost": (topi.bifrost.conv2d_nchw_spatial_pack,
topi.bifrost.schedule_conv2d_nchw_spatial_pack),
- "opengl": (topi.nn.conv2d_nchw, topi.opengl.schedule_conv2d_nchw),
"intel_graphics": (topi.intel_graphics.conv2d_nchw,
topi.intel_graphics.schedule_conv2d_nchw),
"hls": (topi.nn.conv2d_nchw, topi.hls.schedule_conv2d_nchw)
(topi.cuda.dense_large_batch, topi.cuda.schedule_dense_large_batch)],
"mali": [(topi.mali.dense, topi.mali.schedule_dense)],
"bifrost": [(topi.bifrost.dense, topi.bifrost.schedule_dense)],
- "opengl": [(topi.nn.dense, topi.opengl.schedule_dense)],
"rocm": [(topi.rocm.dense, topi.rocm.schedule_dense)],
"hls": [(topi.nn.dense, topi.hls.schedule_dense)],
}
"cpu": topi.x86.schedule_softmax,
"gpu": topi.cuda.schedule_softmax,
"hls": topi.hls.schedule_softmax,
- "opengl": topi.opengl.schedule_softmax,
}
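# A hedged sketch of how such per-target tables are consulted; the helper
# name below is illustrative, not the one used in the surrounding test code.
import tvm
import topi

def lookup_schedule(target_str, table, default=topi.generic.schedule_injective):
    """Pick the first schedule whose key matches one of the target's keys."""
    target = tvm.target.create(target_str)
    for key in target.keys:
        if key in table:
            return table[key]
    return default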
def check_device(A, B, a_np, b_np, device, name):