import numpy as np
from tvm.te import schedule, thread_axis
+from tvm.tir import expr
from tvm.autotvm.util import get_const_int
# Lightweight immutable record tying a config-space object ('space') to a
# position within it ('index'). NOTE(review): field semantics inferred from
# names only — the consuming code is outside this chunk; confirm against
# the autotvm task-space module. Requires `from collections import namedtuple`
# earlier in the file (not visible in this excerpt).
Axis = namedtuple('Axis', ['space', 'index'])
Parameters
---------
- flop: int or float
+ flop: int or float or IntImm or FloatImm
number of float operations
"""
- self.flop += flop
+ if isinstance(flop, (expr.IntImm, expr.FloatImm)):
+ flop = flop.value
+ self.flop += float(flop)
def raise_error(self, msg):
"""register error in config
"""Compute definition for conv2d with cuda backend"""
from tvm import te
from tvm import autotvm
+from tvm.autotvm.task.space import OtherOptionEntity
from tvm.contrib import cudnn
from .. import nn, generic
else:
dtype = data.dtype
+ cfg.define_knob('algo', range(8))
+ if cfg.is_fallback: # Let CUDNN choose the best algo
+ cfg['algo'] = OtherOptionEntity(-1)
+
return cudnn.conv_forward(data,
kernel,
[pt, pl], # cudnn padding pt, pl on both sides of input
[dilation_h, dilation_w],
conv_mode=1,
tensor_format=tensor_format,
- algo=-1, # let CUDNN choose the best algo
+ algo=cfg['algo'].val,
conv_dtype=dtype,
groups=groups)