from ..expr import RefCreate, RefRead, RefWrite
from ..expr_functor import ExprFunctor
from ..adt import Match, Clause
+from ..op.tensor import minimum as _minimum, maximum as _maximum
from .common import AttrCvt, Renamer
from .common import get_relay_op, new_var, infer_shape, infer_channels
return _vision.roi_align(x, rois, [output_height, output_width],
spatial_scale, sampling_ratio)
class Clip(OnnxOpConverter):
    """Operator converter for Clip."""

    @staticmethod
    def convert_attributes(inputs, attr, params):
        # Map ONNX attribute names onto relay's clip(a_min, a_max).
        convert = AttrCvt('clip', transforms={'min': 'a_min', 'max': 'a_max'})
        return convert(inputs, attr, params)

    @classmethod
    def _impl_v1(cls, inputs, attr, params):
        # Opset 1/6 always carries min/max as node attributes.
        return Clip.convert_attributes(inputs, attr, params)

    @classmethod
    def _impl_v11(cls, inputs, attr, params):
        # Some exporters still emit min/max as attributes even at opset 11;
        # reuse the attribute-based lowering when both are present.
        if 'min' in attr and 'max' in attr:
            return Clip.convert_attributes(inputs, attr, params)

        assert len(inputs) <= 3, "Clip-11 takes up to 3 inputs, input, min, max"
        result = inputs[0]
        # Optional trailing inputs: inputs[1] is the lower bound (clamp up
        # with maximum), inputs[2] is the upper bound (clamp down with minimum).
        if len(inputs) >= 2:
            result = _maximum(result, inputs[1])
        if len(inputs) == 3:
            result = _minimum(result, inputs[2])
        return result
+
# compatible operators that do NOT require any conversion.
_identity_list = []
'Min': Minimum.get_converter(opset),
'Sum': Sum.get_converter(opset),
'Mean': Mean.get_converter(opset),
- 'Clip': AttrCvt('clip', transforms={'min': 'a_min', 'max': 'a_max'}),
+ 'Clip': Clip.get_converter(opset),
# softmax default axis is different in onnx
'Softmax': Softmax.get_converter(opset),
'LogSoftmax': AttrCvt('log_softmax', {'axis': ('axis', 1)}),
tvm_out = get_tvm_output(model, x, target, ctx, out_shape, dtype)
tvm.testing.assert_allclose(c2_out, tvm_out, rtol=1e-5, atol=1e-5)
def make_constant_node(name, data_type, dims, vals):
    """Return an ONNX Constant node whose value is a tensor with the given
    name, element type, shape and flat values."""
    tensor = helper.make_tensor(name=name,
                                data_type=data_type,
                                dims=dims,
                                vals=vals)
    return helper.make_node('Constant', inputs=[], outputs=[name], value=tensor)
@tvm.testing.uses_gpu
def test_reshape():
@tvm.testing.uses_gpu
def test_clip_min_max_as_inputs():
    """Clip-11 with min/max supplied as graph inputs rather than attributes."""
    input_shape = (2, 4, 5, 6)
    # min/max arrive as Constant-fed inputs, exercising the opset-11 path
    # of the Clip converter.
    nodes = [
        make_constant_node('min', onnx.TensorProto.FLOAT, (), [0.]),
        make_constant_node('max', onnx.TensorProto.FLOAT, (), [6.]),
        helper.make_node('Clip', inputs=['in', 'min', 'max'], outputs=['out']),
    ]
    in_info = helper.make_tensor_value_info("in", TensorProto.FLOAT,
                                            list(input_shape))
    out_info = helper.make_tensor_value_info("out", TensorProto.FLOAT,
                                             list(input_shape))
    graph = helper.make_graph(nodes, "clip_test",
                              inputs=[in_info], outputs=[out_info])
    model = helper.make_model(graph, producer_name='clip_test')

    # Sample beyond [0, 6] so both bounds actually clamp something.
    indata = np.random.uniform(-1, 7, size=input_shape).astype('float32')
    onnx_out = get_onnxruntime_output(model, indata, 'float32')
    for target, ctx in tvm.testing.enabled_targets():
        tvm_out = get_tvm_output(model, indata, target, ctx,
                                 input_shape, 'float32')
        tvm.testing.assert_allclose(onnx_out, tvm_out)
+
@tvm.testing.uses_gpu
def test_round():
    """Elementwise Round operator, checked against numpy.round."""
    _test_onnx_op_elementwise((2, 4, 5, 6), np.round, {}, 'float32', 'Round', {})
@tvm.testing.uses_gpu
def test_resize():
- def make_constant_node(name, data_type, dims, vals):
- return helper.make_node('Constant',
- inputs=[],
- outputs=[name],
- value=helper.make_tensor(name=name,
- data_type=data_type,
- dims=dims,
- vals=vals))
-
def verify(ishape, oshape, scales, mode, coord_trans):
nodes = [
make_constant_node('roi', onnx.TensorProto.FLOAT, (0,), []),
test_isinf()
test_isnan()
test_clip()
+ test_clip_min_max_as_inputs()
test_onehot()
test_matmul()
test_batch_matmul()