def _mx_adaptive_avg_pooling(inputs, attrs):
output_size = attrs.get_int_tuple("output_size", [])
- return _op.contrib.adaptive_avg_pool2d(inputs[0], output_size)
+ return _op.nn.adaptive_avg_pool2d(inputs[0], output_size)
def _mx_dropout(inputs, attrs):
data = inputs[0]
output_size = _infer_shape(inputs[1])
- return _op.contrib.contrib.adaptive_avg_pool2d(
+ return _op.nn.adaptive_avg_pool2d(
data,
output_size=output_size)
return _impl
data = inputs[0]
output_size = _infer_shape(inputs[1])
- return _op.contrib.contrib.adaptive_max_pool2d(
+ return _op.nn.adaptive_max_pool2d(
data,
output_size=output_size)
return _impl
from . import memory
from . import image
from . import vision
-from . import contrib
from . import op_attrs
register_injective_schedule("right_shift")
register_injective_schedule("left_shift")
register_injective_schedule("shape_of")
+register_injective_schedule("ndarray_size")
+
# zeros
@register_compute("zeros")
# pylint: disable=wildcard-import
"""Neural network related operators."""
from __future__ import absolute_import as _abs
-from .contrib import *
-from . import _contrib
+++ /dev/null
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-# pylint: disable=invalid-name, unused-argument
-"""Backend compiler related feature registration"""
-from __future__ import absolute_import
-
-from .. import op as reg
-from .. import strategy
-from ..op import OpPattern
-
-
-# adaptive_max_pool2d
-reg.register_schedule("contrib.adaptive_max_pool2d", strategy.schedule_adaptive_pool)
-reg.register_pattern("contrib.adaptive_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
-
-
-# adaptive_avg_pool2d
-reg.register_schedule("contrib.adaptive_avg_pool2d", strategy.schedule_adaptive_pool)
-reg.register_pattern("contrib.adaptive_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
-
-# relay.contrib.ndarray_size
-reg.register_injective_schedule("contrib.ndarray_size")
+++ /dev/null
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-"""Constructor APIs"""
-import tvm._ffi
-
-tvm._ffi._init_api("relay.op.contrib._make", __name__)
# pylint: disable=invalid-name, too-many-lines
"""Contrib operations."""
from __future__ import absolute_import as _abs
-from . import _make
-
-
-def adaptive_max_pool2d(data,
- output_size=None,
- layout="NCHW"):
- r"""2D adaptive max pooling operator. This operator is experimental.
-
- This operator takes data as input and does 2D max value calculation
- across each window represented by WxH.
-
-
- In the default case, where the data_layout is `NCHW`
- a data Tensor with shape `(batch_size, in_channels, height, width)`,
- to produce an output Tensor with shape
- (batch_size, in_channels, output_height, output_width).
-
- The pooling kernel and stride sizes are automatically chosen for
- desired output sizes.
-
- For output_size:
- If this argument is not provided, input height and width will be used
- as output height and width.
-
- If a single integer is provided for output_size, the output size is
- (N x C x output_size x output_size) for any input (NCHW).
-
- If a tuple of integers (height, width) are provided for output_size,
- the output size is (N x C x height x width) for any input (NCHW).
-
- Parameters
- ----------
- data : tvm.relay.Expr
- The input data to the operator.
-
- output_size : tuple of int. optional
- Output height and width.
-
- layout : str, optional
- Layout of the input.
-
- Returns
- -------
- result : tvm.relay.Expr
- The computed result.
- """
- output_size = [] or output_size
- return _make.adaptive_max_pool2d(data, output_size, layout)
-
-def adaptive_avg_pool2d(data,
- output_size=None,
- layout="NCHW"):
- r"""2D adaptive average pooling operator. This operator is experimental.
-
- This operator takes data as input and does 2D average value calculation
- across each window represented by WxH.
-
-
- In the default case, where the data_layout is `NCHW`
- a data Tensor with shape `(batch_size, in_channels, height, width)`,
- to produce an output Tensor with shape
- (batch_size, in_channels, output_height, output_width).
-
- The pooling kernel and stride sizes are automatically chosen for
- desired output sizes.
-
- For output_size:
- If this argument is not provided, input height and width will be used
- as output height and width.
-
- If a single integer is provided for output_size, the output size is
- (N x C x output_size x output_size) for any input (NCHW).
-
- If a tuple of integers (height, width) are provided for output_size,
- the output size is (N x C x height x width) for any input (NCHW).
-
- Parameters
- ----------
- data : tvm.relay.Expr
- The input data to the operator.
-
- output_size : tuple of int. optional
- Output height and width.
-
- layout : str, optional
- Layout of the input.
-
- Returns
- -------
- result : tvm.relay.Expr
- The computed result.
- """
- output_size = [] or output_size
- return _make.adaptive_avg_pool2d(data, output_size, layout)
-
-def ndarray_size(data, dtype="int32"):
- """Get number of elements of input tensor.
-
- Parameters
- ----------
- data : tvm.relay.Expr
- The input tensor.
-
- dtype : str, optional
- The target data type.
-
- Returns
- -------
- result : tvm.relay.Expr
- The number of elements of input tensor.
- """
- return _make.ndarray_size(data, dtype)
reg.register_pattern("nn.global_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
+# adaptive_max_pool2d
+reg.register_schedule("nn.adaptive_max_pool2d", strategy.schedule_adaptive_pool)
+reg.register_pattern("nn.adaptive_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
+
+
+# adaptive_avg_pool2d
+reg.register_schedule("nn.adaptive_avg_pool2d", strategy.schedule_adaptive_pool)
+reg.register_pattern("nn.adaptive_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
+
+
# leaky_relu
reg.register_broadcast_schedule("nn.leaky_relu")
reg.register_pattern("nn.leaky_relu", OpPattern.ELEMWISE)
in_height / block_size, in_width / block_size]
"""
return _make.space_to_depth(data, block_size, layout)
+
+
+def adaptive_max_pool2d(data,
+ output_size=None,
+ layout="NCHW"):
+ r"""2D adaptive max pooling operator. This operator is experimental.
+
+ This operator takes data as input and computes the 2D max value
+ across each pooling window.
+
+ In the default case, where the data_layout is `NCHW`, a data Tensor
+ with shape `(batch_size, in_channels, height, width)` produces an
+ output Tensor with shape
+ `(batch_size, in_channels, output_height, output_width)`.
+
+ The pooling kernel and stride sizes are automatically chosen to
+ produce the desired output size.
+
+ For output_size:
+ If this argument is not provided, the input height and width are used
+ as the output height and width.
+
+ If a single integer is provided for output_size, the output size is
+ (N x C x output_size x output_size) for any input (NCHW).
+
+ If a tuple of integers (height, width) is provided for output_size,
+ the output size is (N x C x height x width) for any input (NCHW).
+
+ Parameters
+ ----------
+ data : tvm.relay.Expr
+ The input data to the operator.
+
+ output_size : tuple of int, optional
+ Output height and width.
+
+ layout : str, optional
+ Layout of the input.
+
+ Returns
+ -------
+ result : tvm.relay.Expr
+ The computed result.
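+
+ Examples
+ --------
+ A minimal sketch of typical use (the variable names are illustrative,
+ not part of this change): any NCHW input is pooled down to a fixed
+ 5x5 spatial size.
+
+ .. code-block:: python
+
+ from tvm import relay
+ x = relay.var("x", relay.TensorType((1, 16, 32, 32), "float32"))
+ y = relay.nn.adaptive_max_pool2d(x, output_size=(5, 5))
+ # y has shape (1, 16, 5, 5) regardless of the input height/width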
+ """
+ output_size = output_size or []
+ return _make.adaptive_max_pool2d(data, output_size, layout)
+
+
+def adaptive_avg_pool2d(data,
+ output_size=None,
+ layout="NCHW"):
+ r"""2D adaptive average pooling operator. This operator is experimental.
+
+ This operator takes data as input and computes the 2D average value
+ across each pooling window.
+
+ In the default case, where the data_layout is `NCHW`, a data Tensor
+ with shape `(batch_size, in_channels, height, width)` produces an
+ output Tensor with shape
+ `(batch_size, in_channels, output_height, output_width)`.
+
+ The pooling kernel and stride sizes are automatically chosen to
+ produce the desired output size.
+
+ For output_size:
+ If this argument is not provided, the input height and width are used
+ as the output height and width.
+
+ If a single integer is provided for output_size, the output size is
+ (N x C x output_size x output_size) for any input (NCHW).
+
+ If a tuple of integers (height, width) is provided for output_size,
+ the output size is (N x C x height x width) for any input (NCHW).
+
+ Parameters
+ ----------
+ data : tvm.relay.Expr
+ The input data to the operator.
+
+ output_size : tuple of int, optional
+ Output height and width.
+
+ layout : str, optional
+ Layout of the input.
+
+ Returns
+ -------
+ result : tvm.relay.Expr
+ The computed result.
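+
+ Examples
+ --------
+ A minimal sketch of typical use (the variable names are illustrative):
+ global average pooling falls out as the output_size=(1, 1) special case.
+
+ .. code-block:: python
+
+ from tvm import relay
+ x = relay.var("x", relay.TensorType((1, 16, 32, 32), "float32"))
+ y = relay.nn.adaptive_avg_pool2d(x, output_size=(1, 1))
+ # y has shape (1, 16, 1, 1), equivalent to nn.global_avg_pool2d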
+ """
+ output_size = output_size or []
+ return _make.adaptive_avg_pool2d(data, output_size, layout)
The shape tensor.
"""
return _make.shape_of(data, dtype)
+
+
+def ndarray_size(data, dtype="int32"):
+ """Get number of elements of input tensor.
+
+ Parameters
+ ----------
+ data : tvm.relay.Expr
+ The input tensor.
+
+ dtype : str, optional
+ The target data type.
+
+ Returns
+ -------
+ result : tvm.relay.Expr
+ The number of elements of the input tensor.
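+
+ Examples
+ --------
+ A minimal sketch (the variable names are illustrative): the result is
+ a scalar tensor holding the element count.
+
+ .. code-block:: python
+
+ from tvm import relay
+ x = relay.var("x", relay.TensorType((2, 3, 4), "float32"))
+ n = relay.op.ndarray_size(x, dtype="int32")
+ # n evaluates to 24, the number of elements in x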
+ """
+ return _make.ndarray_size(data, dtype)
mode, layout.name()) };
}
-// relay.contrib.adaptive_avg_pool2d
+// relay.nn.adaptive_avg_pool2d
Expr MakeAdaptiveAvgPool2D(Expr data,
Array<IndexExpr> output_size,
std::string layout) {
auto attrs = make_object<AdaptivePool2DAttrs>();
attrs->output_size = std::move(output_size);
attrs->layout = std::move(layout);
- static const Op& op = Op::Get("contrib.adaptive_avg_pool2d");
+ static const Op& op = Op::Get("nn.adaptive_avg_pool2d");
return CallNode::make(op, {data}, Attrs(attrs), {});
}
-TVM_REGISTER_GLOBAL("relay.op.contrib._make.adaptive_avg_pool2d")
+TVM_REGISTER_GLOBAL("relay.op.nn._make.adaptive_avg_pool2d")
.set_body_typed(MakeAdaptiveAvgPool2D);
-RELAY_REGISTER_OP("contrib.adaptive_avg_pool2d")
+RELAY_REGISTER_OP("nn.adaptive_avg_pool2d")
.describe(R"code(Adaptive average pooling operation for 2D data.
- **data**: This depends on the `layout` parameter. Input is 4D array of shape
.set_attr<FTVMCompute>("FTVMCompute", AdaptivePool2DCompute<topi::nn::kAvgPool>);
-// relay.contrib.adaptive_max_pool2d
+// relay.nn.adaptive_max_pool2d
Expr MakeAdaptiveMaxPool2D(Expr data,
Array<IndexExpr> output_size,
std::string layout) {
auto attrs = make_object<AdaptivePool2DAttrs>();
attrs->output_size = std::move(output_size);
attrs->layout = std::move(layout);
- static const Op& op = Op::Get("contrib.adaptive_max_pool2d");
+ static const Op& op = Op::Get("nn.adaptive_max_pool2d");
return CallNode::make(op, {data}, Attrs(attrs), {});
}
-TVM_REGISTER_GLOBAL("relay.op.contrib._make.adaptive_max_pool2d")
+TVM_REGISTER_GLOBAL("relay.op.nn._make.adaptive_max_pool2d")
.set_body_typed(MakeAdaptiveMaxPool2D);
-RELAY_REGISTER_OP("contrib.adaptive_max_pool2d")
+RELAY_REGISTER_OP("nn.adaptive_max_pool2d")
.describe(R"code(Adaptive max pooling operation for 2D data.
- **data**: This depends on the `layout` parameter. Input is 4D array of shape
return Array<te::Tensor>{topi::ndarray_size(inputs[0], param->dtype)};
}
-TVM_REGISTER_GLOBAL("relay.op.contrib._make.ndarray_size")
+TVM_REGISTER_GLOBAL("relay.op._make.ndarray_size")
.set_body_typed([](Expr data, DataType dtype) {
auto attrs = make_object<NdarraySizeAttrs>();
attrs->dtype = dtype;
- static const Op& op = Op::Get("contrib.ndarray_size");
+ static const Op& op = Op::Get("ndarray_size");
return CallNode::make(op, {data}, Attrs(attrs), {});
});
-RELAY_REGISTER_OP("contrib.ndarray_size")
+RELAY_REGISTER_OP("ndarray_size")
.describe(R"code(Returns a tensor representing the number of elements of input tensor.
)code" TVM_ADD_FILELINE)
def test_ndarray_size():
def verify_ndarray_size(shape):
x = relay.var("x", shape=shape)
- func = relay.Function([x], relay.op.contrib.ndarray_size(x))
+ func = relay.Function([x], relay.op.ndarray_size(x))
func = run_infer_type(func)
x_data = np.random.uniform(size=shape).astype("float32")
l_sl = slice(l_start, l_end)
np_out[i, j, k, l] = np_op(np_data[i, j, k_sl, l_sl])
- opfunc = relay.contrib.adaptive_avg_pool2d if pool_type == "avg" else relay.contrib.adaptive_max_pool2d
+ opfunc = relay.nn.adaptive_avg_pool2d if pool_type == "avg" else relay.nn.adaptive_max_pool2d
x = relay.var("x", relay.TensorType((n, c, h, w), "float32"))
y = opfunc(x, out_size, layout)
func = relay.Function([x], y)