[REFACTOR] top - namespace for Tensor Operation DSL (#4727)
author: Tianqi Chen <tqchen@users.noreply.github.com>
Thu, 16 Jan 2020 23:23:54 +0000 (15:23 -0800)
committer: GitHub <noreply@github.com>
Thu, 16 Jan 2020 23:23:54 +0000 (15:23 -0800)
* [REFACTOR] introduce top - Tensor Operation DSL.

Historically we put Tensor, Schedule and compute under the root tvm namespace.
This is no longer a good idea as the project's scope grows larger
than the tensor operation DSL.

This PR introduces top -- a namespace for tensor operation
DSL concepts such as schedule, tensor, and compute.
We moved the related files to the new top subfolder.

* Move relevant files into include/tvm/top and src/top

125 files changed:
CMakeLists.txt
include/tvm/arith/bound.h
include/tvm/build_module.h
include/tvm/ir_pass.h
include/tvm/lowered_func.h
include/tvm/packed_func_ext.h
include/tvm/relay/op_attr_types.h
include/tvm/top/operation.h [moved from include/tvm/operation.h with 98% similarity]
include/tvm/top/schedule.h [moved from include/tvm/schedule.h with 99% similarity]
include/tvm/top/schedule_pass.h [moved from include/tvm/schedule_pass.h with 92% similarity]
include/tvm/top/tensor.h [moved from include/tvm/tensor.h with 95% similarity]
include/tvm/top/tensor_intrin.h [moved from include/tvm/tensor_intrin.h with 96% similarity]
src/README.md
src/api/api_arith.cc
src/api/api_base.cc
src/api/api_lang.cc
src/api/api_pass.cc
src/api/api_schedule.cc
src/api/api_test.cc
src/arith/domain_touched.cc
src/codegen/build_module.cc
src/contrib/hybrid/codegen_hybrid.h
src/pass/inject_prefetch.cc
src/pass/storage_flatten.cc
src/pass/tensor_core.cc
src/pass/verify_compact_buffer.cc
src/relay/backend/compile_engine.cc
src/relay/backend/compile_engine.h
src/relay/backend/utils.h
src/relay/backend/vm/compiler.cc
src/relay/op/annotation/annotation.cc
src/relay/op/debug.cc
src/relay/op/memory/memory.cc
src/relay/op/nn/nn.cc
src/relay/op/nn/pad.cc
src/relay/op/nn/pooling.cc
src/relay/op/tensor/binary.cc
src/relay/op/tensor/reduce.cc
src/relay/op/tensor/transform.cc
src/relay/op/tensor/unary.cc
src/relay/op/vision/yolo.cc
src/relay/pass/alter_op_layout.cc
src/relay/pass/convert_layout.cc
src/relay/pass/gradient.cc
src/relay/pass/legalize.cc
src/top/operation/compute_op.cc [moved from src/op/compute_op.cc with 96% similarity]
src/top/operation/compute_op.h [moved from src/op/compute_op.h with 95% similarity]
src/top/operation/cross_thread_reduction.cc [moved from src/op/cross_thread_reduction.cc with 93% similarity]
src/top/operation/extern_op.cc [moved from src/op/extern_op.cc with 98% similarity]
src/top/operation/hybrid_op.cc [moved from src/op/hybrid_op.cc with 97% similarity]
src/top/operation/hybrid_op.h [moved from src/op/hybrid_op.h with 92% similarity]
src/top/operation/op_util.cc [moved from src/op/op_util.cc with 97% similarity]
src/top/operation/op_util.h [moved from src/op/op_util.h with 93% similarity]
src/top/operation/placeholder_op.cc [moved from src/op/placeholder_op.cc with 97% similarity]
src/top/operation/scan_op.cc [moved from src/op/scan_op.cc with 98% similarity]
src/top/operation/tensor_compute_op.cc [moved from src/op/tensor_compute_op.cc with 92% similarity]
src/top/operation/tensorize.cc [moved from src/op/tensorize.cc with 97% similarity]
src/top/schedule/auto_inline_elem_wise.cc [moved from src/schedule/auto_inline_elem_wise.cc with 96% similarity]
src/top/schedule/bound.cc [moved from src/schedule/bound.cc with 98% similarity]
src/top/schedule/graph.cc [moved from src/schedule/graph.cc with 98% similarity]
src/top/schedule/graph.h [moved from src/schedule/graph.h with 95% similarity]
src/top/schedule/message_passing.cc [moved from src/schedule/message_passing.cc with 99% similarity]
src/top/schedule/message_passing.h [moved from src/schedule/message_passing.h with 95% similarity]
src/top/schedule/schedule_dataflow_rewrite.cc [moved from src/schedule/schedule_dataflow_rewrite.cc with 97% similarity]
src/top/schedule/schedule_lang.cc [moved from src/schedule/schedule_lang.cc with 99% similarity]
src/top/schedule/schedule_ops.cc [moved from src/schedule/schedule_ops.cc with 98% similarity]
src/top/tensor.cc [moved from src/lang/tensor.cc with 97% similarity]
tests/cpp/build_module_test.cc
tests/cpp/expr_test.cc
tests/cpp/ir_simplify_test.cc
tests/cpp/relay_build_module_test.cc
tests/cpp/relay_pass_type_infer_test.cc
tests/cpp/relay_transform_sequential.cc
tests/cpp/simple_passes_test.cc
tests/cpp/tensor_test.cc
tests/cpp/topi_ewise_test.cc
tests/cpp/utvm_runtime_standalone_test.cc
topi/include/topi/broadcast.h
topi/include/topi/contrib/cublas.h
topi/include/topi/contrib/rocblas.h
topi/include/topi/cuda/dense.h
topi/include/topi/cuda/injective.h
topi/include/topi/cuda/normalization.h
topi/include/topi/cuda/pooling.h
topi/include/topi/cuda/reduction.h
topi/include/topi/cuda/softmax.h
topi/include/topi/detail/array_utils.h
topi/include/topi/detail/broadcast.h
topi/include/topi/detail/constant_utils.h
topi/include/topi/detail/extern.h
topi/include/topi/detail/fuse.h
topi/include/topi/detail/pad_utils.h
topi/include/topi/detail/ravel_unravel.h
topi/include/topi/detail/tensor_utils.h
topi/include/topi/elemwise.h
topi/include/topi/generic/default.h
topi/include/topi/generic/extern.h
topi/include/topi/generic/injective.h
topi/include/topi/image/resize.h
topi/include/topi/nn.h
topi/include/topi/nn/batch_matmul.h
topi/include/topi/nn/bias_add.h
topi/include/topi/nn/bnn.h
topi/include/topi/nn/dense.h
topi/include/topi/nn/dilate.h
topi/include/topi/nn/flatten.h
topi/include/topi/nn/l2_normalize.h
topi/include/topi/nn/local_response_norm.h
topi/include/topi/nn/mapping.h
topi/include/topi/nn/pooling.h
topi/include/topi/nn/softmax.h
topi/include/topi/nn/upsampling.h
topi/include/topi/reduction.h
topi/include/topi/rocm/dense.h
topi/include/topi/rocm/injective.h
topi/include/topi/rocm/normalization.h
topi/include/topi/rocm/pooling.h
topi/include/topi/rocm/reduction.h
topi/include/topi/rocm/softmax.h
topi/include/topi/transform.h
topi/include/topi/vision/reorg.h
topi/include/topi/x86/bnn.h
topi/include/topi/x86/default.h
topi/include/topi/x86/injective.h
topi/src/topi.cc

index f48e7d1..a62173e 100644 (file)
@@ -124,20 +124,24 @@ assign_source_group("Source" ${GROUP_SOURCE})
 assign_source_group("Include" ${GROUP_INCLUDE})
 
 # Source file lists
-file(GLOB COMPILER_SRCS
+file(GLOB_RECURSE COMPILER_SRCS
     src/node/*.cc
     src/ir/*.cc
     src/target/*.cc
-    src/api/*.cc
     src/arith/*.cc
+    src/top/*.cc
+    src/api/*.cc
     src/autotvm/*.cc
-    src/codegen/*.cc
     src/lang/*.cc
     src/pass/*.cc
-    src/op/*.cc
-    src/schedule/*.cc
     )
 
+file(GLOB CODEGEN_SRCS
+    src/codegen/*.cc
+    )
+
+list(APPEND COMPILER_SRCS ${CODEGEN_SRCS})
+
 file(GLOB_RECURSE RELAY_OP_SRCS
     src/relay/op/*.cc
     )
index 73c0733..e069548 100644 (file)
@@ -32,7 +32,9 @@
 
 namespace tvm {
 // forward delcare Tensor
+namespace top {
 class Tensor;
+}
 namespace arith {
 
 /*!
@@ -75,7 +77,10 @@ IntSet DeduceBound(PrimExpr v, PrimExpr cond,
  * \param consider_provides If provides (write) are considered.
  * \return The domain that covers all the calls or provides within the given statement.
  */
-Domain DomainTouched(Stmt body, const Tensor &tensor, bool consider_calls, bool consider_provides);
+Domain DomainTouched(Stmt body,
+                     const top::Tensor &tensor,
+                     bool consider_calls,
+                     bool consider_provides);
 
 }  // namespace arith
 }  // namespace tvm
index 2990378..4e6e517 100644 (file)
@@ -26,6 +26,8 @@
 
 #include <tvm/target/target.h>
 #include <tvm/support/with.h>
+#include <tvm/top/schedule_pass.h>
+
 #include <string>
 #include <vector>
 #include <utility>
@@ -33,7 +35,7 @@
 #include <unordered_set>
 
 #include "runtime/packed_func.h"
-#include "schedule_pass.h"
+
 #include "lowered_func.h"
 
 namespace tvm {
@@ -172,10 +174,10 @@ class BuildConfig : public ::tvm::ObjectRef {
 * \param config The build configuration.
 * \return The lowered function.
 */
-TVM_DLL Array<LoweredFunc> lower(Schedule sch,
-                                 const Array<Tensor>& args,
+TVM_DLL Array<LoweredFunc> lower(top::Schedule sch,
+                                 const Array<top::Tensor>& args,
                                  const std::string& name,
-                                 const std::unordered_map<Tensor, Buffer>& binds,
+                                 const std::unordered_map<top::Tensor, Buffer>& binds,
                                  const BuildConfig& config);
 /*!
 * \brief Split host/device function and running necessary pass before build
index 891d324..bf44426 100644 (file)
 #ifndef TVM_IR_PASS_H_
 #define TVM_IR_PASS_H_
 
+#include <tvm/top/schedule.h>
+
 #include <unordered_map>
 #include <unordered_set>
 #include <vector>
 #include <string>
 #include "expr.h"
 #include "buffer.h"
-#include "schedule.h"
 #include "lowered_func.h"
 
 namespace tvm {
@@ -203,7 +204,7 @@ Stmt Inline(Stmt stmt,
  * \return Transformed stmt.
  */
 Stmt StorageFlatten(Stmt stmt,
-                    Map<Tensor, Buffer> extern_buffer,
+                    Map<top::Tensor, Buffer> extern_buffer,
                     int cache_line_size,
                     bool create_bound_attribute = false);
 
@@ -217,8 +218,8 @@ Stmt StorageFlatten(Stmt stmt,
  * \return Transformed stmt.
  */
 Stmt RewriteForTensorCore(Stmt stmt,
-                          Schedule schedule,
-                          Map<Tensor, Buffer> extern_buffer);
+                          top::Schedule schedule,
+                          Map<top::Tensor, Buffer> extern_buffer);
 
 /*!
  * \brief Verify if there is any argument bound to compact buffer.
index 2b643d7..b0350ae 100644 (file)
 #ifndef TVM_LOWERED_FUNC_H_
 #define TVM_LOWERED_FUNC_H_
 
+#include <tvm/top/tensor.h>
+
 #include <string>
 
 #include "expr.h"
-#include "tensor.h"
 #include "tvm/node/container.h"
 
 namespace tvm {
index cc380cd..f7b0d08 100644 (file)
 #ifndef TVM_PACKED_FUNC_EXT_H_
 #define TVM_PACKED_FUNC_EXT_H_
 
+#include <tvm/top/tensor.h>
+
 #include <string>
 #include <memory>
 #include <limits>
 #include <type_traits>
 
 #include "expr.h"
-#include "tensor.h"
 #include "runtime/packed_func.h"
 
 namespace tvm {
@@ -116,8 +117,8 @@ inline TVMPODValue_::operator tvm::PrimExpr() const {
   if (ptr->IsInstance<IterVarNode>()) {
     return IterVar(ObjectPtr<Object>(ptr))->var;
   }
-  if (ptr->IsInstance<TensorNode>()) {
-    return Tensor(ObjectPtr<Object>(ptr))();
+  if (ptr->IsInstance<top::TensorNode>()) {
+    return top::Tensor(ObjectPtr<Object>(ptr))();
   }
   CHECK(ObjectTypeChecker<PrimExpr>::Check(ptr))
       << "Expect type " << ObjectTypeChecker<PrimExpr>::TypeName()
index b6221e0..385d645 100644 (file)
@@ -24,8 +24,8 @@
 #ifndef TVM_RELAY_OP_ATTR_TYPES_H_
 #define TVM_RELAY_OP_ATTR_TYPES_H_
 
-#include <tvm/tensor.h>
-#include <tvm/schedule.h>
+#include <tvm/top/tensor.h>
+#include <tvm/top/schedule.h>
 #include <tvm/build_module.h>
 #include <tvm/relay/type.h>
 #include <tvm/relay/expr.h>
@@ -99,10 +99,10 @@ using TShapeDataDependant = bool;
  * \return The output compute description of the operator.
  */
 using FTVMCompute = runtime::TypedPackedFunc<
-  Array<Tensor>(const Attrs& attrs,
-                const Array<Tensor>& inputs,
-                const Type& out_type,
-                const Target& target)>;
+  Array<top::Tensor>(const Attrs& attrs,
+                     const Array<top::Tensor>& inputs,
+                     const Type& out_type,
+                     const Target& target)>;
 
 /*!
  * \brief Build the computation schedule for
@@ -114,9 +114,9 @@ using FTVMCompute = runtime::TypedPackedFunc<
  * \return schedule The computation schedule.
  */
 using FTVMSchedule = runtime::TypedPackedFunc<
-  Schedule(const Attrs& attrs,
-           const Array<Tensor>& outs,
-           const Target& target)>;
+  top::Schedule(const Attrs& attrs,
+                const Array<top::Tensor>& outs,
+                const Target& target)>;
 
 /*!
  * \brief Alternate the layout of operators or replace the
@@ -131,7 +131,7 @@ using FTVMSchedule = runtime::TypedPackedFunc<
 using FTVMAlterOpLayout = runtime::TypedPackedFunc<
   Expr(const Attrs& attrs,
        const Array<Expr>& args,
-       const Array<Tensor>& tinfos)>;
+       const Array<top::Tensor>& tinfos)>;
 
 /*!
  * \brief Convert the layout of operators or replace the
@@ -147,7 +147,7 @@ using FTVMAlterOpLayout = runtime::TypedPackedFunc<
 using FTVMConvertOpLayout = runtime::TypedPackedFunc<
   Expr(const Attrs& attrs,
        const Array<Expr>& args,
-       const Array<Tensor>& tinfos,
+       const Array<top::Tensor>& tinfos,
        const std::string& desired_layout)>;
 /*!
  * \brief Legalizes an expression with another expression. This function will be
@@ -206,9 +206,9 @@ enum AnyCodegenStrategy {
 using Shape = Array<IndexExpr>;
 
 using FShapeFunc = runtime::TypedPackedFunc<
-  Array<Tensor>(const Attrs& attrs,
-                const Array<Tensor>& inputs,
-                const Array<IndexExpr>& out_ndims)>;
+  Array<top::Tensor>(const Attrs& attrs,
+                     const Array<top::Tensor>& inputs,
+                     const Array<IndexExpr>& out_ndims)>;
 
 }  // namespace relay
 }  // namespace tvm
similarity index 98%
rename from include/tvm/operation.h
rename to include/tvm/top/operation.h
index 54c98a3..2cee218 100644 (file)
  */
 
 /*!
- * \file tvm/operation.h
+ * \file tvm/top/operation.h
  * \brief Operation node can generate one or multiple Tensors
  */
-#ifndef TVM_OPERATION_H_
-#define TVM_OPERATION_H_
+#ifndef TVM_TOP_OPERATION_H_
+#define TVM_TOP_OPERATION_H_
 
 #include <tvm/arith/analyzer.h>
+#include <tvm/top/tensor.h>
+#include <tvm/top/schedule.h>
+
+#include <tvm/expr.h>
+#include <tvm/expr_operator.h>
+#include <tvm/buffer.h>
 
 #include <string>
 #include <vector>
 #include <unordered_map>
 
-#include "expr.h"
-#include "expr_operator.h"
-#include "tensor.h"
-#include "schedule.h"
-#include "buffer.h"
+
 
 namespace tvm {
+namespace top {
 
 using arith::IntSet;
 
@@ -655,5 +658,6 @@ inline Tensor compute(Array<PrimExpr> shape,
 inline const OperationNode* Operation::operator->() const {
   return static_cast<const OperationNode*>(get());
 }
+}  // namespace top
 }  // namespace tvm
-#endif  // TVM_OPERATION_H_
+#endif  // TVM_TOP_OPERATION_H_
similarity index 99%
rename from include/tvm/schedule.h
rename to include/tvm/top/schedule.h
index 3115b0a..2adaa13 100644 (file)
  */
 
 /*!
- * \file tvm/schedule.h
+ * \file tvm/top/schedule.h
  * \brief Define a schedule.
  */
 // Acknowledgement: Many schedule primitives originate from Halide and Loopy.
-#ifndef TVM_SCHEDULE_H_
-#define TVM_SCHEDULE_H_
+#ifndef TVM_TOP_SCHEDULE_H_
+#define TVM_TOP_SCHEDULE_H_
+
+#include <tvm/expr.h>
+#include <tvm/top/tensor.h>
+#include <tvm/top/tensor_intrin.h>
+
 
 #include <string>
 #include <unordered_map>
-#include "expr.h"
-#include "tensor.h"
-#include "tensor_intrin.h"
 
-namespace tvm {
 
+namespace tvm {
+namespace top {
 // Node container for Stage
 class StageNode;
 // Node container for Schedule
@@ -764,5 +767,6 @@ inline const IterVarRelationNode* IterVarRelation::operator->() const {
 inline const IterVarAttrNode* IterVarAttr::operator->() const {
   return static_cast<const IterVarAttrNode*>(get());
 }
+}  // namespace top
 }  // namespace tvm
-#endif  // TVM_SCHEDULE_H_
+#endif  // TVM_TOP_SCHEDULE_H_
similarity index 92%
rename from include/tvm/schedule_pass.h
rename to include/tvm/top/schedule_pass.h
index af2459b..eacc9cd 100644 (file)
  */
 
 /*!
- * \file tvm/schedule_pass.h
+ * \file tvm/top/schedule_pass.h
  * \brief  Collection of Schedule pass functions.
  *
  *  These passes works on the schedule hyper-graph
  *  and infers information such as bounds, check conditions
  *  read/write dependencies between the IterVar
  */
-#ifndef TVM_SCHEDULE_PASS_H_
-#define TVM_SCHEDULE_PASS_H_
+#ifndef TVM_TOP_SCHEDULE_PASS_H_
+#define TVM_TOP_SCHEDULE_PASS_H_
 
-#include "schedule.h"
+#include <tvm/top/schedule.h>
 
 namespace tvm {
-namespace schedule {
+namespace top {
 
 /*!
  * \brief Infer the bound of all iteration variables relates to the schedule.
@@ -71,6 +71,6 @@ void AutoInlineElemWise(Schedule sch);
  */
 TVM_DLL void AutoInlineInjective(Schedule sch);
 
-}  // namespace schedule
+}  // namespace top
 }  // namespace tvm
-#endif  // TVM_SCHEDULE_PASS_H_
+#endif  // TVM_TOP_SCHEDULE_PASS_H_
similarity index 95%
rename from include/tvm/tensor.h
rename to include/tvm/top/tensor.h
index 91c0c96..bdfbbeb 100644 (file)
  */
 
 /*!
- * \file tvm/tensor.h
+ * \file tvm/top/tensor.h
  * \brief Dataflow tensor object
  */
-#ifndef TVM_TENSOR_H_
-#define TVM_TENSOR_H_
+#ifndef TVM_TOP_TENSOR_H_
+#define TVM_TOP_TENSOR_H_
 
 #include <tvm/node/container.h>
 #include <tvm/arith/bound.h>
+#include <tvm/expr.h>
+#include <tvm/expr_operator.h>
 
 #include <string>
 #include <vector>
 #include <utility>
 #include <type_traits>
 
-#include "expr.h"
-#include "expr_operator.h"
+
 
 namespace tvm {
+namespace top {
 
 // Internal node container of Tensor
 class TensorNode;
@@ -246,16 +248,17 @@ DEFINE_OVERLOAD_SLICE_BINARY_OP(<<);
 DEFINE_OVERLOAD_SLICE_BINARY_OP(>);  // NOLINT(*)
 DEFINE_OVERLOAD_SLICE_BINARY_OP(<);  // NOLINT(*)
 
+}  // namespace top
 }  // namespace tvm
 
 namespace std {
 template <>
-struct hash<::tvm::Operation> : public ::tvm::ObjectHash {
+struct hash<::tvm::top::Operation> : public ::tvm::ObjectHash {
 };
 
 template <>
-struct hash<::tvm::Tensor> {
-  std::size_t operator()(const ::tvm::Tensor& k) const {
+struct hash<::tvm::top::Tensor> {
+  std::size_t operator()(const ::tvm::top::Tensor& k) const {
     ::tvm::ObjectHash hasher;
     if (k.defined() && k->op.defined()) {
       return hasher(k->op);
@@ -265,4 +268,4 @@ struct hash<::tvm::Tensor> {
   }
 };
 }  // namespace std
-#endif  // TVM_TENSOR_H_
+#endif  // TVM_TOP_TENSOR_H_
similarity index 96%
rename from include/tvm/tensor_intrin.h
rename to include/tvm/top/tensor_intrin.h
index 879e206..99eb885 100644 (file)
  */
 
 /*!
- * \file tvm/tensor_intrin.h
+ * \file tvm/top/tensor_intrin.h
  * \brief Tensor intrinsic operations.
  */
-#ifndef TVM_TENSOR_INTRIN_H_
-#define TVM_TENSOR_INTRIN_H_
+#ifndef TVM_TOP_TENSOR_INTRIN_H_
+#define TVM_TOP_TENSOR_INTRIN_H_
+
+#include <tvm/top/tensor.h>
+#include <tvm/buffer.h>
 
 #include <string>
-#include "tensor.h"
-#include "buffer.h"
+
 
 namespace tvm {
+namespace top {
 
 // Internal node container of tensor intrinsics.
 class TensorIntrinNode;
@@ -173,5 +176,6 @@ inline const TensorIntrinCallNode* TensorIntrinCall::operator->() const {
   return static_cast<const TensorIntrinCallNode*>(get());
 }
 
+}  // namespace top
 }  // namespace tvm
-#endif  // TVM_TENSOR_INTRIN_H_
+#endif  // TVM_TOP_TENSOR_INTRIN_H_
index b0172b6..2de8141 100644 (file)
@@ -24,13 +24,12 @@ There can be internal header files within each module that sit in src.
 - support: Internal support utilities.
 - runtime: Minimum runtime related codes.
 - node: base infra for IR/AST nodes that is dialect independent.
-- api: API function registration.
-- lang: The definition of DSL related data structure.
 - arith: Arithmetic expression and set simplification.
-- op: The detail implementations about each operation(compute, scan, placeholder).
-- schedule: The operations on the schedule graph before converting to IR.
+- top: tensor operation DSL for compute and schedule.
+- relay: Implementation of Relay. The second generation of NNVM, a new IR for deep learning frameworks.
 - pass: The optimization pass on the IR structure.
 - codegen: The code generator.
 - autotvm: The auto-tuning module.
-- relay: Implementation of Relay. The second generation of NNVM, a new IR for deep learning frameworks.
 - contrib: Contrib extension libraries.
+- api: API function registration.
+- lang: The definition of DSL related data structure.
index b4020eb..0c28d08 100644 (file)
@@ -31,7 +31,7 @@
 #include <tvm/runtime/registry.h>
 #include <tvm/packed_func_ext.h>
 
-#include <tvm/tensor.h>
+#include <tvm/top/tensor.h>
 
 namespace tvm {
 namespace arith {
index 131ac36..4b74d02 100644 (file)
@@ -23,7 +23,7 @@
  */
 #include <dmlc/memory_io.h>
 #include <tvm/expr.h>
-#include <tvm/tensor.h>
+#include <tvm/top/tensor.h>
 #include <tvm/runtime/registry.h>
 #include <tvm/packed_func_ext.h>
 
index 2c7eb3c..89c2c53 100644 (file)
  */
 #include <tvm/expr.h>
 #include <tvm/ir.h>
-#include <tvm/tensor.h>
-#include <tvm/operation.h>
+#include <tvm/top/tensor.h>
+#include <tvm/top/operation.h>
 #include <tvm/buffer.h>
-#include <tvm/schedule.h>
+#include <tvm/top/schedule.h>
 #include <tvm/runtime/registry.h>
 #include <tvm/packed_func_ext.h>
 
@@ -274,6 +274,7 @@ TVM_REGISTER_GLOBAL("_BijectiveLayoutForwardShape")
 TVM_REGISTER_GLOBAL("_BijectiveLayoutBackwardShape")
 .set_body_method(&BijectiveLayout::BackwardShape);
 
+namespace top {
 TVM_REGISTER_GLOBAL("_Tensor")
 .set_body_typed(TensorNode::make);
 
@@ -441,6 +442,7 @@ TVM_REGISTER_GLOBAL("_ScheduleCacheWrite")
 
 TVM_REGISTER_GLOBAL("_ScheduleRFactor")
 .set_body_method(&Schedule::rfactor);
+}  // namespace top
 
 TVM_REGISTER_GLOBAL("_CommReducerCombine")
 .set_body_method<ir::CommReducer>(&ir::CommReducerNode::operator());
index 639855c..a822cc1 100644 (file)
@@ -96,7 +96,9 @@ TVM_REGISTER_GLOBAL("ir_pass.StorageFlatten")
 
 TVM_REGISTER_GLOBAL("ir_pass.RewriteForTensorCore")
 .set_body_typed
-  ([](const Stmt& stmt, const Schedule& schedule, const Map<Tensor, Buffer>& extern_buffer) {
+  ([](const Stmt& stmt,
+      const top::Schedule& schedule,
+      const Map<top::Tensor, Buffer>& extern_buffer) {
       return RewriteForTensorCore(stmt, schedule, extern_buffer);
   });
 
index a7c27e4..7aa305f 100644 (file)
  * \file api_schedule.cc
  */
 #include <tvm/expr.h>
-#include <tvm/tensor.h>
-#include <tvm/schedule.h>
-#include <tvm/schedule_pass.h>
+#include <tvm/top/tensor.h>
+#include <tvm/top/schedule.h>
+#include <tvm/top/schedule_pass.h>
 #include <tvm/runtime/registry.h>
 #include <tvm/packed_func_ext.h>
 
-#include "../schedule/graph.h"
+#include "../top/schedule/graph.h"
 
 namespace tvm {
-namespace schedule {
+namespace top {
 
 TVM_REGISTER_GLOBAL("schedule.AutoInlineElemWise")
 .set_body_typed(AutoInlineElemWise);
@@ -60,5 +60,5 @@ REGISTER_SCHEDULE_PASS(CreateAttachPath);
 REGISTER_SCHEDULE_PASS(ScanGetBody);
 REGISTER_SCHEDULE_PASS(ScanFixPointAnalysis);
 
-}  // namespace schedule
+}  // namespace top
 }  // namespace tvm
index 0bc83ea..957a034 100644 (file)
@@ -22,7 +22,7 @@
  * \file api_test.cc
  */
 #include <tvm/expr.h>
-#include <tvm/tensor.h>
+#include <tvm/top/tensor.h>
 #include <tvm/ir/attrs.h>
 #include <tvm/runtime/registry.h>
 #include <tvm/ir/env_func.h>
index 3889cd2..6e665c8 100644 (file)
@@ -24,7 +24,7 @@
 #include <tvm/expr.h>
 #include <tvm/ir_pass.h>
 #include <tvm/ir_functor_ext.h>
-#include <tvm/tensor.h>
+#include <tvm/top/tensor.h>
 #include <tvm/runtime/registry.h>
 #include <tvm/packed_func_ext.h>
 
@@ -40,7 +40,7 @@ using namespace ir;
 // Find Read region of the tensor in the stmt.
 class FuncTouchedDomain final : public StmtExprVisitor {
  public:
-  FuncTouchedDomain(const Tensor &tensor, bool consider_calls, bool consider_provides)
+  FuncTouchedDomain(const top::Tensor &tensor, bool consider_calls, bool consider_provides)
     : tensor_(tensor), consider_calls_(consider_calls), consider_provides_(consider_provides)  {}
 
   Domain Find(const Stmt& stmt) {
@@ -108,13 +108,16 @@ class FuncTouchedDomain final : public StmtExprVisitor {
     }
   }
 
-  const Tensor &tensor_;
+  const top::Tensor &tensor_;
   bool consider_calls_, consider_provides_;
   std::vector<std::vector<IntSet> > bounds_;
   std::unordered_map<const VarNode*, IntSet> dom_map_;
 };
 
-Domain DomainTouched(Stmt stmt, const Tensor &tensor, bool consider_calls, bool consider_provides) {
+Domain DomainTouched(Stmt stmt,
+                     const top::Tensor &tensor,
+                     bool consider_calls,
+                     bool consider_provides) {
   return FuncTouchedDomain(tensor, consider_calls, consider_provides).Find(stmt);
 }
 
index 771583b..cfb75c4 100644 (file)
@@ -23,7 +23,7 @@
  */
 #include <dmlc/thread_local.h>
 #include <tvm/build_module.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/ir_pass.h>
 #include <tvm/codegen.h>
 #include <tvm/runtime/registry.h>
@@ -87,10 +87,10 @@ Buffer BufferWithOffsetAlignment(Array<PrimExpr> shape,
     data_alignment, offset_factor, buffer_type);
 }
 
-void GetBinds(const Array<Tensor>& args,
+void GetBinds(const Array<top::Tensor>& args,
               bool compact,
-              const std::unordered_map<Tensor, Buffer>& binds,
-              Map<Tensor, Buffer>* out_binds,
+              const std::unordered_map<top::Tensor, Buffer>& binds,
+              Map<top::Tensor, Buffer>* out_binds,
               Array<ObjectRef>* out_arg_list,
               const BuildConfig& config) {
   *out_binds = binds;
@@ -117,21 +117,21 @@ void GetBinds(const Array<Tensor>& args,
 * \param config The build configuration.
 * \return The built Stmt.
 */
-Stmt BuildStmt(Schedule sch,
-               const Array<Tensor>& args,
-               const std::unordered_map<Tensor, Buffer>& binds,
+Stmt BuildStmt(top::Schedule sch,
+               const Array<top::Tensor>& args,
+               const std::unordered_map<top::Tensor, Buffer>& binds,
                bool loop_partition,
                Array<ObjectRef> *out_arg_list,
                const BuildConfig& config) {
   sch = sch.normalize();
 
   // Phase 0
-  auto bounds = schedule::InferBound(sch);
-  auto stmt = schedule::ScheduleOps(sch, bounds, false);
+  auto bounds = top::InferBound(sch);
+  auto stmt = top::ScheduleOps(sch, bounds, false);
   stmt = ir::InjectPrefetch(stmt);
 
   bool compact = ir::VerifyCompactBuffer(stmt);
-  Map<Tensor, Buffer> out_binds;
+  Map<top::Tensor, Buffer> out_binds;
   GetBinds(args, compact, binds, &out_binds, out_arg_list, config);
 
   // Phase 1
@@ -165,10 +165,10 @@ Stmt BuildStmt(Schedule sch,
   return stmt;
 }
 
-Array<LoweredFunc> lower(Schedule sch,
-                         const Array<Tensor>& args,
+Array<LoweredFunc> lower(top::Schedule sch,
+                         const Array<top::Tensor>& args,
                          const std::string& name,
-                         const std::unordered_map<Tensor, Buffer>& binds,
+                         const std::unordered_map<top::Tensor, Buffer>& binds,
                          const BuildConfig& config) {
   Array<ObjectRef> out_arg_list;
   auto stmt = BuildStmt(sch, args, binds, true, &out_arg_list, config);
index 33bd0ef..f5ba9ab 100644 (file)
@@ -28,7 +28,7 @@
 #include <tvm/ir_functor_ext.h>
 #include <tvm/codegen.h>
 #include <tvm/lowered_func.h>
-#include <tvm/schedule.h>
+#include <tvm/top/schedule.h>
 #include <map>
 #include <string>
 #include <unordered_map>
@@ -38,6 +38,7 @@
 namespace tvm {
 namespace contrib {
 
+using namespace top;
 using namespace ir;
 /*!
  * \brief A base class to generate Hybrid Script.
index d877863..a2895d5 100644 (file)
@@ -39,7 +39,7 @@ class PrefetchInjector : public StmtMutator {
     Stmt ret = StmtMutator::VisitStmt_(op);
     op = ret.as<AttrStmtNode>();
     if (op && op->attr_key == attr::prefetch_scope) {
-      Tensor ts = Downcast<Tensor>(op->node);
+      top::Tensor ts = Downcast<top::Tensor>(op->node);
       CHECK_NE(loop_nest_.size(), 0U);
       Domain domain = DomainTouched(op->body, ts, true, false);
       Region region;
index fc46ef3..b506765 100644 (file)
@@ -25,7 +25,7 @@
 #include <tvm/arith/analyzer.h>
 #include <tvm/ir.h>
 #include <tvm/expr.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/ir_functor_ext.h>
 #include <tvm/expr_operator.h>
 #include <tvm/ir_pass.h>
@@ -49,7 +49,7 @@ using intrinsic::tvm_address_of;
 
 class StorageFlattener : public StmtExprMutator {
  public:
-  explicit StorageFlattener(Map<Tensor, Buffer> extern_buffer,
+  explicit StorageFlattener(Map<top::Tensor, Buffer> extern_buffer,
                             int cache_line_size, bool create_bound_attributes,
                             IRVisitorWithAnalyzer* bounded_analyzer)
       : bounded_analyzer_(bounded_analyzer),
@@ -82,8 +82,8 @@ class StorageFlattener : public StmtExprMutator {
       storage_scope_[op->node.get()] = op->value.as<StringImmNode>()->value;
       return this->VisitStmt(op->body);
     } else if (op->attr_key == attr::double_buffer_scope &&
-               op->node->IsInstance<OperationNode>()) {
-      Operation func = Downcast<Operation>(op->node);
+               op->node->IsInstance<top::OperationNode>()) {
+      auto func = Downcast<top::Operation>(op->node);
       Stmt body = this->VisitStmt(op->body);
       for (int i = 0; i < func->num_outputs(); ++i) {
         TensorKey key{func, i};
@@ -104,7 +104,7 @@ class StorageFlattener : public StmtExprMutator {
     } else if (op->attr_key == attr::buffer_bind_scope) {
       return HandleBufferBindScope(op);
     } else if (op->attr_key == attr::buffer_dim_align) {
-      Tensor tensor = Downcast<Tensor>(op->node);
+      auto tensor = Downcast<top::Tensor>(op->node);
       const CallNode* tuple = op->value.as<CallNode>();
       CHECK(tuple && tuple->is_intrinsic(intrinsic::tvm_tuple));
       TensorKey key{tensor->op, tensor->value_index};
@@ -406,7 +406,7 @@ class StorageFlattener : public StmtExprMutator {
     Array<ObjectRef> arr = Downcast<Array<ObjectRef> > (op->node);
     CHECK_EQ(arr.size(), 2U);
     const BufferNode* buffer = arr[0].as<BufferNode>();
-    const TensorNode* tensor = arr[1].as<TensorNode>();
+    const top::TensorNode* tensor = arr[1].as<top::TensorNode>();
     const CallNode* tuple = op->value.as<CallNode>();
     CHECK(buffer && tensor);
     CHECK(tuple && tuple->is_intrinsic(intrinsic::tvm_tuple));
@@ -529,7 +529,7 @@ class StorageFlattener : public StmtExprMutator {
   bool create_bound_attributes_{false};
 };
 
-Stmt StorageFlatten(Stmt stmt, Map<Tensor, Buffer> extern_buffer,
+Stmt StorageFlatten(Stmt stmt, Map<top::Tensor, Buffer> extern_buffer,
                     int cache_line_size, bool create_bound_attributes) {
   IRVisitorWithAnalyzer bounded_analyzer;
   bounded_analyzer(stmt);
index c5c81ca..bf36b0a 100644 (file)
@@ -23,7 +23,7 @@
 // IR Passes for TensorCore CodeGen
 #include <tvm/ir.h>
 #include <tvm/expr.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/ir_functor_ext.h>
 #include <tvm/expr_operator.h>
 #include <tvm/ir_pass.h>
@@ -39,6 +39,7 @@
 namespace tvm {
 namespace ir {
 
+using namespace top;
 using runtime::StorageRank;
 using runtime::StorageScope;
 using runtime::ThreadScope;
@@ -417,7 +418,7 @@ class BufferAnalyser : public StmtExprVisitor {
       storage_scope_[op->node.get()] = op->value.as<StringImmNode>()->value;
       this->VisitStmt(op->body);
     } else if (op->attr_key == attr::buffer_dim_align) {
-      Tensor tensor = Downcast<Tensor>(op->node);
+      top::Tensor tensor = Downcast<top::Tensor>(op->node);
       const CallNode* tuple = op->value.as<CallNode>();
       CHECK(tuple && tuple->is_intrinsic(intrinsic::tvm_tuple));
       auto& vinfo = dim_align_[TensorKey{tensor->op, tensor->value_index}];
@@ -831,7 +832,7 @@ class TensorCoreIRMutator : public StmtExprMutator {
   Stmt VisitStmt_(const AttrStmtNode* op) final {
     Stmt stmt = StmtExprMutator::VisitStmt_(op);
     if (op->attr_key == attr::realize_scope) {
-      auto node = op->node.as<OperationNode>();
+      auto node = op->node.as<top::OperationNode>();
       if (node != nullptr) {
         if (!frag_reg_.count(node->name)) {
           return stmt;
@@ -1119,9 +1120,9 @@ class TensorCoreIRMutator : public StmtExprMutator {
     buffer_node->offset_factor = 1;
     Buffer buffer(buffer_node);
 
-    ObjectPtr<TensorNode> tensor_node = make_object<TensorNode>();
+    ObjectPtr<top::TensorNode> tensor_node = make_object<top::TensorNode>();
     tensor_node->value_index = key.value_index;
-    tensor_node->op = Downcast<Operation>(key.f);
+    tensor_node->op = Downcast<top::Operation>(key.f);
     tensor_node->shape = shape;
     tensor_node->dtype = datatype;
     Tensor tensor(tensor_node);
index f6c454d..95dcbdd 100644 (file)
@@ -25,7 +25,7 @@
 #include <tvm/expr.h>
 #include <tvm/ir.h>
 #include <tvm/ir_functor_ext.h>
-#include <tvm/tensor.h>
+#include <tvm/top/tensor.h>
 
 #include <unordered_map>
 
index 5ee4ce3..14967c1 100644 (file)
@@ -23,9 +23,9 @@
  */
 #include "compile_engine.h"
 
-#include <tvm/schedule.h>
+#include <tvm/top/schedule.h>
 #include <tvm/packed_func_ext.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/runtime/registry.h>
 #include <tvm/relay/attrs/device_copy.h>
 #include <tvm/relay/analysis.h>
@@ -101,20 +101,20 @@ Array<IndexExpr> GetShape(const Array<IndexExpr>& shape) {
 // The getter to get schedule from compile engine.
 // Get schedule from functor.
 class ScheduleGetter :
-      public ExprFunctor<Array<Tensor>(const Expr&)> {
+      public ExprFunctor<Array<top::Tensor>(const Expr&)> {
  public:
   explicit ScheduleGetter(Target target)
       : target_(target), device_copy_op_(Op::Get("device_copy")) {}
 
-  std::pair<Schedule, CachedFunc> Create(const Function& prim_func) {
+  std::pair<top::Schedule, CachedFunc> Create(const Function& prim_func) {
     static auto fschedule =
         Op::GetAttr<FTVMSchedule>("FTVMSchedule");
     auto cache_node = make_object<CachedFuncNode>();
     cache_node->target = target_;
     for (Var param : prim_func->params) {
-      Array<tvm::Tensor> inputs;
+      Array<tvm::top::Tensor> inputs;
       if (const auto* ttype = param->checked_type().as<TensorTypeNode>()) {
-        tvm::Tensor tensor = tvm::placeholder(
+        tvm::top::Tensor tensor = tvm::top::placeholder(
             GetShape(ttype->shape), ttype->dtype);
         cache_node->inputs.push_back(tensor);
         inputs.push_back(tensor);
@@ -125,7 +125,7 @@ class ScheduleGetter :
           const auto* ttype = field.as<TensorTypeNode>();
           // TODO(@icemelon): Allow recursive tuple
           CHECK(ttype != nullptr);
-          tvm::Tensor tensor = tvm::placeholder(
+          tvm::top::Tensor tensor = tvm::top::placeholder(
               GetShape(ttype->shape), ttype->dtype);
           cache_node->inputs.push_back(tensor);
           inputs.push_back(tensor);
@@ -150,13 +150,13 @@ class ScheduleGetter :
     // Fusion over tupled results may leave identity relationships
     // between inputs and outputs, and those should not be scheduled.
     // Hence schedule only non PlaceholderOp outputs.
-    tvm::Array<Tensor> tensor_outs;
+    tvm::Array<top::Tensor> tensor_outs;
     for (const auto& tensor : cache_node->outputs) {
-      if (!tensor->op.as<PlaceholderOpNode>()) {
+      if (!tensor->op.as<top::PlaceholderOpNode>()) {
         tensor_outs.push_back(tensor);
       }
     }
-    Schedule schedule;
+    top::Schedule schedule;
     // No need to register schedule for device copy op.
     if (master_attrs_.as<DeviceCopyAttrs>() == nullptr) {
       schedule =
@@ -170,27 +170,27 @@ class ScheduleGetter :
     return std::make_pair(schedule, cfunc);
   }
 
-  Array<Tensor> VisitExpr(const Expr& expr) {
+  Array<top::Tensor> VisitExpr(const Expr& expr) {
     auto it = memo_.find(expr);
     if (it != memo_.end()) {
       return it->second;
     } else {
-      Array<Tensor> res = ExprFunctor::VisitExpr(expr);
+      Array<top::Tensor> res = ExprFunctor::VisitExpr(expr);
       memo_[expr] = res;
       return res;
     }
   }
 
-  Array<Tensor> VisitExpr_(const VarNode* op) final {
+  Array<top::Tensor> VisitExpr_(const VarNode* op) final {
     LOG(FATAL) << "Free variable " << op->name_hint();
     return {};
   }
 
-  Array<Tensor> VisitExpr_(const ConstantNode* op) final {
+  Array<top::Tensor> VisitExpr_(const ConstantNode* op) final {
     CHECK(op->is_scalar());
     void* data = op->data->data;
     DataType dtype = DataType(op->data->dtype);
-    Tensor value = tvm::compute({}, [&](const Array<tvm::Var>&) {
+    auto value = top::compute({}, [&](const Array<tvm::Var>&) {
         if (dtype == DataType::Int(32)) {
           return make_const(dtype, static_cast<const int32_t*>(data)[0]);
         } else if (dtype == DataType::Int(64)) {
@@ -210,19 +210,19 @@ class ScheduleGetter :
     return {value};
   }
 
-  Array<Tensor> VisitExpr_(const CallNode* call_node) final {
+  Array<top::Tensor> VisitExpr_(const CallNode* call_node) final {
     static auto fcompute =
         Op::GetAttr<FTVMCompute>("FTVMCompute");
     static auto fpattern =
         Op::GetAttr<TOpPattern>("TOpPattern");
 
-    Array<Tensor> inputs;
+    Array<top::Tensor> inputs;
     int count_tuple = 0;
     for (Expr arg : call_node->args) {
       if (arg->checked_type().as<TupleTypeNode>()) {
         ++count_tuple;
       }
-      for (Tensor tensor : VisitExpr(arg)) {
+      for (top::Tensor tensor : VisitExpr(arg)) {
         inputs.push_back(tensor);
       }
     }
@@ -252,12 +252,12 @@ class ScheduleGetter :
     CHECK(call_node->op.as<OpNode>())
         << "Primitive function only allows call into primitive ops";
     Op op = Downcast<Op>(call_node->op);
-    Array<Tensor> outputs;
+    Array<top::Tensor> outputs;
     // Skip fcompute for device copy operators as it is not registered.
     if (op == device_copy_op_) {
       const auto* copy_input = inputs[0].operator->();
-      outputs.push_back(TensorNode::make(copy_input->shape, copy_input->dtype,
-                                         Operation(), 0));
+      outputs.push_back(top::TensorNode::make(copy_input->shape, copy_input->dtype,
+                                         top::Operation(), 0));
     } else {
       outputs = fcompute[op](call_node->attrs, inputs,
                              call_node_type, target_);
@@ -291,33 +291,33 @@ class ScheduleGetter :
     return outputs;
   }
 
-  Array<Tensor> VisitExpr_(const FunctionNode* op) final {
+  Array<top::Tensor> VisitExpr_(const FunctionNode* op) final {
     LOG(FATAL) << "Do not support sub function";
-    return Array<Tensor>();
+    return Array<top::Tensor>();
   }
 
-  Array<Tensor> VisitExpr_(const LetNode* op) final {
-    Array<Tensor> val = VisitExpr(op->value);
+  Array<top::Tensor> VisitExpr_(const LetNode* op) final {
+    Array<top::Tensor> val = VisitExpr(op->value);
     CHECK(!memo_.count(op->var));
     memo_[op->var] = val;
     return VisitExpr(op->body);
   }
 
-  Array<Tensor> VisitExpr_(const TupleNode* op) final {
-    Array<Tensor> fields;
+  Array<top::Tensor> VisitExpr_(const TupleNode* op) final {
+    Array<top::Tensor> fields;
     for (Expr field : op->fields) {
       CHECK(field->checked_type().as<TensorTypeNode>())
           << "Only allow Tuple of Tensor";
-      Array<Tensor> res = VisitExpr(field);
+      Array<top::Tensor> res = VisitExpr(field);
       CHECK_EQ(res.size(), 1);
       fields.push_back(res[0]);
     }
     return fields;
   }
 
-  Array<Tensor> VisitExpr_(const TupleGetItemNode* op) final {
+  Array<top::Tensor> VisitExpr_(const TupleGetItemNode* op) final {
     const auto* tuple_type = op->tuple->type_as<TupleTypeNode>();
-    Array<Tensor> tuple = VisitExpr(op->tuple);
+    Array<top::Tensor> tuple = VisitExpr(op->tuple);
     CHECK_EQ(tuple_type->fields.size(), tuple.size());
     CHECK_GE(op->index, 0);
     CHECK_LT(static_cast<size_t>(op->index), tuple.size());
@@ -330,28 +330,28 @@ class ScheduleGetter :
   Attrs master_attrs_;
   int master_op_pattern_{0};
   std::ostringstream readable_name_stream_;
-  std::unordered_map<Expr, Array<Tensor>, ObjectHash, ObjectEqual> memo_;
-  Array<Operation> scalars_;
+  std::unordered_map<Expr, Array<top::Tensor>, ObjectHash, ObjectEqual> memo_;
+  Array<top::Operation> scalars_;
   // Cache device copy op for equivalence checking to reduce registry lookup
   // overhead for each invocation of call node when retrieving schedules.
   const Op& device_copy_op_;
 };
 
 // Creates shape function from functor.
-class MakeShapeFunc : public ExprFunctor<Array<Tensor>(const Expr&)> {
+class MakeShapeFunc : public ExprFunctor<Array<top::Tensor>(const Expr&)> {
  public:
   MakeShapeFunc() {}
 
-  std::pair<Schedule, CachedFunc> Create(const Function& prim_func) {
+  std::pair<top::Schedule, CachedFunc> Create(const Function& prim_func) {
     for (auto param : prim_func->params) {
       param_states_[param] = kNoNeed;
-      Array<tvm::Tensor> data_inputs;
-      Array<tvm::Tensor> shape_inputs;
+      Array<tvm::top::Tensor> data_inputs;
+      Array<tvm::top::Tensor> shape_inputs;
 
       auto add_placeholder = [&data_inputs, &shape_inputs](const TensorTypeNode* ttype) {
         // Add data placeholder
         Shape shape = GetShape(ttype->shape);
-        tvm::Tensor data_tensor = tvm::placeholder(shape, ttype->dtype);
+        tvm::top::Tensor data_tensor = tvm::top::placeholder(shape, ttype->dtype);
         data_inputs.push_back(data_tensor);
         // Add shape placeholder
         int64_t ndim = shape.size();
@@ -359,7 +359,7 @@ class MakeShapeFunc : public ExprFunctor<Array<Tensor>(const Expr&)> {
         if (ndim > 0) {
           sshape.push_back(tvm::Integer(ndim));
         }
-        tvm::Tensor shape_tensor = tvm::placeholder(sshape, DataType::Int(64));
+        tvm::top::Tensor shape_tensor = tvm::top::placeholder(sshape, DataType::Int(64));
         shape_inputs.push_back(shape_tensor);
       };
 
@@ -410,12 +410,12 @@ class MakeShapeFunc : public ExprFunctor<Array<Tensor>(const Expr&)> {
 
     CachedFunc cfunc(cache_node);
     // generate schedule for shape func
-    Array<Operation> out_ops;
+    Array<top::Operation> out_ops;
     for (auto t : cache_node->outputs) {
       out_ops.push_back(t->op);
     }
-    auto schedule = create_schedule(out_ops);
-    tvm::schedule::AutoInlineInjective(schedule);
+    auto schedule = top::create_schedule(out_ops);
+    tvm::top::AutoInlineInjective(schedule);
     for (const auto& scalar : scalars_) {
       auto scalar_op = scalar->op;
       if (schedule->Contain(scalar_op)) {
@@ -425,12 +425,12 @@ class MakeShapeFunc : public ExprFunctor<Array<Tensor>(const Expr&)> {
     return std::make_pair(schedule, cfunc);
   }
 
-  Array<Tensor> VisitExpr(const Expr& expr) {
+  Array<top::Tensor> VisitExpr(const Expr& expr) {
     auto it = memo_.find(expr);
     if (it != memo_.end()) {
       return it->second;
     } else {
-      Array<Tensor> res = ExprFunctor::VisitExpr(expr);
+      Array<top::Tensor> res = ExprFunctor::VisitExpr(expr);
       if (expr.as<VarNode>() == nullptr) {
         // Do not memoize vars because shape functions could use either the data
         // or the shape of a var each time.
@@ -440,7 +440,7 @@ class MakeShapeFunc : public ExprFunctor<Array<Tensor>(const Expr&)> {
     }
   }
 
-  Array<Tensor> VisitExpr_(const VarNode* var_node) final {
+  Array<top::Tensor> VisitExpr_(const VarNode* var_node) final {
     auto var = GetRef<Var>(var_node);
     auto it = param_states_.find(var);
     if (it == param_states_.end()) {
@@ -459,14 +459,14 @@ class MakeShapeFunc : public ExprFunctor<Array<Tensor>(const Expr&)> {
     }
   }
 
-  Array<Tensor> VisitExpr_(const ConstantNode* op) final {
+  Array<top::Tensor> VisitExpr_(const ConstantNode* op) final {
     CHECK(data_dependants_.size());
     CHECK(op->is_scalar());
     bool data_dependant = data_dependants_.back();
     if (data_dependant) {
       void* data = op->data->data;
       DataType dtype = DataType(op->data->dtype);
-      Tensor value = tvm::compute({}, [&](const Array<tvm::Var>&) {
+      auto value = tvm::top::compute({}, [&](const Array<tvm::Var>&) {
           if (dtype == DataType::Int(32)) {
             return make_const(dtype, static_cast<const int32_t*>(data)[0]);
           } else if (dtype == DataType::Int(64)) {
@@ -485,7 +485,7 @@ class MakeShapeFunc : public ExprFunctor<Array<Tensor>(const Expr&)> {
       scalars_.push_back(value);
       return {value};
     } else {
-      Tensor value = tvm::compute({}, [&](const Array<tvm::Var>&) {
+      auto value = tvm::top::compute({}, [&](const Array<tvm::Var>&) {
           return make_const(DataType::Int(64), 0);
       }, "shape_const", topi::kBroadcast);
       scalars_.push_back(value);
@@ -493,7 +493,7 @@ class MakeShapeFunc : public ExprFunctor<Array<Tensor>(const Expr&)> {
     }
   }
 
-  Array<Tensor> VisitExpr_(const CallNode* call_node) final {
+  Array<top::Tensor> VisitExpr_(const CallNode* call_node) final {
     static auto fshape_func = Op::GetAttr<FShapeFunc>("FShapeFunc");
     static auto tshape_data_dependant = Op::GetAttr<TShapeDataDependant>(
         "TShapeDataDependant");
@@ -510,13 +510,13 @@ class MakeShapeFunc : public ExprFunctor<Array<Tensor>(const Expr&)> {
 
     data_dependants_.push_back(tshape_data_dependant[op]);
     // Visit all inputs
-    Array<Tensor> inputs;
+    Array<top::Tensor> inputs;
     int count_tuple = 0;
     for (Expr arg : call_node->args) {
       if (arg->checked_type().as<TupleTypeNode>()) {
         ++count_tuple;
       }
-      for (Tensor tensor : VisitExpr(arg)) {
+      for (top::Tensor tensor : VisitExpr(arg)) {
         inputs.push_back(tensor);
       }
     }
@@ -546,24 +546,24 @@ class MakeShapeFunc : public ExprFunctor<Array<Tensor>(const Expr&)> {
     return outputs;
   }
 
-  Array<Tensor> VisitExpr_(const FunctionNode* op) final {
+  Array<top::Tensor> VisitExpr_(const FunctionNode* op) final {
     LOG(FATAL) << "Do not support sub function";
-    return Array<Tensor>();
+    return Array<top::Tensor>();
   }
 
-  Array<Tensor> VisitExpr_(const LetNode* op) final {
-    Array<Tensor> val = VisitExpr(op->value);
+  Array<top::Tensor> VisitExpr_(const LetNode* op) final {
+    Array<top::Tensor> val = VisitExpr(op->value);
     CHECK(!memo_.count(op->var));
     memo_[op->var] = val;
     return VisitExpr(op->body);
   }
 
-  Array<Tensor> VisitExpr_(const TupleNode* op) final {
-    Array<Tensor> fields;
+  Array<top::Tensor> VisitExpr_(const TupleNode* op) final {
+    Array<top::Tensor> fields;
     for (Expr field : op->fields) {
       CHECK(field->checked_type().as<TensorTypeNode>())
         << "Only allow Tuple of Tensor";
-      Array<Tensor> res = VisitExpr(field);
+      Array<top::Tensor> res = VisitExpr(field);
       CHECK_EQ(res.size(), 1);
       fields.push_back(res[0]);
     }
@@ -576,15 +576,15 @@ class MakeShapeFunc : public ExprFunctor<Array<Tensor>(const Expr&)> {
   /*! \brief Map from parameter to its shape function usage state */
   std::unordered_map<Expr, int, ObjectHash, ObjectEqual> param_states_;
   /*! \brief Map from parameter to list of data placeholder */
-  std::unordered_map<Expr, Array<Tensor>, ObjectHash, ObjectEqual> param_data_;
+  std::unordered_map<Expr, Array<top::Tensor>, ObjectHash, ObjectEqual> param_data_;
   /*! \brief Map from parameter to list of shape placeholder */
-  std::unordered_map<Expr, Array<Tensor>, ObjectHash, ObjectEqual> param_shapes_;
+  std::unordered_map<Expr, Array<top::Tensor>, ObjectHash, ObjectEqual> param_shapes_;
   /*! \brief Memoized visit result */
-  std::unordered_map<Expr, Array<Tensor>, ObjectHash, ObjectEqual> memo_;
+  std::unordered_map<Expr, Array<top::Tensor>, ObjectHash, ObjectEqual> memo_;
   /*! \brief Stack of data dependencies for shape function */
   std::vector<bool> data_dependants_;
   /*! \brief Scalars used in the shape function */
-  Array<Tensor> scalars_;
+  Array<top::Tensor> scalars_;
 };
 
 class CompileEngineImpl : public CompileEngineNode {
@@ -672,7 +672,7 @@ class CompileEngineImpl : public CompileEngineNode {
    * \return Pair of schedule and cache.
    *  The funcs field in cache is not yet populated.
    */
-  std::pair<Schedule, CachedFunc> CreateSchedule(
+  std::pair<top::Schedule, CachedFunc> CreateSchedule(
       const Function& source_func, const Target& target) {
     return ScheduleGetter(target).Create(source_func);
   }
@@ -723,8 +723,8 @@ class CompileEngineImpl : public CompileEngineNode {
 
     cache_node->func_name = GetUniqueName(cache_node->func_name);
     // NOTE: array will copy on write.
-    Array<Tensor> all_args = cache_node->inputs;
-    for (Tensor arg : cache_node->outputs) {
+    Array<top::Tensor> all_args = cache_node->inputs;
+    for (top::Tensor arg : cache_node->outputs) {
       all_args.push_back(arg);
     }
     // lower the function
@@ -733,7 +733,7 @@ class CompileEngineImpl : public CompileEngineNode {
           spair.first, all_args, cache_node->func_name, key->source_func);
     } else {
       tvm::BuildConfig bcfg = BuildConfig::Create();
-      std::unordered_map<Tensor, Buffer> binds;
+      std::unordered_map<top::Tensor, Buffer> binds;
       cache_node->funcs = tvm::lower(spair.first, all_args, cache_node->func_name, binds, bcfg);
     }
     value->cached_func = CachedFunc(cache_node);
@@ -763,12 +763,12 @@ class CompileEngineImpl : public CompileEngineNode {
     cache_node->func_name = GetUniqueName(cache_node->func_name);
     cache_node->target = key->target;
 
-    Array<Tensor> all_args = cache_node->inputs;
-    for (Tensor arg : cache_node->outputs) {
+    Array<top::Tensor> all_args = cache_node->inputs;
+    for (top::Tensor arg : cache_node->outputs) {
       all_args.push_back(arg);
     }
     tvm::BuildConfig bcfg = BuildConfig::Create();
-    std::unordered_map<Tensor, Buffer> binds;
+    std::unordered_map<top::Tensor, Buffer> binds;
     cache_node->funcs = tvm::lower(spair.first, all_args, cache_node->func_name, binds, bcfg);
     value->cached_func = CachedFunc(cache_node);
     return value;
index f6c38ba..386eba7 100644 (file)
@@ -51,9 +51,9 @@ struct CachedFuncNode : public Object {
   /*! \brief Function name */
   std::string func_name;
   /* \brief The inputs to the function */
-  tvm::Array<Tensor> inputs;
+  tvm::Array<top::Tensor> inputs;
   /* \brief The outputs to the function */
-  tvm::Array<Tensor> outputs;
+  tvm::Array<top::Tensor> outputs;
   /*! \brief The lowered functions to support the function. */
   tvm::Array<tvm::LoweredFunc> funcs;
   /*! \brief Parameter usage states in the shape function. */
index 3ef7403..7958368 100644 (file)
@@ -30,7 +30,7 @@
 #include <tvm/build_module.h>
 #include <tvm/codegen.h>
 #include <tvm/ir_pass.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 
 #include <typeinfo>
 #include <string>
index e4a34a3..00e47bc 100644 (file)
@@ -22,7 +22,7 @@
  * \brief A compiler from relay::Module to the VM byte code.
  */
 
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/ir/error.h>
 #include <tvm/relay/expr_functor.h>
 #include <tvm/relay/interpreter.h>
index 3d03f88..2aefbd7 100644 (file)
@@ -78,8 +78,8 @@ TVM_ADD_FILELINE)
 .set_attr<TOpIsStateful>("TOpIsStateful", false)
 .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
 .set_attr<FTVMCompute>("FTVMCompute",
-                       [](const Attrs& attrs, const Array<Tensor>& inputs,
-                          const Type& out_dtype, const Target& target) -> Array<Tensor> {
+                       [](const Attrs& attrs, const Array<top::Tensor>& inputs,
+                          const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
                          return {topi::identity(inputs[0])};
                        });
 
@@ -104,8 +104,8 @@ TVM_ADD_FILELINE)
 .set_attr<TOpIsStateful>("TOpIsStateful", false)
 .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
 .set_attr<FTVMCompute>("FTVMCompute",
-                       [](const Attrs& attrs, const Array<Tensor>& inputs,
-                          const Type& out_dtype, const Target& target) -> Array<Tensor> {
+                       [](const Attrs& attrs, const Array<top::Tensor>& inputs,
+                          const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
                          return {topi::identity(inputs[0])};
                        });
 
@@ -122,8 +122,8 @@ Mark the start of bitpacking.
 .set_attr<FInferCorrectLayout>("FInferCorrectLayout",
                                ElemwiseArbitraryLayout)
 .set_attr<FTVMCompute>("FTVMCompute",
-                       [](const Attrs& attrs, const Array<Tensor>& inputs,
-                          const Type& out_dtype, const Target& target) -> Array<Tensor> {
+                       [](const Attrs& attrs, const Array<top::Tensor>& inputs,
+                          const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
                          return {topi::identity(inputs[0])};
                        });
 
@@ -139,8 +139,8 @@ Mark the end of bitpacking.
 .set_attr<FInferCorrectLayout>("FInferCorrectLayout",
                                ElemwiseArbitraryLayout)
 .set_attr<FTVMCompute>("FTVMCompute",
-                       [](const Attrs& attrs, const Array<Tensor>& inputs,
-                          const Type& out_dtype, const Target& target) -> Array<Tensor> {
+                       [](const Attrs& attrs, const Array<top::Tensor>& inputs,
+                          const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
                          return {topi::identity(inputs[0])};
                        });
 
@@ -162,9 +162,9 @@ Mark a checkpoint for checkpointing memory optimization.
 .set_attr<FInferCorrectLayout>("FInferCorrectLayout",
                                ElemwiseArbitraryLayout)
 .set_attr<FTVMCompute>("FTVMCompute",
-                       [](const Attrs& attrs, const Array<Tensor>& inputs,
-                          const Type& out_dtype, const Target& target) -> Array<Tensor> {
-                         Array<Tensor> outputs;
+                       [](const Attrs& attrs, const Array<top::Tensor>& inputs,
+                          const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
+                         Array<top::Tensor> outputs;
                          for (size_t i = 0; i < inputs.size(); ++i) {
                            outputs.push_back(topi::identity(inputs[i]));
                          }
@@ -183,8 +183,8 @@ Beginning of a region that is handled by a given compiler.
 .set_attr<FInferCorrectLayout>("FInferCorrectLayout",
                                ElemwiseArbitraryLayout)
 .set_attr<FTVMCompute>("FTVMCompute",
-                       [](const Attrs& attrs, const Array<Tensor>& inputs,
-                          const Type& out_dtype, const Target& target) -> Array<Tensor> {
+                       [](const Attrs& attrs, const Array<top::Tensor>& inputs,
+                          const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
                          return {topi::identity(inputs[0])};
                        });
 
@@ -208,8 +208,8 @@ End of a region that is handled by a given compiler.
 .set_attr<FInferCorrectLayout>("FInferCorrectLayout",
                                ElemwiseArbitraryLayout)
 .set_attr<FTVMCompute>("FTVMCompute",
-                       [](const Attrs& attrs, const Array<Tensor>& inputs,
-                          const Type& out_dtype, const Target& target) -> Array<Tensor> {
+                       [](const Attrs& attrs, const Array<top::Tensor>& inputs,
+                          const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
                          return {topi::identity(inputs[0])};
                        });
 
index cdfdac0..87e5797 100644 (file)
@@ -35,11 +35,11 @@ namespace relay {
 
 TVM_REGISTER_NODE_TYPE(DebugAttrs);
 
-Array<Tensor> DebugCompute(const Attrs& attrs,
-                           const Array<Tensor>& inputs,
+Array<top::Tensor> DebugCompute(const Attrs& attrs,
+                           const Array<top::Tensor>& inputs,
                            const Type& out_type,
                            const Target& target) {
-  return Array<Tensor>{ topi::identity(inputs[0]) };
+  return Array<top::Tensor>{ topi::identity(inputs[0]) };
 }
 
 RELAY_REGISTER_OP("debug")
index bd3b543..6c4b3ea 100644 (file)
@@ -82,8 +82,8 @@ RELAY_REGISTER_OP("memory.alloc_storage")
     .set_attr<TNonComputational>("TNonComputational", true)
     .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
     .set_attr<FTVMCompute>("FTVMCompute",
-                           [](const Attrs& attrs, const Array<Tensor>& inputs,
-                              const Type& out_dtype, const Target& target) -> Array<Tensor> {
+                           [](const Attrs& attrs, const Array<top::Tensor>& inputs,
+                              const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
                              return {topi::identity(inputs[0])};
                            });
 
@@ -178,8 +178,8 @@ RELAY_REGISTER_OP("memory.alloc_tensor")
     .set_attr<TNonComputational>("TNonComputational", true)
     .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
     .set_attr<FTVMCompute>("FTVMCompute",
-                           [](const Attrs& attrs, const Array<Tensor>& inputs,
-                              const Type& out_dtype, const Target& target) -> Array<Tensor> {
+                           [](const Attrs& attrs, const Array<top::Tensor>& inputs,
+                              const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
                              return {topi::identity(inputs[0])};
                            });
 
@@ -227,8 +227,8 @@ RELAY_REGISTER_OP("memory.invoke_tvm_op")
     .set_attr<TNonComputational>("TNonComputational", true)
     .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
     .set_attr<FTVMCompute>("FTVMCompute",
-                           [](const Attrs& attrs, const Array<Tensor>& inputs,
-                              const Type& out_dtype, const Target& target) -> Array<Tensor> {
+                           [](const Attrs& attrs, const Array<top::Tensor>& inputs,
+                              const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
                              return {topi::identity(inputs[0])};
                            });
 
@@ -251,8 +251,8 @@ RELAY_REGISTER_OP("memory.kill")
     .set_attr<TNonComputational>("TNonComputational", true)
     .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
     .set_attr<FTVMCompute>("FTVMCompute",
-                           [](const Attrs& attrs, const Array<Tensor>& inputs,
-                              const Type& out_dtype, const Target& target) -> Array<Tensor> {
+                           [](const Attrs& attrs, const Array<top::Tensor>& inputs,
+                              const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
                              return {topi::identity(inputs[0])};
                            });
 
@@ -339,8 +339,8 @@ RELAY_REGISTER_OP("memory.shape_func")
     .set_attr<TNonComputational>("TNonComputational", true)
     .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
     .set_attr<FTVMCompute>("FTVMCompute",
-                           [](const Attrs& attrs, const Array<Tensor>& inputs,
-                              const Type& out_dtype, const Target& target) -> Array<Tensor> {
+                           [](const Attrs& attrs, const Array<top::Tensor>& inputs,
+                              const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
                              return {topi::identity(inputs[0])};
                            });
 
index f1d7111..9fca22d 100644 (file)
@@ -93,10 +93,10 @@ RELAY_REGISTER_OP("nn.bias_add")
 .add_argument("bias", "1D Tensor", "Bias.")
 .set_support_level(1)
 .add_type_rel("BiasAdd", BiasAddRel)
-.set_attr<FTVMCompute>("FTVMCompute", [](const Attrs& attrs, const Array<Tensor>& inputs,
+.set_attr<FTVMCompute>("FTVMCompute", [](const Attrs& attrs, const Array<top::Tensor>& inputs,
                                         const Type& out_type, const Target& target) {
     const auto* param = attrs.as<BiasAddAttrs>();
-    return tvm::Array<tvm::Tensor>{topi::nn::bias_add(inputs[0], inputs[1], param->axis)};
+    return tvm::Array<tvm::top::Tensor>{topi::nn::bias_add(inputs[0], inputs[1], param->axis)};
 });
 
 
@@ -233,11 +233,11 @@ RELAY_REGISTER_OP("nn.leaky_relu")
 .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
 .set_attr<FTVMCompute>(
   "FTVMCompute", [](const Attrs& attrs,
-                    const Array<Tensor>& inputs,
+                    const Array<top::Tensor>& inputs,
                     const Type& out_type,
                     const Target& target) {
     const auto* param = attrs.as<LeakyReluAttrs>();
-    return Array<Tensor>{ topi::leaky_relu(inputs[0], param->alpha) };
+    return Array<top::Tensor>{ topi::leaky_relu(inputs[0], param->alpha) };
 });
 
 
@@ -314,11 +314,11 @@ where :math:`*` is an channelwise multiplication for each sample in the batch.
 .set_attr<FInferCorrectLayout>("FInferCorrectLayout", PReluInferCorrectLayout<PReluAttrs>)
 .set_attr<FTVMCompute>(
   "FTVMCompute", [](const Attrs& attrs,
-                    const Array<Tensor>& inputs,
+                    const Array<top::Tensor>& inputs,
                     const Type& out_type,
                     const Target& target) {
     const auto* param = attrs.as<PReluAttrs>();
-    return Array<Tensor>{ topi::prelu(inputs[0], inputs[1], param->axis)};
+    return Array<top::Tensor>{ topi::prelu(inputs[0], inputs[1], param->axis)};
 });
 
 
@@ -350,12 +350,12 @@ RELAY_REGISTER_OP("nn.softmax")
 .set_support_level(1)
 .add_type_rel("Identity", IdentityRel)
 .set_attr<FTVMCompute>("FTVMCompute", [](const Attrs& attrs,
-                                         const Array<Tensor>& inputs,
+                                         const Array<top::Tensor>& inputs,
                                          const Type& out_type,
                                          const Target& target) {
   const auto* param = attrs.as<SoftmaxAttrs>();
   CHECK(param != nullptr);
-  return Array<Tensor>{ topi::nn::softmax(inputs[0], param->axis) };
+  return Array<top::Tensor>{ topi::nn::softmax(inputs[0], param->axis) };
 });
 
 
@@ -384,14 +384,14 @@ RELAY_REGISTER_OP("nn.log_softmax")
 .set_support_level(1)
 .add_type_rel("Identity", IdentityRel)
 .set_attr<FTVMCompute>("FTVMCompute", [](const Attrs& attrs,
-                                         const Array<Tensor>& inputs,
+                                         const Array<top::Tensor>& inputs,
                                          const Type& out_type,
                                          const Target& target) {
   const auto* param = attrs.as<SoftmaxAttrs>();
   CHECK(param != nullptr);
   CHECK(param->axis == -1 || param->axis == static_cast<int32_t>(inputs[0].ndim()) - 1)
       << "log_softmax currently only works on last dimension";
-  return Array<Tensor>{ topi::nn::log_softmax(inputs[0]) };
+  return Array<top::Tensor>{ topi::nn::log_softmax(inputs[0]) };
 });
 
 
@@ -461,10 +461,10 @@ Example::
 .add_type_rel("BatchFlatten", BatchFlattenRel)
 .set_attr<FTVMCompute>(
   "FTVMCompute", [](const Attrs& attrs,
-                    const Array<Tensor>& inputs,
+                    const Array<top::Tensor>& inputs,
                     const Type& out_type,
                     const Target& target) {
-    return Array<Tensor>{ topi::nn::flatten(inputs[0]) };
+    return Array<top::Tensor>{ topi::nn::flatten(inputs[0]) };
 });
 
 
@@ -488,10 +488,10 @@ RELAY_REGISTER_OP("nn.relu")
 .add_type_rel("Identity", IdentityRel)
 .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
 .set_attr<FTVMCompute>("FTVMCompute", [](const Attrs& attrs,
-                                         const Array<Tensor>& inputs,
+                                         const Array<top::Tensor>& inputs,
                                          const Type& out_type,
                                          const Target& target) {
-  return Array<Tensor>{ topi::relu(inputs[0], 0.0f) };
+  return Array<top::Tensor>{ topi::relu(inputs[0], 0.0f) };
 });
 
 
index 72ea70f..b67f939 100644 (file)
@@ -160,8 +160,8 @@ bool PadRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> PadCompute(const Attrs& attrs,
-                         const Array<Tensor>& inputs,
+Array<top::Tensor> PadCompute(const Attrs& attrs,
+                         const Array<top::Tensor>& inputs,
                          const Type& out_type,
                          const Target& target) {
   const auto* param = attrs.as<PadAttrs>();
@@ -180,7 +180,7 @@ Array<Tensor> PadCompute(const Attrs& attrs,
     pad_after.push_back(pad_width[i][1]);
   }
   const auto* out_ttype = out_type.as<TensorTypeNode>();
-  return Array<Tensor>{ topi::pad(inputs[0], pad_before, pad_after,
+  return Array<top::Tensor>{ topi::pad(inputs[0], pad_before, pad_after,
                                   tvm::make_const(out_ttype->dtype, param->pad_value),
                                   "T_pad",
                                   topi::kElementWise,
index 6605225..65fd09d 100644 (file)
@@ -166,8 +166,8 @@ bool Pool2DRel(const Array<Type>& types,
 }
 
 template<typename AttrType, topi::nn::PoolType mode>
-Array<Tensor> Pool2DCompute(const Attrs& attrs,
-                            const Array<Tensor>& inputs,
+Array<top::Tensor> Pool2DCompute(const Attrs& attrs,
+                            const Array<top::Tensor>& inputs,
                             const Type& out_type,
                             const Target& target) {
   static const Layout kNCHW("NCHW");
@@ -203,11 +203,11 @@ Array<Tensor> Pool2DCompute(const Attrs& attrs,
   }
   if (mode == topi::nn::kAvgPool) {
     bool count_include_pad = reinterpret_cast<const AvgPool2DAttrs*>(param)->count_include_pad;
-    return Array<Tensor>{
+    return Array<top::Tensor>{
       topi::nn::pool(inputs[0], pool_size, strides, padding,
                      mode, ceil_mode, layout.name(), count_include_pad)};
   } else {
-    return Array<Tensor>{
+    return Array<top::Tensor>{
       topi::nn::pool(inputs[0], pool_size, strides, padding,
                      mode, ceil_mode, layout.name())};
   }
@@ -333,8 +333,8 @@ bool GlobalPool2DRel(const Array<Type>& types,
 
 
 template<topi::nn::PoolType mode>
-Array<Tensor> GlobalPool2DCompute(const Attrs& attrs,
-                                  const Array<Tensor>& inputs,
+Array<top::Tensor> GlobalPool2DCompute(const Attrs& attrs,
+                                  const Array<top::Tensor>& inputs,
                                   const Type& out_type,
                                   const Target& target) {
   static const Layout kNCHW("NCHW");
@@ -351,7 +351,7 @@ Array<Tensor> GlobalPool2DCompute(const Attrs& attrs,
   CHECK(inputs[0].ndim() == 4U || inputs[0].ndim() == 5U)
     << "Pool2D only support 4-D input (e.g., NCHW)"
     << " or 5-D input (last dimension is a split of channel)";
-  return Array<Tensor>{
+  return Array<top::Tensor>{
     topi::nn::global_pool(inputs[0], mode, layout.name()) };
 }
 
@@ -467,8 +467,8 @@ bool AdaptivePool2DRel(const Array<Type>& types,
 }
 
 template<topi::nn::PoolType mode>
-Array<Tensor> AdaptivePool2DCompute(const Attrs& attrs,
-                                    const Array<Tensor>& inputs,
+Array<top::Tensor> AdaptivePool2DCompute(const Attrs& attrs,
+                                    const Array<top::Tensor>& inputs,
                                     const Type& out_type,
                                     const Target& target) {
   static const Layout kNCHW("NCHW");
@@ -500,7 +500,7 @@ Array<Tensor> AdaptivePool2DCompute(const Attrs& attrs,
     output_height = output_size[0];
     output_width = output_size[1];
   }
-  return Array<Tensor>{
+  return Array<top::Tensor>{
     topi::nn::adaptive_pool(inputs[0], Array<IndexExpr>{ output_height, output_width },
                             mode, layout.name()) };
 }
@@ -596,7 +596,7 @@ bool Pool2DGradRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 }
 
 template <typename AttrType, topi::nn::PoolType mode>
-Array<Tensor> Pool2DGradCompute(const Attrs& attrs, const Array<Tensor>& inputs,
+Array<top::Tensor> Pool2DGradCompute(const Attrs& attrs, const Array<top::Tensor>& inputs,
                                 const Type& out_type, const Target& target) {
   static const Layout kNCHW("NCHW");
   const auto* param = attrs.as<AttrType>();
@@ -633,10 +633,10 @@ Array<Tensor> Pool2DGradCompute(const Attrs& attrs, const Array<Tensor>& inputs,
   }
   if (mode == topi::nn::kAvgPool) {
     bool count_include_pad = reinterpret_cast<const AvgPool2DAttrs*>(param)->count_include_pad;
-    return Array<Tensor>{topi::nn::pool_grad(inputs[0], inputs[1], pool_size, strides, padding,
+    return Array<top::Tensor>{topi::nn::pool_grad(inputs[0], inputs[1], pool_size, strides, padding,
         mode, ceil_mode, layout.name(), count_include_pad)};
   } else {
-    return Array<Tensor>{topi::nn::pool_grad(inputs[0], inputs[1], pool_size, strides, padding,
+    return Array<top::Tensor>{topi::nn::pool_grad(inputs[0], inputs[1], pool_size, strides, padding,
         mode, ceil_mode, layout.name())};
   }
 }
@@ -798,8 +798,8 @@ bool Pool1DRel(const Array<Type>& types,
 
 
 template<typename AttrType, topi::nn::PoolType mode>
-Array<Tensor> Pool1DCompute(const Attrs& attrs,
-                            const Array<Tensor>& inputs,
+Array<top::Tensor> Pool1DCompute(const Attrs& attrs,
+                            const Array<top::Tensor>& inputs,
                             const Type& out_type,
                             const Target& target) {
   static const Layout kNCW("NCW");
@@ -825,11 +825,11 @@ Array<Tensor> Pool1DCompute(const Attrs& attrs,
 
   if (mode == topi::nn::kAvgPool) {
     bool count_include_pad = reinterpret_cast<const AvgPool1DAttrs*>(param)->count_include_pad;
-    return Array<Tensor>{
+    return Array<top::Tensor>{
       topi::nn::pool1d(inputs[0], pool_size, strides, padding,
                        mode, ceil_mode, layout.name(), count_include_pad)};
   } else {
-    return Array<Tensor>{
+    return Array<top::Tensor>{
       topi::nn::pool1d(inputs[0], pool_size, strides, padding,
                        mode, ceil_mode, layout.name())};
   }
@@ -993,8 +993,8 @@ bool Pool3DRel(const Array<Type>& types,
 
 
 template<typename AttrType, topi::nn::PoolType mode>
-Array<Tensor> Pool3DCompute(const Attrs& attrs,
-                            const Array<Tensor>& inputs,
+Array<top::Tensor> Pool3DCompute(const Attrs& attrs,
+                            const Array<top::Tensor>& inputs,
                             const Type& out_type,
                             const Target& target) {
   static const Layout kNCDHW("NCDHW");
@@ -1033,11 +1033,11 @@ Array<Tensor> Pool3DCompute(const Attrs& attrs,
   }
   if (mode == topi::nn::kAvgPool) {
     bool count_include_pad = reinterpret_cast<const AvgPool3DAttrs*>(param)->count_include_pad;
-    return Array<Tensor>{
+    return Array<top::Tensor>{
       topi::nn::pool3d(inputs[0], pool_size, strides, padding,
                        mode, ceil_mode, layout.name(), count_include_pad)};
   } else {
-    return Array<Tensor>{
+    return Array<top::Tensor>{
       topi::nn::pool3d(inputs[0], pool_size, strides, padding,
                        mode, ceil_mode, layout.name())};
   }
index f2f8996..00ebddb 100644 (file)
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- * 
+ *
  *   http://www.apache.org/licenses/LICENSE-2.0
- * 
+ *
  * Unless required by applicable law or agreed to in writing,
  * software distributed under the License is distributed on an
  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -32,9 +32,9 @@ namespace relay {
 
 #define RELAY_BINARY_COMPUTE(FTOPI)                        \
   [] (const Attrs& attrs,                                  \
-      const Array<Tensor>& inputs,                         \
+      const Array<top::Tensor>& inputs,                         \
       const Type& out_type,                                \
-      const Target& target) -> Array<Tensor> {             \
+      const Target& target) -> Array<top::Tensor> {             \
     CHECK_EQ(inputs.size(), 2U);                           \
     return {FTOPI(inputs[0], inputs[1])};                  \
   }                                                        \
index dde3ef2..e20b7cf 100644 (file)
@@ -173,8 +173,8 @@ Array<Array<Layout>> ReduceInferCorrectLayout(const Attrs& attrs,
 }
 
 template<typename F>
-Array<Tensor> ReduceCompute(const Attrs& attrs,
-                            const Array<Tensor>& inputs,
+Array<top::Tensor> ReduceCompute(const Attrs& attrs,
+                            const Array<top::Tensor>& inputs,
                             const Type& out_type,
                             const Target& target,
                             F f) {
@@ -320,8 +320,8 @@ bool ReduceRel(const Array<Type>& types,
   .add_argument("data", "Tensor", "The input tensor.")
 
 
-Array<Tensor> ArgMaxCompute(const Attrs& attrs,
-                            const Array<Tensor>& inputs,
+Array<top::Tensor> ArgMaxCompute(const Attrs& attrs,
+                            const Array<top::Tensor>& inputs,
                             const Type& out_type,
                             const Target& target) {
   return ReduceCompute(attrs, inputs, out_type, target, topi::argmax);
@@ -340,8 +340,8 @@ values over a given axis.
 .set_attr<TOpPattern>("TOpPattern", kCommReduce);
 
 
-Array<Tensor> ArgMinCompute(const Attrs& attrs,
-                            const Array<Tensor>& inputs,
+Array<top::Tensor> ArgMinCompute(const Attrs& attrs,
+                            const Array<top::Tensor>& inputs,
                             const Type& out_type,
                             const Target& target) {
   return ReduceCompute(attrs, inputs, out_type, target, topi::argmin);
@@ -358,8 +358,8 @@ values over a given axis.
 .set_attr<FTVMCompute>("FTVMCompute", ArgMinCompute)
 .set_attr<TOpPattern>("TOpPattern", kCommReduce);
 
-Array<Tensor> SumCompute(const Attrs& attrs,
-                         const Array<Tensor>& inputs,
+Array<top::Tensor> SumCompute(const Attrs& attrs,
+                         const Array<top::Tensor>& inputs,
                          const Type& out_type,
                          const Target& target) {
   return ReduceCompute(attrs, inputs, out_type, target, topi::sum);
@@ -392,8 +392,8 @@ Example::
 .set_attr<TOpPattern>("TOpPattern", kCommReduce);
 
 
-Array<Tensor> AllCompute(const Attrs& attrs,
-                         const Array<Tensor>& inputs,
+Array<top::Tensor> AllCompute(const Attrs& attrs,
+                         const Array<top::Tensor>& inputs,
                          const Type& out_type,
                          const Target& target) {
   return ReduceCompute(attrs, inputs, out_type, target, topi::all);
@@ -429,8 +429,8 @@ Example::
 .set_attr<TOpPattern>("TOpPattern", kCommReduce);
 
 
-Array<Tensor> AnyCompute(const Attrs& attrs,
-                         const Array<Tensor>& inputs,
+Array<top::Tensor> AnyCompute(const Attrs& attrs,
+                         const Array<top::Tensor>& inputs,
                          const Type& out_type,
                          const Target& target) {
   return ReduceCompute(attrs, inputs, out_type, target, topi::any);
@@ -466,8 +466,8 @@ Example::
 .set_attr<TOpPattern>("TOpPattern", kCommReduce);
 
 
-Array<Tensor> MaxCompute(const Attrs& attrs,
-                         const Array<Tensor>& inputs,
+Array<top::Tensor> MaxCompute(const Attrs& attrs,
+                         const Array<top::Tensor>& inputs,
                          const Type& out_type,
                          const Target& target) {
   return ReduceCompute(attrs, inputs, out_type, target, topi::max);
@@ -484,8 +484,8 @@ RELAY_REGISTER_REDUCE_OP("max")
 .set_attr<TOpPattern>("TOpPattern", kCommReduce);
 
 
-Array<Tensor> MinCompute(const Attrs& attrs,
-                         const Array<Tensor>& inputs,
+Array<top::Tensor> MinCompute(const Attrs& attrs,
+                         const Array<top::Tensor>& inputs,
                          const Type& out_type,
                          const Target& target) {
   return ReduceCompute(attrs, inputs, out_type, target, topi::min);
@@ -503,8 +503,8 @@ RELAY_REGISTER_REDUCE_OP("min")
 .set_attr<TOpPattern>("TOpPattern", kCommReduce);
 
 
-Array<Tensor> ProdCompute(const Attrs& attrs,
-                          const Array<Tensor>& inputs,
+Array<top::Tensor> ProdCompute(const Attrs& attrs,
+                          const Array<top::Tensor>& inputs,
                           const Type& out_type,
                           const Target& target) {
   return ReduceCompute(attrs, inputs, out_type, target, topi::prod);
@@ -533,8 +533,8 @@ Example::
 .set_attr<TOpPattern>("TOpPattern", kCommReduce);
 
 
-Array<Tensor> MeanCompute(const Attrs& attrs,
-                          const Array<Tensor>& inputs,
+Array<top::Tensor> MeanCompute(const Attrs& attrs,
+                          const Array<top::Tensor>& inputs,
                           const Type& out_type,
                           const Target& target) {
   IndexExpr count = make_const(inputs[0]->dtype, 1);
@@ -598,8 +598,8 @@ bool VarianceRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> VarianceCompute(const Attrs& attrs,
-                              const Array<Tensor>& inputs,
+Array<top::Tensor> VarianceCompute(const Attrs& attrs,
+                              const Array<top::Tensor>& inputs,
                               const Type& out_type,
                               const Target& target) {
   IndexExpr count = make_const(inputs[0]->dtype, 1);
index 59bb432..c2af56d 100644 (file)
@@ -65,8 +65,8 @@ bool CastRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> CastCompute(const Attrs& attrs,
-                          const Array<Tensor>& inputs,
+Array<top::Tensor> CastCompute(const Attrs& attrs,
+                          const Array<top::Tensor>& inputs,
                           const Type& out_type,
                           const Target& target) {
   const CastAttrs *param = attrs.as<CastAttrs>();
@@ -125,8 +125,8 @@ bool CastLikeRel(const Array<Type>& types,
 }
 
 
-Array<Tensor> CastLikeCompute(const Attrs& attrs,
-                              const Array<Tensor>& inputs,
+Array<top::Tensor> CastLikeCompute(const Attrs& attrs,
+                              const Array<top::Tensor>& inputs,
                               const Type& out_type,
                               const Target& target) {
   return { topi::cast(inputs[0], inputs[1]->dtype) };
@@ -156,7 +156,7 @@ RELAY_REGISTER_OP("cast_like")
 .set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout);
 
 
-Array<Tensor> ReinterpretCompute(const Attrs& attrs, const Array<Tensor>& inputs,
+Array<top::Tensor> ReinterpretCompute(const Attrs& attrs, const Array<top::Tensor>& inputs,
                                  const Type& out_type, const Target& target) {
   const CastAttrs* param = attrs.as<CastAttrs>();
   CHECK(param != nullptr);
@@ -230,8 +230,8 @@ bool ExpandDimsRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> ExpandDimsCompute(const Attrs& attrs,
-                                const Array<Tensor>& inputs,
+Array<top::Tensor> ExpandDimsCompute(const Attrs& attrs,
+                                const Array<top::Tensor>& inputs,
                                 const Type& out_type,
                                 const Target& target) {
   const ExpandDimsAttrs *param = attrs.as<ExpandDimsAttrs>();
@@ -269,8 +269,8 @@ RELAY_REGISTER_OP("expand_dims")
 // relay.concatenate
 TVM_REGISTER_NODE_TYPE(ConcatenateAttrs);
 
-Array<Tensor> ConcatenateCompute(const Attrs& attrs,
-                          const Array<Tensor>& inputs,
+Array<top::Tensor> ConcatenateCompute(const Attrs& attrs,
+                          const Array<top::Tensor>& inputs,
                           const Type& out_type,
                           const Target& target) {
   const ConcatenateAttrs *param = attrs.as<ConcatenateAttrs>();
@@ -412,8 +412,8 @@ bool StackRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> StackCompute(const Attrs& attrs,
-                           const Array<Tensor>& inputs,
+Array<top::Tensor> StackCompute(const Attrs& attrs,
+                           const Array<top::Tensor>& inputs,
                            const Type& out_type,
                            const Target& target) {
   const StackAttrs *param = attrs.as<StackAttrs>();
@@ -504,13 +504,13 @@ bool TransposeRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> TransposeCompute(const Attrs& attrs,
-                               const Array<Tensor>& inputs,
+Array<top::Tensor> TransposeCompute(const Attrs& attrs,
+                               const Array<top::Tensor>& inputs,
                                const Type& out_type,
                                const Target& target) {
   const auto* param = attrs.as<TransposeAttrs>();
   CHECK(param != nullptr);
-  return Array<Tensor>{ topi::transpose(inputs[0], param->axes) };
+  return Array<top::Tensor>{ topi::transpose(inputs[0], param->axes) };
 }
 
 Expr MakeTranspose(Expr data,
@@ -687,8 +687,8 @@ bool ReshapeRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> ReshapeCompute(const Attrs& attrs,
-                             const Array<Tensor>& inputs,
+Array<top::Tensor> ReshapeCompute(const Attrs& attrs,
+                             const Array<top::Tensor>& inputs,
                              const Type& out_type,
                              const Target& target) {
   const auto* out_ttype = out_type.as<TensorTypeNode>();
@@ -922,16 +922,16 @@ bool TakeRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> TakeCompute(const Attrs& attrs,
-                          const Array<Tensor>& inputs,
+Array<top::Tensor> TakeCompute(const Attrs& attrs,
+                          const Array<top::Tensor>& inputs,
                           const Type& out_type,
                           const Target& target) {
   const auto* param = attrs.as<TakeAttrs>();
   CHECK(param != nullptr);
   if (!param->axis.defined()) {
-    return Array<Tensor>{ topi::take(inputs[0], inputs[1], param->mode) };
+    return Array<top::Tensor>{ topi::take(inputs[0], inputs[1], param->mode) };
   } else {
-    return Array<Tensor>{ topi::take(inputs[0], inputs[1], param->axis, param->mode) };
+    return Array<top::Tensor>{ topi::take(inputs[0], inputs[1], param->axis, param->mode) };
   }
 }
 
@@ -1009,8 +1009,8 @@ bool FullRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> FullCompute(const Attrs& attrs,
-                          const Array<Tensor>& inputs,
+Array<top::Tensor> FullCompute(const Attrs& attrs,
+                          const Array<top::Tensor>& inputs,
                           const Type& out_type,
                           const Target& target) {
   const auto* out_ttype = out_type.as<TensorTypeNode>();
@@ -1117,8 +1117,8 @@ bool FullLikeRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> FullLikeCompute(const Attrs& attrs,
-                              const Array<Tensor>& inputs,
+Array<top::Tensor> FullLikeCompute(const Attrs& attrs,
+                              const Array<top::Tensor>& inputs,
                               const Type& out_type,
                               const Target& target) {
   return { topi::full_like(inputs[0], inputs[1]()) };
@@ -1217,26 +1217,26 @@ bool ArangeRel(const Array<Type>& types,
   }
 }
 
-inline Tensor DynamicArange(const tvm::Tensor& start,
-                            const tvm::Tensor& stop,
-                            const tvm::Tensor& step,
-                            tvm::DataType dtype,
-                            std::string name = "tensor",
-                            std::string tag = topi::kInjective) {
+inline top::Tensor DynamicArange(const top::Tensor& start,
+                                 const top::Tensor& stop,
+                                 const top::Tensor& step,
+                                 tvm::DataType dtype,
+                                 std::string name = "tensor",
+                                 std::string tag = topi::kInjective) {
   tvm::PrimExpr num_elem = tvm::Var("num_elem");
-  return tvm::compute({num_elem}, [&](const Array<tvm::Var>& indices) {
+  return top::compute({num_elem}, [&](const Array<tvm::Var>& indices) {
     return tvm::cast(dtype, start[0] + step[0] * indices[0]);
   }, name, tag);
 }
 
-Array<Tensor> ArangeCompute(const Attrs& attrs,
-                            const Array<Tensor>& inputs,
-                            const Type& out_type,
-                            const Target& target) {
+Array<top::Tensor> ArangeCompute(const Attrs& attrs,
+                                 const Array<top::Tensor>& inputs,
+                                 const Type& out_type,
+                                 const Target& target) {
   const ArangeAttrs* param = attrs.as<ArangeAttrs>();
-  Tensor start = inputs[0];
-  Tensor stop =  inputs[1];
-  Tensor step = inputs[2];
+  top::Tensor start = inputs[0];
+  top::Tensor stop =  inputs[1];
+  top::Tensor step = inputs[2];
   Array<tvm::PrimExpr> empty = {0};
   return { DynamicArange(start, stop, step, param->dtype) };
 }
@@ -1324,8 +1324,8 @@ bool RepeatRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> RepeatCompute(const Attrs& attrs,
-                            const Array<Tensor>& inputs,
+Array<top::Tensor> RepeatCompute(const Attrs& attrs,
+                            const Array<top::Tensor>& inputs,
                             const Type& out_type,
                             const Target& target) {
   const RepeatAttrs *param = attrs.as<RepeatAttrs>();
@@ -1435,8 +1435,8 @@ bool TileRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> TileCompute(const Attrs& attrs,
-                          const Array<Tensor>& inputs,
+Array<top::Tensor> TileCompute(const Attrs& attrs,
+                          const Array<top::Tensor>& inputs,
                           const Type& out_type,
                           const Target& target) {
   const TileAttrs *param = attrs.as<TileAttrs>();
@@ -1496,8 +1496,8 @@ bool ReverseRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> ReverseCompute(const Attrs& attrs,
-                             const Array<Tensor>& inputs,
+Array<top::Tensor> ReverseCompute(const Attrs& attrs,
+                             const Array<top::Tensor>& inputs,
                              const Type& out_type,
                              const Target& target) {
   const ReverseAttrs *param = attrs.as<ReverseAttrs>();
@@ -1570,8 +1570,8 @@ Expr MakeWhere(const Expr& condition, const Expr& x, const Expr& y) {
   return CallNode::make(op, {condition, x, y});
 }
 
-Array<Tensor> WhereCompute(const Attrs& attrs,
-                           const Array<Tensor>& inputs,
+Array<top::Tensor> WhereCompute(const Attrs& attrs,
+                           const Array<top::Tensor>& inputs,
                            const Type& out_type,
                            const Target& target) {
   return { topi::where(inputs[0], inputs[1], inputs[2]) };
@@ -1687,8 +1687,8 @@ bool SqueezeRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> SqueezeCompute(const Attrs& attrs,
-                             const Array<Tensor>& inputs,
+Array<top::Tensor> SqueezeCompute(const Attrs& attrs,
+                             const Array<top::Tensor>& inputs,
                              const Type& out_type,
                              const Target& target) {
   const SqueezeAttrs *param = attrs.as<SqueezeAttrs>();
@@ -1728,8 +1728,8 @@ Expr MakeCollapseSumLike(Expr data,
   return CallNode::make(op, {data, collapse_type}, Attrs(), {});
 }
 
-Array<Tensor> CollapseSumLikeCompute(const Attrs& attrs,
-                                     const Array<Tensor>& inputs,
+Array<top::Tensor> CollapseSumLikeCompute(const Attrs& attrs,
+                                     const Array<top::Tensor>& inputs,
                                      const Type& out_type,
                                      const Target& target) {
   const auto* out_ttype = out_type.as<TensorTypeNode>();
@@ -1773,8 +1773,8 @@ Expr MakeBroadCastTo(Expr data, Array<IndexExpr> shape) {
   return CallNode::make(op, {data}, Attrs(attrs), {});
 }
 
-Array<Tensor> BroadCastToCompute(const Attrs& attrs,
-                                 const Array<Tensor>& inputs,
+Array<top::Tensor> BroadCastToCompute(const Attrs& attrs,
+                                 const Array<top::Tensor>& inputs,
                                  const Type& out_type,
                                  const Target& target) {
   auto ioattrs = attrs.as<InitOpAttrs>();
@@ -1811,8 +1811,8 @@ Expr MakeBroadCastToLike(Expr data,
   return CallNode::make(op, {data, broadcast_type}, Attrs(), {});
 }
 
-Array<Tensor> BroadCastToLikeCompute(const Attrs& attrs,
-                                     const Array<Tensor>& inputs,
+Array<top::Tensor> BroadCastToLikeCompute(const Attrs& attrs,
+                                     const Array<top::Tensor>& inputs,
                                      const Type& out_type,
                                      const Target& target) {
   const auto* out_ttype = out_type.as<TensorTypeNode>();
@@ -2018,13 +2018,13 @@ Expr MakeStridedSlice(Expr data,
   return CallNode::make(op, {data}, Attrs(attrs), {});
 }
 
-Array<Tensor> StridedSliceCompute(const Attrs& attrs,
-                                  const Array<Tensor>& inputs,
+Array<top::Tensor> StridedSliceCompute(const Attrs& attrs,
+                                  const Array<top::Tensor>& inputs,
                                   const Type& out_type,
                                   const Target& target) {
   const StridedSliceAttrs *param = attrs.as<StridedSliceAttrs>();
   CHECK(param != nullptr);
-  return Array<Tensor>{
+  return Array<top::Tensor>{
     topi::strided_slice(inputs[0], param->begin, param->end, param->strides)
   };
 }
@@ -2175,8 +2175,8 @@ bool SplitRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> SplitCompute(const Attrs& attrs,
-                           const Array<Tensor>& inputs,
+Array<top::Tensor> SplitCompute(const Attrs& attrs,
+                           const Array<top::Tensor>& inputs,
                            const Type& out_type,
                            const Target& target) {
   const auto param = attrs.as<SplitAttrs>();
@@ -2184,11 +2184,11 @@ Array<Tensor> SplitCompute(const Attrs& attrs,
 
   if (const IntImmNode* sections = param->indices_or_sections.as<IntImmNode>()) {
     int64_t num_sections = sections->value;
-    return Array<Tensor>{
+    return Array<top::Tensor>{
       topi::split_sections(inputs[0], num_sections, param->axis) };
   } else {
     auto indices = Downcast<Array<Integer> >(param->indices_or_sections);
-    return Array<Tensor>{ topi::split(inputs[0], indices, param->axis) };
+    return Array<top::Tensor>{ topi::split(inputs[0], indices, param->axis) };
   }
 }
 
@@ -2304,8 +2304,8 @@ Expr MakeSliceLike(Expr data,
   return CallNode::make(op, {data, shape_like}, Attrs(attrs), {});
 }
 
-Array<Tensor> SliceLikeCompute(const Attrs& attrs,
-                               const Array<Tensor>& inputs,
+Array<top::Tensor> SliceLikeCompute(const Attrs& attrs,
+                               const Array<top::Tensor>& inputs,
                                const Type& out_type,
                                const Target& target) {
   const auto* param = attrs.as<SliceLikeAttrs>();
@@ -2342,7 +2342,7 @@ Array<Tensor> SliceLikeCompute(const Attrs& attrs,
         << topi::GetConstInt(src_shape[axis]);
     }
   }
-  return Array<Tensor>{
+  return Array<top::Tensor>{
     topi::strided_slice(inputs[0],
                         GetIntArray(begin_idx),
                         GetIntArray(end_idx),
@@ -2370,13 +2370,13 @@ RELAY_REGISTER_OP("slice_like")
 // relay.layout_transform
 TVM_REGISTER_NODE_TYPE(LayoutTransformAttrs);
 
-Array<Tensor> LayoutTransformCompute(const Attrs& attrs,
-                                     const Array<Tensor>& inputs,
+Array<top::Tensor> LayoutTransformCompute(const Attrs& attrs,
+                                     const Array<top::Tensor>& inputs,
                                      const Type& out_type,
                                      const Target& target) {
   const auto* param = attrs.as<LayoutTransformAttrs>();
   CHECK(param != nullptr);
-  return Array<Tensor>{
+  return Array<top::Tensor>{
     topi::layout_transform(inputs[0], param->src_layout, param->dst_layout)
   };
 }
@@ -2503,8 +2503,8 @@ bool GatherNDRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> GatherNDCompute(const Attrs& attrs,
-                              const Array<Tensor>& inputs,
+Array<top::Tensor> GatherNDCompute(const Attrs& attrs,
+                              const Array<top::Tensor>& inputs,
                               const Type& out_type,
                               const Target& target) {
   return { topi::gather_nd(inputs[0], inputs[1]) };
@@ -2557,13 +2557,14 @@ bool SequenceMaskRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> SequenceMaskCompute(const Attrs& attrs,
-                                  const Array<Tensor>& inputs,
+Array<top::Tensor> SequenceMaskCompute(const Attrs& attrs,
+                                  const Array<top::Tensor>& inputs,
                                   const Type& out_type,
                                   const Target& target) {
   const auto* param = attrs.as<SequenceMaskAttrs>();
   CHECK(param != nullptr);
-  return Array<Tensor>{ topi::sequence_mask(inputs[0], inputs[1], param->mask_value, param->axis) };
+  return Array<top::Tensor>{
+    topi::sequence_mask(inputs[0], inputs[1], param->mask_value, param->axis) };
 }
 
 Expr MakeSequenceMask(Expr data,
@@ -2669,13 +2670,13 @@ bool OneHotRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> OneHotCompute(const Attrs& attrs,
-                            const Array<Tensor>& inputs,
+Array<top::Tensor> OneHotCompute(const Attrs& attrs,
+                            const Array<top::Tensor>& inputs,
                             const Type& out_type,
                             const Target& target) {
   const auto* param = attrs.as<OneHotAttrs>();
   CHECK(param != nullptr);
-  return Array<Tensor> {
+  return Array<top::Tensor> {
     topi::one_hot(inputs[0],
                   inputs[1](),
                   inputs[2](),
index 5189f7d..331653b 100644 (file)
@@ -34,9 +34,9 @@ namespace relay {
 
 #define RELAY_UNARY_COMPUTE(FTOPI)                      \
   [] (const Attrs& attrs,                               \
-      const Array<Tensor>& inputs,                      \
+      const Array<top::Tensor>& inputs,                      \
       const Type& out_type,                             \
-      const Target& target) -> Array<Tensor> {          \
+      const Target& target) -> Array<top::Tensor> {          \
     return {FTOPI(inputs[0])};                          \
   }                                                     \
 
@@ -290,8 +290,8 @@ bool ShapeOfRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> ShapeOfCompute(const Attrs& attrs,
-                             const Array<Tensor>& inputs,
+Array<top::Tensor> ShapeOfCompute(const Attrs& attrs,
+                             const Array<top::Tensor>& inputs,
                              const Type& out_type,
                              const Target& target) {
   CHECK_EQ(inputs.size(), 1);
@@ -341,14 +341,14 @@ bool NdarraySizeRel(const Array<Type>& types,
   return true;
 }
 
-Array<Tensor> NdarraySizeCompute(const Attrs& attrs,
-                          const Array<Tensor>& inputs,
+Array<top::Tensor> NdarraySizeCompute(const Attrs& attrs,
+                          const Array<top::Tensor>& inputs,
                           const Type& out_type,
                           const Target& target) {
   CHECK_EQ(inputs.size(), 1);
   const auto* param = attrs.as<NdarraySizeAttrs>();
   CHECK(param != nullptr);
-  return Array<Tensor>{topi::ndarray_size(inputs[0], param->dtype)};
+  return Array<top::Tensor>{topi::ndarray_size(inputs[0], param->dtype)};
 }
 
 TVM_REGISTER_GLOBAL("relay.op.contrib._make.ndarray_size")
index 616dc2a..9964a82 100644 (file)
@@ -82,12 +82,12 @@ Its function is mostly shape transform.")doc" TVM_ADD_FILELINE)
 .set_attrs_type<YoloReorgAttrs>()
 .add_type_rel("YoloReorg", YoloReorgRel)
 .set_attr<FTVMCompute>("FTVMCompute", [](const Attrs& attrs,
-                                         const Array<Tensor>& inputs,
+                                         const Array<top::Tensor>& inputs,
                                          const Type& out_type,
                                          const Target& target) {
   const auto* params = attrs.as<YoloReorgAttrs>();
   CHECK(params != nullptr);
-  return Array<Tensor>{ topi::vision::reorg(inputs[0], params->stride) };
+  return Array<top::Tensor>{ topi::vision::reorg(inputs[0], params->stride) };
 });
 
 }  // namespace relay
index b027e5e..9746182 100644 (file)
@@ -28,7 +28,7 @@
 #include <tvm/relay/op_attr_types.h>
 #include <tvm/relay/attrs/transform.h>
 #include <tvm/relay/transform.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tuple>
 #include <vector>
 #include <functional>
@@ -78,10 +78,10 @@ class AlterTransformMemorizer : public TransformMemorizer {
     Expr new_e;
     bool modified = false;
     if (falter_layout.count(op)) {
-      tvm::Array<tvm::Tensor> tinfos;
+      tvm::Array<tvm::top::Tensor> tinfos;
       for (auto expr : ref_call->args) {
         auto ttype = expr->type_as<TensorTypeNode>();
-        tinfos.push_back(tvm::placeholder(ttype->shape, ttype->dtype));
+        tinfos.push_back(tvm::top::placeholder(ttype->shape, ttype->dtype));
       }
       Expr altered_value = falter_layout[op](ref_call->attrs, new_args, tinfos);
       if (altered_value.defined()) {
index 20007d2..d435efd 100644 (file)
@@ -28,7 +28,7 @@
 #include <tvm/relay/op_attr_types.h>
 #include <tvm/relay/attrs/transform.h>
 #include <tvm/relay/transform.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tuple>
 #include <vector>
 #include <functional>
@@ -86,10 +86,10 @@ class ConvertTransformMemorizer : public TransformMemorizer {
     Expr new_e;
     bool modified = false;
     if (fconvert_layout.count(op)) {
-      tvm::Array<tvm::Tensor> tinfos;
+      tvm::Array<tvm::top::Tensor> tinfos;
       for (auto expr : ref_call->args) {
         auto ttype = expr->type_as<TensorTypeNode>();
-        tinfos.push_back(tvm::placeholder(ttype->shape, ttype->dtype));
+        tinfos.push_back(tvm::top::placeholder(ttype->shape, ttype->dtype));
       }
       Expr altered_value =
           fconvert_layout[op](ref_call->attrs, new_args, tinfos, operator->()->desired_layout_);
index e236de7..8d89c0a 100644 (file)
@@ -23,7 +23,7 @@
  */
 
 #include <tvm/lowered_func.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/relay/expr_functor.h>
 #include <tvm/relay/analysis.h>
 #include <tvm/relay/transform.h>
index 6360808..12e72cf 100644 (file)
@@ -23,7 +23,7 @@
  * shape, dtype or layout to another op or a sequence of ops.
  */
 
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/relay/expr_functor.h>
 #include <tvm/relay/op_attr_types.h>
 #include <tvm/relay/transform.h>
similarity index 96%
rename from src/op/compute_op.cc
rename to src/top/operation/compute_op.cc
index b82bab5..a8c2328 100644 (file)
@@ -21,7 +21,7 @@
  * \brief Compute Op.
  * \file compute_op.cc
  */
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/arith/analyzer.h>
 #include <tvm/ir.h>
 #include <tvm/ir_pass.h>
 #include "compute_op.h"
 #include "op_util.h"
 #include "../schedule/message_passing.h"
-#include "../arith/compute_expr.h"
-#include "../arith/interval_set.h"
+#include "../../arith/compute_expr.h"
+#include "../../arith/interval_set.h"
 
 namespace tvm {
-
+namespace top {
 using namespace ir;
 
 TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable)
@@ -184,7 +184,7 @@ Operation ComputeOpNode::ReplaceInputs(
   if (this->body[0]->IsInstance<ir::ReduceNode>()) {
     // Specially handle reduce so the replaced op
     // still share all the components
-    PrimExpr new_reduce = op::ReplaceTensor(this->body[0], rmap);
+    PrimExpr new_reduce = top::ReplaceTensor(this->body[0], rmap);
     if (!new_reduce.same_as(this->body[0])) {
       const ir::ReduceNode* r = new_reduce.as<ir::ReduceNode>();
       for (size_t k = 0; k < this->body.size(); ++k) {
@@ -198,7 +198,7 @@ Operation ComputeOpNode::ReplaceInputs(
     }
   } else {
     arr = UpdateArray(this->body, [&rmap] (const PrimExpr& e) {
-        return op::ReplaceTensor(e, rmap);
+        return top::ReplaceTensor(e, rmap);
       });
   }
   if (!arr.same_as(this->body)) {
@@ -363,8 +363,8 @@ Stmt MakeComputeStmt(const ComputeOpNode* self,
   // grab the nest structure
   ComputeLoopNest n = ComputeLoopNest::make(self, stage, dom_map, debug_keep_trivial_loop);
   // Normal loop structure
-  n.init_nest.emplace_back(op::MakeIfNest(n.init_predicates));
-  n.main_nest.emplace_back(op::MakeIfNest(n.main_predicates));
+  n.init_nest.emplace_back(MakeIfNest(n.init_predicates));
+  n.main_nest.emplace_back(MakeIfNest(n.main_predicates));
   if (self->reduce_axis.size() != 0) {
     // make reduction.
     Stmt init, provide;
@@ -374,7 +374,7 @@ Stmt MakeComputeStmt(const ComputeOpNode* self,
     }
     MakeReduction(self, source, &init, &provide);
     init = MergeNest(n.init_nest, init);
-    init = op::Substitute(init, n.init_vmap);
+    init = Substitute(init, n.init_vmap);
     // common nest
     std::vector<std::vector<Stmt> > common(
         n.main_nest.begin(), n.main_nest.begin() + n.num_common_loop + 1);
@@ -388,7 +388,7 @@ Stmt MakeComputeStmt(const ComputeOpNode* self,
     }
     // run substitution in the on the full nest, because  loop condition
     // could depend on outer loops.
-    return op::Substitute(provide, n.main_vmap);
+    return Substitute(provide, n.main_vmap);
   } else {
     std::vector<Stmt> provides;
     for (size_t i = 0; i < self->body.size(); ++i) {
@@ -398,7 +398,7 @@ Stmt MakeComputeStmt(const ComputeOpNode* self,
     provide = MergeNest(n.main_nest, provide);
     // run substitution in the on the full nest, because  loop condition
     // could depend on outer loops.
-    return op::Substitute(provide, n.main_vmap);
+    return Substitute(provide, n.main_vmap);
   }
 }
 
@@ -472,10 +472,10 @@ ComputeLoopNest ComputeLoopNest::make(
   CHECK_EQ(stage->op.operator->(), self);
   ComputeLoopNest ret;
   // make main loop nest
-  ret.main_nest = op::MakeLoopNest(
+  ret.main_nest = MakeLoopNest(
       stage, dom_map, 0, false, std::unordered_set<IterVar>(), &ret.main_vmap,
       debug_keep_trivial_loop);
-  ret.main_predicates = schedule::MakeBoundCheck(
+  ret.main_predicates = MakeBoundCheck(
       stage, dom_map, ret.main_vmap, false,
       std::unordered_set<IterVar>());
   for (auto& e : ret.main_predicates) {
@@ -495,7 +495,7 @@ ComputeLoopNest ComputeLoopNest::make(
       update_state[self->axis[i]] = 1;
     }
     // find which iter var is related to reduction and which is related to axis.
-    schedule::PassDownBitMaskOr(stage, &update_state);
+    top::PassDownBitMaskOr(stage, &update_state);
     auto leaf_iter_vars = stage->leaf_iter_vars;
     // first first loop that is related to reduction.
     size_t begin_loop = leaf_iter_vars.size();
@@ -514,10 +514,10 @@ ComputeLoopNest ComputeLoopNest::make(
       int flag = kv.second;
       if (flag == 2) skip_iter.insert(kv.first);
     }
-    ret.init_nest = op::MakeLoopNest(
+    ret.init_nest = MakeLoopNest(
         stage, dom_map, begin_loop, true,
         skip_iter, &(ret.init_vmap), debug_keep_trivial_loop);
-    ret.init_predicates = schedule::MakeBoundCheck(
+    ret.init_predicates = MakeBoundCheck(
         stage, dom_map, ret.init_vmap, true, skip_iter);
     for (auto& e : ret.init_predicates) {
       e = likely(e);
@@ -637,4 +637,6 @@ Stmt TransformUpdate(const Stage& stage,
   return IfThenElseNode::make(arith::ComputeReduce<ir::OrNode>(conds, const_true(1)),
                           update, body);
 }
+
+}  // namespace top
 }  // namespace tvm
similarity index 95%
rename from src/op/compute_op.h
rename to src/top/operation/compute_op.h
index 3fe98e8..093dd22 100644 (file)
  * \brief Helper utilities to implement compute_op.
  * \file compute_op.h
  */
-#ifndef TVM_OP_COMPUTE_OP_H_
-#define TVM_OP_COMPUTE_OP_H_
+#ifndef TVM_TOP_OPERATION_COMPUTE_OP_H_
+#define TVM_TOP_OPERATION_COMPUTE_OP_H_
 
 #include <tvm/ir.h>
 #include <tvm/expr.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <vector>
 #include <unordered_map>
 
 namespace tvm {
+namespace top {
 // loop nest structure for general compute
 // This the loop nest structured used in compute.
 // Does not include the loop body.
@@ -106,6 +107,7 @@ Stmt TransformUpdate(const Stage& stage,
                      const ComputeLoopNest& n,
                      Stmt body,
                      Stmt update);
+}  // namespace top
 }  // namespace tvm
 
-#endif  // TVM_OP_COMPUTE_OP_H_
+#endif  // TVM_TOP_OPERATION_COMPUTE_OP_H_
similarity index 93%
rename from src/op/cross_thread_reduction.cc
rename to src/top/operation/cross_thread_reduction.cc
index 9de4bde..bf5c9b1 100644 (file)
@@ -26,6 +26,7 @@
 #include "op_util.h"
 
 namespace tvm {
+namespace top {
 using namespace ir;
 
 Stmt MakeCrossThreadReduction(
@@ -38,9 +39,9 @@ Stmt MakeCrossThreadReduction(
     args.push_back(iv->var);
   }
   std::unordered_map<IterVar, PrimExpr> value_map;
-  auto nest = op::MakeLoopNest(
+  auto nest = MakeLoopNest(
       stage, dom_map, 0, false, std::unordered_set<IterVar>(), &value_map, debug_keep_trivial_loop);
-  auto conds = schedule::MakeBoundCheck(
+  auto conds = MakeBoundCheck(
       stage, dom_map, value_map, false,
       std::unordered_set<IterVar>());
 
@@ -101,8 +102,8 @@ Stmt MakeCrossThreadReduction(
       LoadNode::make(t, res_handles[idx], 0, const_true(t.lanes())), args);
   }
   Stmt assign_body = SeqStmt::Flatten(assigns);
-  assign_body = MergeNest(op::MakeIfNest(thread_head_check), assign_body);
-  assign_body = MergeNest(op::MakeIfNest(conds), assign_body);
+  assign_body = MergeNest(MakeIfNest(thread_head_check), assign_body);
+  assign_body = MergeNest(MakeIfNest(conds), assign_body);
   Stmt body = SeqStmt::Flatten(reduce_body, assign_body);
   for (size_t idx = size; idx != 0; --idx) {
     body = AllocateNode::make(
@@ -110,7 +111,8 @@ Stmt MakeCrossThreadReduction(
     body = AttrStmtNode::make(
       res_handles[idx - 1], attr::storage_scope, StringImmNode::make("local"), body);
   }
-  body = op::Substitute(body, value_map);
+  body = Substitute(body, value_map);
   return MergeNest(nest, body);
 }
+}  // namespace top
 }  // namespace tvm
similarity index 98%
rename from src/op/extern_op.cc
rename to src/top/operation/extern_op.cc
index fb9f491..3fc73dc 100644 (file)
  * \brief External computation rule.
  * \file extern_op.cc
  */
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/arith/analyzer.h>
 #include <tvm/ir.h>
 #include <unordered_set>
 #include "op_util.h"
 
 namespace tvm {
+namespace top {
 using namespace ir;
 // ExternOpNode
 TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable)
@@ -94,7 +95,7 @@ Operation ExternOpNode::ReplaceInputs(
     const std::unordered_map<Tensor, Tensor>& rmap) const {
   CHECK_EQ(self.operator->(), this);
   auto n = make_object<ExternOpNode>(*this);
-  n->body = op::ReplaceTensor(this->body, rmap);
+  n->body = ReplaceTensor(this->body, rmap);
   for (size_t i = 0; i < n->inputs.size(); ++i) {
     Tensor t = n->inputs[i];
     if (rmap.count(t)) {
@@ -181,4 +182,5 @@ Stmt ExternOpNode::BuildProvide(
   }
   return ret;
 }
+}  // namespace top
 }  // namespace tvm
similarity index 97%
rename from src/op/hybrid_op.cc
rename to src/top/operation/hybrid_op.cc
index 8687ad6..d959826 100644 (file)
@@ -21,7 +21,7 @@
  * \brief Hybrid computation rule.
  * \file hybrid_op.cc
  */
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/arith/analyzer.h>
 #include <tvm/ir.h>
 #include <tvm/ir_functor_ext.h>
@@ -34,6 +34,7 @@
 #include "hybrid_op.h"
 
 namespace tvm {
+namespace top {
 using namespace ir;
 // HybridOpNode
 TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable)
@@ -76,7 +77,7 @@ Operation HybridOpNode::make(std::string name,
   n->attrs = std::move(attrs);
   n->inputs = std::move(inputs);
   n->outputs = std::move(outputs);
-  n->axis = op::GatherLoopVars(body);
+  n->axis = top::GatherLoopVars(body);
   n->body = std::move(body);
   Operation res = Operation(n);
   return res;
@@ -109,7 +110,7 @@ Operation HybridOpNode::ReplaceInputs(
     const std::unordered_map<Tensor, Tensor> &rmap) const {
   CHECK_EQ(self.operator->(), this);
   auto n = make_object<HybridOpNode>(*this);
-  n->body = op::ReplaceTensor(this->body, rmap);
+  n->body = top::ReplaceTensor(this->body, rmap);
   for (size_t i = 0; i < n->inputs.size(); ++i) {
     Tensor t = n->inputs[i];
     if (rmap.count(t)) {
@@ -209,18 +210,15 @@ Stmt HybridOpNode::BuildProvide(
    * This is a major difference that HybridOpNode is NOT the same as
    * ExternOpNode.
    * */
-  ret = op::ReplaceTensor(ret, rmap);
-  ret = op::ReplaceProvideTensor(ret, rmap);
+  ret = top::ReplaceTensor(ret, rmap);
+  ret = top::ReplaceProvideTensor(ret, rmap);
 
-  ret = op::ApplySchedule(stage, dom_map, ret);
+  ret = top::ApplySchedule(stage, dom_map, ret);
   return ret;
 }
 
-namespace op {
-
-
 Stmt ApplyLoopShapes(const Stage &stage,
-                 const std::unordered_map<IterVar, Range> &dom_map, Stmt stmt) {
+                     const std::unordered_map<IterVar, Range> &dom_map, Stmt stmt) {
   class LoopSpliter : public StmtExprMutator {
     PrimExpr factor;
     const VarNode *parent;
@@ -508,5 +506,5 @@ Stmt ReplaceProvideTensor(Stmt stmt,
   Stmt ret = repl(stmt);
   return repl.found ? ret : stmt;
 }
-}  // namespace op
+}  // namespace top
 }  // namespace tvm
similarity index 92%
rename from src/op/hybrid_op.h
rename to src/top/operation/hybrid_op.h
index f180129..c4586cb 100644 (file)
  * \brief Helper utilities to implement hybrid_op.
  * \file hybrid_op.h
  */
-#ifndef TVM_OP_HYBRID_OP_H_
-#define TVM_OP_HYBRID_OP_H_
+#ifndef TVM_TOP_OPERATION_HYBRID_OP_H_
+#define TVM_TOP_OPERATION_HYBRID_OP_H_
 
 #include <tvm/expr.h>
-#include <tvm/schedule.h>
+#include <tvm/top/schedule.h>
+
 #include <unordered_map>
 #include <unordered_set>
 #include <vector>
-#include "../pass/ir_util.h"
-#include "../pass/arg_binder.h"
-#include "../schedule/message_passing.h"
 
+#include "../schedule/message_passing.h"
+#include "../../pass/ir_util.h"
+#include "../../pass/arg_binder.h"
 
 namespace tvm {
-namespace op {
+namespace top {
 
 /*!
  * \brief Find all the iteration variables in the given statement body.
@@ -90,7 +91,7 @@ Stmt ApplyLoopOrder(const Stage &stage,
                     const std::unordered_map<IterVar, Range> &dom_map,
                     const std::unordered_map<IterVar, IterVar> &rebased, Stmt stmt);
 
-}  // namespace op
+}  // namespace top
 }  // namespace tvm
 
-#endif  // TVM_OP_HYBRID_OP_H_
+#endif  // TVM_TOP_OPERATION_HYBRID_OP_H_
similarity index 97%
rename from src/op/op_util.cc
rename to src/top/operation/op_util.cc
index 52d3b5a..fcf8318 100644 (file)
 #include <tvm/ir.h>
 #include <tvm/ir_pass.h>
 #include <tvm/ir_functor_ext.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <string>
 #include "op_util.h"
 #include "../schedule/message_passing.h"
-#include "../arith/compute_expr.h"
+#include "../../arith/compute_expr.h"
 
 namespace tvm {
-namespace op {
+namespace top {
 
 using namespace arith;
 using namespace ir;
@@ -172,7 +172,7 @@ MakeLoopNest(const Stage& stage,
     }
   }
   // message passing to get offset of root iter vars.
-  schedule::PassUpIndex(stage, dom_map, &value_map);
+  top::PassUpIndex(stage, dom_map, &value_map);
   return nest;
 }
 
@@ -220,7 +220,7 @@ Stmt ReplaceTensor(Stmt stmt,
   return repl.found ? ret : stmt;
 }
 PrimExpr ReplaceTensor(PrimExpr expr,
-                   const std::unordered_map<Tensor, Tensor>& replace) {
+                       const std::unordered_map<Tensor, Tensor>& replace) {
   TensorReplacer repl(replace);
   PrimExpr ret = repl(expr);
   return repl.found ? ret : expr;
@@ -266,5 +266,5 @@ ir::ForType IterVarTypeToForType(IterVarType iter_type) {
   }
 }
 
-}  // namespace op
+}  // namespace top
 }  // namespace tvm
similarity index 93%
rename from src/op/op_util.h
rename to src/top/operation/op_util.h
index cea050b..babdabc 100644 (file)
  * \file op_util.h
  * \brief Common utility used in operator construction.
  */
-#ifndef TVM_OP_OP_UTIL_H_
-#define TVM_OP_OP_UTIL_H_
+#ifndef TVM_TOP_OPERATION_OP_UTIL_H_
+#define TVM_TOP_OPERATION_OP_UTIL_H_
 
 #include <tvm/expr.h>
-#include <tvm/schedule.h>
+#include <tvm/top/schedule.h>
 #include <unordered_map>
 #include <unordered_set>
 #include <vector>
-#include "../pass/ir_util.h"
-#include "../pass/arg_binder.h"
+#include "../../pass/ir_util.h"
+#include "../../pass/arg_binder.h"
 #include "../schedule/message_passing.h"
 
 namespace tvm {
-namespace op {
+namespace top {
 
 using ir::MergeNest;
 
@@ -102,6 +102,6 @@ IterVarType ForTypeToIterVarType(ir::ForType for_type);
  */
 ir::ForType IterVarTypeToForType(IterVarType iter_type);
 
-}  // namespace op
+}  // namespace top
 }  // namespace tvm
-#endif  // TVM_OP_OP_UTIL_H_
+#endif  // TVM_TOP_OPERATION_OP_UTIL_H_
similarity index 97%
rename from src/op/placeholder_op.cc
rename to src/top/operation/placeholder_op.cc
index 22e0ad4..284752b 100644 (file)
  * \brief Placeholder op.
  * \file placeholder_op.cc
  */
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 
 namespace tvm {
+namespace top {
 
 // PlaceholderOpNode
 TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable)
@@ -102,4 +103,5 @@ Stmt PlaceholderOpNode::BuildProvide(
     bool debug_keep_trivial_loop) const {
   return Stmt();
 }
+}  // namespace top
 }  // namespace tvm
similarity index 98%
rename from src/op/scan_op.cc
rename to src/top/operation/scan_op.cc
index f7b16f2..8f54872 100644 (file)
  * \brief Scan Operator.
  * \file scan_op.cc
  */
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/ir.h>
 #include <tvm/ir_pass.h>
 #include "op_util.h"
 #include "../schedule/graph.h"
 
 namespace tvm {
-
+namespace top {
 using namespace ir;
 
 TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable)
@@ -215,7 +215,6 @@ void ScanOpNode::GatherBound(
     const std::unordered_map<Tensor, TensorDom>& tensor_dom,
     std::unordered_map<IterVar, Range>* out_dom_map) const {
   CHECK_EQ(self.operator->(), this);
-  using namespace schedule;
   CHECK(!out_dom_map->count(this->scan_axis));
   std::vector<Tensor> output(this->num_outputs());
   for (size_t i = 0; i < output.size(); ++i) {
@@ -297,12 +296,13 @@ Stmt ScanOpNode::BuildProvide(
   }
   std::unordered_map<IterVar, PrimExpr> vmap;
   std::unordered_set<IterVar> empty;
-  auto nest = op::MakeLoopNest(
+  auto nest = MakeLoopNest(
       stage, dom_map, 0, false, empty, &vmap, debug_keep_trivial_loop);
   nest[begin_scan].push_back(init);
   nest.push_back(
-      op::MakeIfNest(
-          schedule::MakeBoundCheck(stage, dom_map, vmap, false, empty)));
+      MakeIfNest(
+          MakeBoundCheck(stage, dom_map, vmap, false, empty)));
   return MergeNest(nest, provide);
 }
+}  // namespace top
 }  // namespace tvm
similarity index 92%
rename from src/op/tensor_compute_op.cc
rename to src/top/operation/tensor_compute_op.cc
index 4c758dd..49b00fc 100644 (file)
  * \brief Tensor Compute Op.
  * \file tensor_compute_op.cc
  */
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/arith/analyzer.h>
 #include <tvm/ir.h>
 #include <tvm/ir_pass.h>
 #include <unordered_set>
 #include "./op_util.h"
 #include "./compute_op.h"
-#include "../arith/compute_expr.h"
+#include "../../arith/compute_expr.h"
 
 namespace tvm {
+namespace top {
 using namespace ir;
 // TensorComputeOpNode
 TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable)
@@ -81,12 +82,12 @@ Operation TensorComputeOpNode::ReplaceInputs(
   CHECK_EQ(self.operator->(), this);
   auto n = make_object<TensorComputeOpNode>(*this);
   auto intrin = make_object<TensorIntrinNode>(*(this->intrin.operator->()));
-  intrin->body = op::ReplaceTensor(this->intrin->body, rmap);
+  intrin->body = ReplaceTensor(this->intrin->body, rmap);
   if (intrin->reduce_init.defined()) {
-    intrin->reduce_init = op::ReplaceTensor(this->intrin->reduce_init, rmap);
+    intrin->reduce_init = ReplaceTensor(this->intrin->reduce_init, rmap);
   }
   if (intrin->reduce_update.defined()) {
-    intrin->reduce_update = op::ReplaceTensor(this->intrin->reduce_update, rmap);
+    intrin->reduce_update = ReplaceTensor(this->intrin->reduce_update, rmap);
   }
   for (size_t i = 0; i < n->inputs.size(); ++i) {
     Tensor t = n->inputs[i];
@@ -208,7 +209,7 @@ Stmt TensorComputeOpNode::BuildProvide(
   if (this->reduce_axis.size() == 0) {
     std::vector<std::vector<Stmt> > nest(
         n.main_nest.begin(), n.main_nest.begin() + tloc + 1);
-    nest.emplace_back(op::MakeIfNest(n.main_predicates));
+    nest.emplace_back(MakeIfNest(n.main_predicates));
     CHECK_EQ(n.init_predicates.size(), 0U);
     CHECK(this->intrin->body.defined())
         << "Normal store op for intrin " << this << " is not defined";
@@ -216,7 +217,7 @@ Stmt TensorComputeOpNode::BuildProvide(
     body = MergeNest(input_bind_nest, body);
     body = ir::Substitute(body, vmap);
     body = MergeNest(binder.asserts(), body);
-    body = op::Substitute(body, n.main_vmap);
+    body = top::Substitute(body, n.main_vmap);
     Stmt ret =  MergeNest(nest, body);
     return ret;
   } else {
@@ -229,22 +230,22 @@ Stmt TensorComputeOpNode::BuildProvide(
         n.main_nest.begin(), n.main_nest.begin() + n.num_common_loop + 1);
     std::vector<std::vector<Stmt> > update_nest(
         n.main_nest.begin() + n.num_common_loop + 1, n.main_nest.begin() + tloc + 1);
-    update_nest.emplace_back(op::MakeIfNest(n.main_predicates));
+    update_nest.emplace_back(MakeIfNest(n.main_predicates));
 
     if (this->intrin->reduce_init.defined()) {
       // init nest
       std::vector<std::vector<Stmt> > init_nest(
           n.init_nest.begin(), n.init_nest.begin() + tloc + 1);
-      init_nest.emplace_back(op::MakeIfNest(n.init_predicates));
+      init_nest.emplace_back(MakeIfNest(n.init_predicates));
       Stmt init = MergeNest(output_bind_nest, this->intrin->reduce_init);
-      init = op::Substitute(init, n.init_vmap);
+      init = top::Substitute(init, n.init_vmap);
       init = MergeNest(init_nest, init);
       // The update
       Stmt update = MergeNest(output_bind_nest, this->intrin->reduce_update);
       update = MergeNest(input_bind_nest, update);
       update = ir::Substitute(update, vmap);
       update = MergeNest(binder.asserts(), update);
-      update = op::Substitute(update, n.main_vmap);
+      update = top::Substitute(update, n.main_vmap);
       update = MergeNest(update_nest, update);
       return MergeNest(common, SeqStmt::Flatten(init, update));
     } else {
@@ -258,11 +259,11 @@ Stmt TensorComputeOpNode::BuildProvide(
       update = MergeNest(input_bind_nest, update);
       update = ir::Substitute(update, vmap);
       update = MergeNest(binder.asserts(), update);
-      update = op::Substitute(update, n.main_vmap);
+      update = top::Substitute(update, n.main_vmap);
       update = MergeNest(update_nest, update);
       return MergeNest(common, update);
     }
   }
 }
-
+}  // namespace top
 }  // namespace tvm
similarity index 97%
rename from src/op/tensorize.cc
rename to src/top/operation/tensorize.cc
index 4460d90..413bb42 100644 (file)
@@ -32,9 +32,9 @@
 #include "../schedule/message_passing.h"
 
 namespace tvm {
+namespace top {
 
 using namespace ir;
-using namespace op;
 
 // Detect the region of input and output to be tensrized.
 // out_dom: the domain of root iter vars in output op
@@ -82,7 +82,7 @@ size_t InferTensorizeRegion(
   }
   CHECK(found_point);
   // Get domain of the tensorized scope.
-  schedule::PassUpDomain(stage, dom_map, &up_state);
+  top::PassUpDomain(stage, dom_map, &up_state);
   // Get domains if inputs
   std::unordered_map<Tensor, TensorDom> in_dom;
   std::unordered_map<const VarNode*, IntSet> temp_dmap;
@@ -445,15 +445,15 @@ Stmt MakeTensorize(const ComputeOpNode* self,
     // Do no need to split reduction
     std::vector<std::vector<Stmt> > nest(
         n.main_nest.begin(), n.main_nest.begin() + tloc + 1);
-    nest.emplace_back(op::MakeIfNest(n.main_predicates));
+    nest.emplace_back(MakeIfNest(n.main_predicates));
     CHECK_EQ(n.init_predicates.size(), 0U);
     CHECK(intrin->body.defined())
         << "Normal store op for intrin " << intrin << " is not defined";
     Stmt body = MergeNest(output_bind_nest, intrin->body);
     body = MergeNest(input_bind_nest, body);
-    body = Substitute(body, vmap);
+    body = ir::Substitute(body, vmap);
     body = MergeNest(binder.asserts(), body);
-    body = Substitute(body, n.main_vmap);
+    body = top::Substitute(body, n.main_vmap);
     return MergeNest(nest, body);
   } else {
     // Need to split reduction
@@ -465,22 +465,22 @@ Stmt MakeTensorize(const ComputeOpNode* self,
         n.main_nest.begin(), n.main_nest.begin() + n.num_common_loop + 1);
     std::vector<std::vector<Stmt> > update_nest(
         n.main_nest.begin() + n.num_common_loop + 1, n.main_nest.begin() + tloc + 1);
-    update_nest.emplace_back(op::MakeIfNest(n.main_predicates));
+    update_nest.emplace_back(MakeIfNest(n.main_predicates));
 
     if (intrin->reduce_init.defined()) {
       // init nest
       std::vector<std::vector<Stmt> > init_nest(
           n.init_nest.begin(), n.init_nest.begin() + tloc + 1);
-      init_nest.emplace_back(op::MakeIfNest(n.init_predicates));
+      init_nest.emplace_back(MakeIfNest(n.init_predicates));
       Stmt init = MergeNest(output_bind_nest, intrin->reduce_init);
-      init = Substitute(init, n.init_vmap);
+      init = top::Substitute(init, n.init_vmap);
       init = MergeNest(init_nest, init);
       // The update
       Stmt update = MergeNest(output_bind_nest, intrin->reduce_update);
       update = MergeNest(input_bind_nest, update);
-      update = Substitute(update, vmap);
+      update = ir::Substitute(update, vmap);
       update = MergeNest(binder.asserts(), update);
-      update = Substitute(update, n.main_vmap);
+      update = top::Substitute(update, n.main_vmap);
       update = MergeNest(update_nest, update);
       return MergeNest(common, SeqStmt::Flatten(init, update));
     } else {
@@ -492,9 +492,9 @@ Stmt MakeTensorize(const ComputeOpNode* self,
                                     intrin->reduce_update);
       update = MergeNest(output_bind_nest, update);
       update = MergeNest(input_bind_nest, update);
-      update = Substitute(update, vmap);
+      update = ir::Substitute(update, vmap);
       update = MergeNest(binder.asserts(), update);
-      update = Substitute(update, n.main_vmap);
+      update = top::Substitute(update, n.main_vmap);
       update = MergeNest(update_nest, update);
       return MergeNest(common, update);
     }
@@ -533,4 +533,5 @@ TVM_REGISTER_GLOBAL("test.op.MatchTensorizeBody")
                               intrin,
                               &vrange);
   });
+}  // namespace top
 }  // namespace tvm
similarity index 96%
rename from src/schedule/auto_inline_elem_wise.cc
rename to src/top/schedule/auto_inline_elem_wise.cc
index 3e32923..9b08813 100644 (file)
 /*!
  * \file auto_inline_elem_wise.cc
  */
-#include <tvm/schedule_pass.h>
-#include <tvm/operation.h>
+#include <tvm/top/schedule_pass.h>
+#include <tvm/top/operation.h>
 #include <tvm/ir_functor_ext.h>
 
 namespace tvm {
-namespace schedule {
+namespace top {
 
 using namespace ir;
 
@@ -111,5 +111,5 @@ void AutoInlineInjective(Schedule sch) {
   }
 }
 
-}  // namespace schedule
+}  // namespace top
 }  // namespace tvm
similarity index 98%
rename from src/schedule/bound.cc
rename to src/top/schedule/bound.cc
index 5f363db..8fffc53 100644 (file)
  * \file bound.cc
  * \brief The bound inference logic.
  */
-#include <tvm/schedule_pass.h>
-#include <tvm/operation.h>
+#include <tvm/top/schedule_pass.h>
+#include <tvm/top/operation.h>
 #include <tvm/ir_pass.h>
 #include <unordered_map>
 #include <unordered_set>
 #include "graph.h"
 #include "message_passing.h"
-#include "../runtime/thread_storage_scope.h"
+#include "../../runtime/thread_storage_scope.h"
 
 namespace tvm {
-namespace schedule {
+namespace top {
 
 using runtime::StorageRank;
 using runtime::StorageScope;
@@ -259,5 +259,5 @@ Map<IterVar, Range> InferBound(const Schedule& sch) {
   return Map<IterVar, Range>(ret.begin(), ret.end());
 }
 
-}  // namespace schedule
+}  // namespace top
 }  // namespace tvm
similarity index 98%
rename from src/schedule/graph.cc
rename to src/top/schedule/graph.cc
index 3cf30f4..62df842 100644 (file)
  */
 #include <tvm/ir.h>
 #include <tvm/ir_functor_ext.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <utility>
 #include <unordered_set>
 #include <unordered_map>
 #include "graph.h"
 
 namespace tvm {
-namespace schedule {
+namespace top {
 // key to specific tensor dimension.
 struct TensorDimKey {
   ir::FunctionRef f;
@@ -55,13 +55,13 @@ struct TensorDimKey {
     return !operator==(other);
   }
 };
-}  // namespace schedule
+}  // namespace top
 }  // namespace tvm
 
 namespace std {
 template <>
-struct hash<::tvm::schedule::TensorDimKey> {
-  std::size_t operator()(const ::tvm::schedule::TensorDimKey& k) const {
+struct hash<::tvm::top::TensorDimKey> {
+  std::size_t operator()(const ::tvm::top::TensorDimKey& k) const {
     size_t lhs = ::tvm::ObjectHash()(k.f);
     size_t rhs = static_cast<size_t>(k.value_index) << 16UL |
         static_cast<size_t>(k.dim);
@@ -73,7 +73,7 @@ struct hash<::tvm::schedule::TensorDimKey> {
 
 
 namespace tvm {
-namespace schedule {
+namespace top {
 
 // construct a read graph that gives readers of each operation
 // that the root depend on
@@ -429,5 +429,5 @@ Map<IterVar, PrimExpr> ScanFixPointAnalysis(const Operation& scan_op) {
   return ret;
 }
 
-}  // namespace schedule
+}  // namespace top
 }  // namespace tvm
similarity index 95%
rename from src/schedule/graph.h
rename to src/top/schedule/graph.h
index 99ba6e3..f379f98 100644 (file)
  * \file graph.h
  * \brief Utilities to get information about schedule graph.
  */
-#ifndef TVM_SCHEDULE_GRAPH_H_
-#define TVM_SCHEDULE_GRAPH_H_
+#ifndef TVM_TOP_SCHEDULE_GRAPH_H_
+#define TVM_TOP_SCHEDULE_GRAPH_H_
 
 #include <tvm/expr.h>
-#include <tvm/schedule.h>
-#include <tvm/operation.h>
+#include <tvm/top/schedule.h>
+#include <tvm/top/operation.h>
 #include <unordered_map>
 #include <unordered_set>
 #include <vector>
 
 namespace tvm {
-namespace schedule {
+namespace top {
 
 /*!
  * \brief data structure of Operation->Tensors it reads
@@ -125,7 +125,7 @@ Array<Operation> ScanGetBody(const Operation& scan_op);
  */
 Map<IterVar, PrimExpr> ScanFixPointAnalysis(const Operation& scan);
 
-}  // namespace schedule
+}  // namespace top
 }  // namespace tvm
 
-#endif  // TVM_SCHEDULE_GRAPH_H_
+#endif  // TVM_TOP_SCHEDULE_GRAPH_H_
similarity index 99%
rename from src/schedule/message_passing.cc
rename to src/top/schedule/message_passing.cc
index 816ea44..4732681 100644 (file)
 #include <tvm/ir.h>
 #include <tvm/ir_pass.h>
 #include "message_passing.h"
-#include "../arith/compute_expr.h"
+#include "../../arith/compute_expr.h"
 
 namespace tvm {
-namespace schedule {
+namespace top {
 
 using namespace ir;
 
@@ -539,5 +539,5 @@ std::vector<PrimExpr> MakeBoundCheck(
   }
   return preds;
 }
-}  // namespace schedule
+}  // namespace top
 }  // namespace tvm
similarity index 95%
rename from src/schedule/message_passing.h
rename to src/top/schedule/message_passing.h
index 4b81bcc..42b72a7 100644 (file)
  * \brief Common utilities to do message passing
  *  on the schedule hyper graph.
  */
-#ifndef TVM_SCHEDULE_MESSAGE_PASSING_H_
-#define TVM_SCHEDULE_MESSAGE_PASSING_H_
+#ifndef TVM_TOP_SCHEDULE_MESSAGE_PASSING_H_
+#define TVM_TOP_SCHEDULE_MESSAGE_PASSING_H_
 
 #include <tvm/expr.h>
-#include <tvm/schedule.h>
-#include <tvm/operation.h>
+#include <tvm/top/schedule.h>
+#include <tvm/top/operation.h>
 #include <tvm/arith/analyzer.h>
 #include <unordered_map>
 #include <unordered_set>
 #include <vector>
 
 namespace tvm {
-namespace schedule {
+namespace top {
 /*!
  * \brief Downward inference of domain of each IterVar.
  *  Caller set the range of the root, then the function
@@ -128,6 +128,6 @@ MakeBoundCheck(
     bool skip_ivar_domain,
     const std::unordered_set<IterVar>& skip_iter);
 
-}  // namespace schedule
+}  // namespace top
 }  // namespace tvm
-#endif  // TVM_SCHEDULE_MESSAGE_PASSING_H_
+#endif  // TVM_TOP_SCHEDULE_MESSAGE_PASSING_H_
similarity index 97%
rename from src/schedule/schedule_dataflow_rewrite.cc
rename to src/top/schedule/schedule_dataflow_rewrite.cc
index f06cf96..5f9ba39 100644 (file)
 /*!
  * \file schedule_dataflow_rewrite.cc
  */
-#include <tvm/schedule.h>
-#include <tvm/operation.h>
+#include <tvm/top/schedule.h>
+#include <tvm/top/operation.h>
 #include <tvm/ir_functor_ext.h>
 #include <tvm/ir_pass.h>
 #include <unordered_set>
 #include "message_passing.h"
-#include "../pass/ir_util.h"
-#include "../arith/compute_expr.h"
+#include "../../pass/ir_util.h"
+#include "../../arith/compute_expr.h"
 
 namespace tvm {
-
+namespace top {
 // find first occurance location in leaf
 template<typename T>
 size_t FindNodeRef(ArrayNode* array_node, const T& v) {
@@ -211,7 +211,7 @@ void PrepareAxisMapping(Stage orig_stage,
     dom_map[iv] = iv->dom;
     analyzer.Bind(iv->var, iv->dom);
   }
-  schedule::PassDownDomain(orig_stage, &dom_map, &analyzer, true);
+  top::PassDownDomain(orig_stage, &dom_map, &analyzer, true);
   {
     // The source->cache
     std::unordered_map<IterVar, PrimExpr> value_map;
@@ -235,8 +235,8 @@ void PrepareAxisMapping(Stage orig_stage,
     for (IterVar iv : op->reduce_axis) {
       skip_bound_check.insert(iv);
     }
-    schedule::PassUpIndex(orig_stage, dom_map, &value_map, true);
-    predicates = schedule::MakeBoundCheck(
+    PassUpIndex(orig_stage, dom_map, &value_map, true);
+    predicates = MakeBoundCheck(
         orig_stage, dom_map, value_map, true, skip_bound_check);
     // The root axis
     for (IterVar iv : op->axis) {
@@ -347,7 +347,7 @@ Array<Tensor> CacheWriteWithReLayout(Schedule sch,
     for (IterVar iv : compute->axis) {
       value_map[iv] = iv->var;
     }
-    schedule::PassDownIndex(orig_stage, dom_map, &value_map, true);
+    top::PassDownIndex(orig_stage, dom_map, &value_map, true);
     for (IterVar iv : orig_stage->leaf_iter_vars) {
       if (red_axis.count(iv)) continue;
       args.push_back(value_map.at(iv));
@@ -437,7 +437,7 @@ Array<Tensor> CacheWriteWithReLayoutTensor(Schedule sch,
     for (IterVar iv : compute_axis) {
       value_map[iv] = iv->var;
     }
-    schedule::PassDownIndex(orig_stage, dom_map, &value_map, true);
+    PassDownIndex(orig_stage, dom_map, &value_map, true);
     for (IterVar iv : orig_stage->leaf_iter_vars) {
       if (red_axis.count(iv)) continue;
       args.push_back(value_map.at(iv));
@@ -692,8 +692,8 @@ Array<Tensor> Schedule::rfactor(const Tensor& tensor,
   // Find touched reduction axis.
   std::unordered_map<IterVar, int> touch_map;
   touch_map[axis] = 1;
-  schedule::PassUpBitMaskOr(reduce_stage, &touch_map, true);
-  schedule::PassDownBitMaskOr(reduce_stage, &touch_map, true);
+  top::PassUpBitMaskOr(reduce_stage, &touch_map, true);
+  top::PassDownBitMaskOr(reduce_stage, &touch_map, true);
   // skip reduction iteration.
   std::unordered_set<IterVar> skip_bound_check;
   // Verify normal axis are not touched.
@@ -715,7 +715,7 @@ Array<Tensor> Schedule::rfactor(const Tensor& tensor,
     }
     analyzer.Bind(iv->var, iv->dom);
   }
-  schedule::PassDownDomain(reduce_stage, &dom_map, &analyzer, true);
+  top::PassDownDomain(reduce_stage, &dom_map, &analyzer, true);
   for (IterVar iv : reduce_stage->leaf_iter_vars) {
     if (touch_map.count(iv)) {
       Range dom = dom_map.at(iv);
@@ -726,8 +726,8 @@ Array<Tensor> Schedule::rfactor(const Tensor& tensor,
       }
     }
   }
-  schedule::PassUpIndex(reduce_stage, dom_map, &value_map, true);
-  std::vector<PrimExpr> predicates = schedule::MakeBoundCheck(
+  top::PassUpIndex(reduce_stage, dom_map, &value_map, true);
+  std::vector<PrimExpr> predicates = MakeBoundCheck(
       reduce_stage, dom_map, value_map, true, skip_bound_check);
 
   // Get the factored op node.
@@ -881,5 +881,5 @@ Array<Tensor> Schedule::rfactor(const Tensor& tensor,
   reduce_stage->relations = Array<IterVarRelation>();
   return factor_tensors;
 }
-
+}  // namespace top
 }  // namespace tvm
similarity index 99%
rename from src/schedule/schedule_lang.cc
rename to src/top/schedule/schedule_lang.cc
index fe56b66..5523530 100644 (file)
 /*!
  * \file schedule_lang.cc
  */
-#include <tvm/schedule.h>
-#include <tvm/operation.h>
+#include <tvm/top/schedule.h>
+#include <tvm/top/operation.h>
 #include <unordered_set>
 #include "graph.h"
 
 namespace tvm {
-
-namespace {
+namespace top {
 
 // find first occurance location in leaf
 template<typename T>
@@ -84,8 +83,6 @@ void Split(StageNode* self,
   leaf_vars->data.insert(leaf_vars->data.begin() + pos, outer);
 }
 
-}  // namespace
-
 Stage::Stage(Operation op) {
   auto n = make_object<StageNode>();
   n->op = op;
@@ -594,7 +591,7 @@ Stage Schedule::create_group(const Array<Tensor>& outputs,
   self->InitCache();
   const auto& op2stage_cache = self->op2stage_cache_;
   // Get the ops.
-  Array<Operation> ops = schedule::GetSubGraph(
+  Array<Operation> ops = top::GetSubGraph(
       RemapTensor(self, outputs),
       RemapTensor(self, inputs),
       include_inputs);
@@ -718,8 +715,8 @@ Schedule ScheduleNode::make(Array<Operation> ops) {
   auto n = make_object<ScheduleNode>();
   Schedule sch(n);
   n->outputs = ops;
-  auto g = schedule::CreateReadGraph(n->outputs);
-  Array<Operation> post_order = schedule::PostDFSOrder(n->outputs, g);
+  auto g = top::CreateReadGraph(n->outputs);
+  Array<Operation> post_order = top::PostDFSOrder(n->outputs, g);
   // output set.
   std::unordered_set<Operation> output_set;
   for (Operation x : ops) {
@@ -851,4 +848,5 @@ TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable)
     auto* op = static_cast<const ScheduleNode*>(node.get());
     p->stream << "schedule(" << op << ")";
   });
+}  // namespace top
 }  // namespace tvm
similarity index 98%
rename from src/schedule/schedule_ops.cc
rename to src/top/schedule/schedule_ops.cc
index 1eb595c..1176d82 100644 (file)
 #include <tvm/ir.h>
 #include <tvm/ir_pass.h>
 #include <tvm/ir_functor_ext.h>
-#include <tvm/operation.h>
-#include <tvm/schedule_pass.h>
+#include <tvm/top/operation.h>
+#include <tvm/top/schedule_pass.h>
 #include <utility>
 #include <unordered_map>
 #include <unordered_set>
 #include "graph.h"
-#include "../op/op_util.h"
-#include "../pass/ir_util.h"
+#include "../operation/op_util.h"
+#include "../../pass/ir_util.h"
 
 namespace tvm {
-namespace schedule {
+namespace top {
 
 using namespace ir;
 
@@ -423,5 +423,5 @@ Stmt ScheduleOps(
   return post_proc(std::move(body));
 }
 
-}  // namespace schedule
+}  // namespace top
 }  // namespace tvm
similarity index 97%
rename from src/lang/tensor.cc
rename to src/top/tensor.cc
index 35b4029..c8e3aea 100644 (file)
 /*!
  * \file tensor.cc
  */
-#include <tvm/tensor.h>
-#include <tvm/operation.h>
-#include <tvm/tensor_intrin.h>
+#include <tvm/top/tensor.h>
+#include <tvm/top/operation.h>
+#include <tvm/top/tensor_intrin.h>
 #include <memory>
 
 namespace tvm {
-
+namespace top {
 // Tensor
 PrimExpr Tensor::operator()(Array<Var> indices) const {
   Array<PrimExpr> arr(indices.begin(), indices.end());
@@ -132,4 +132,5 @@ TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable)
 
 TVM_REGISTER_NODE_TYPE(TensorIntrinCallNode);
 
+}  // namespace top
 }  // namespace tvm
index 15bdd47..30834c5 100644 (file)
@@ -20,7 +20,7 @@
 #include <dmlc/logging.h>
 #include <gtest/gtest.h>
 #include <topi/cuda/injective.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/runtime/registry.h>
 #include <tvm/packed_func_ext.h>
 #include <tvm/build_module.h>
@@ -30,6 +30,7 @@
 
 TEST(BuildModule, Basic) {
   using namespace tvm;
+  using namespace tvm::top;
   auto n = var("n");
   Array<PrimExpr> shape;
   shape.push_back(n);
@@ -75,6 +76,7 @@ TEST(BuildModule, Heterogeneous) {
    */
 
   using namespace tvm;
+  using namespace tvm::top;
   const runtime::PackedFunc* pf = runtime::Registry::Get("module._Enabled");
   bool enabled = (*pf)("cuda");
   if (!enabled) {
index d9b6200..af8ede3 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <dmlc/logging.h>
 #include <gtest/gtest.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 
 TEST(Expr, Basic) {
   using namespace tvm;
index 6b694ef..e9f0df6 100644 (file)
@@ -20,7 +20,7 @@
 #include <dmlc/logging.h>
 #include <gtest/gtest.h>
 #include <tvm/ir_pass.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 
 TEST(IRSIMPLIFY, MinMax) {
   auto x = tvm::var("x");
index df2a6b6..462d0fe 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <gtest/gtest.h>
 #include <tvm/build_module.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/relay/expr.h>
 #include <tvm/relay/type.h>
 #include <tvm/relay/analysis.h>
index 7d03d2e..68d5d0d 100644 (file)
@@ -18,7 +18,7 @@
  */
 
 #include <gtest/gtest.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/relay/expr.h>
 #include <tvm/relay/type.h>
 #include <tvm/relay/analysis.h>
index 8321c58..4c383b5 100644 (file)
@@ -28,7 +28,7 @@
 #include <tvm/relay/type.h>
 #include <tvm/runtime/packed_func.h>
 #include <tvm/runtime/registry.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 
 TVM_REGISTER_GLOBAL("schedule")
     .set_body([](tvm::TVMArgs args, tvm::TVMRetValue* rv) {
index 6333f15..e41b881 100644 (file)
@@ -20,7 +20,7 @@
 #include <dmlc/logging.h>
 #include <gtest/gtest.h>
 #include <tvm/ir_pass.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 
 TEST(SimplePasses, HasSideEffect) {
   using namespace tvm;
@@ -28,7 +28,7 @@ TEST(SimplePasses, HasSideEffect) {
   Array<PrimExpr> shape;
   shape.push_back(n);
 
-  auto A = placeholder(shape, DataType::Float(32), "A");
+  auto A = top::placeholder(shape, DataType::Float(32), "A");
 
   CHECK(!tvm::ir::HasSideEffect(A[0]));
 }
index c994c0f..5d6dc23 100644 (file)
 
 #include <dmlc/logging.h>
 #include <gtest/gtest.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 
 TEST(Tensor, Basic) {
   using namespace tvm;
+  using namespace tvm::top;
+
   Var m("m"), n("n"), l("l");
 
   Tensor A = placeholder({m, l}, DataType::Float(32), "A");
@@ -37,15 +39,17 @@ TEST(Tensor, Basic) {
 
 TEST(Tensor, Reduce) {
   using namespace tvm;
+  using namespace tvm::top;
+
   Var m("m"), n("n"), l("l");
-  Tensor A = placeholder({m, l}, DataType::Float(32), "A");
-  Tensor B = placeholder({n, l}, DataType::Float(32), "B");
+  top::Tensor A = top::placeholder({m, l}, DataType::Float(32), "A");
+  top::Tensor B = top::placeholder({n, l}, DataType::Float(32), "B");
   IterVar rv = reduce_axis(Range{0, l}, "k");
 
-  auto C = compute({m, n}, [&](Var i, Var j) {
+  auto C = top::compute({m, n}, [&](Var i, Var j) {
       return sum(max(1 + A[i][rv] + 1, B[j][rv]), {rv});
       }, "C");
-  LOG(INFO) << C->op.as<ComputeOpNode>()->body;
+  LOG(INFO) << C->op.as<top::ComputeOpNode>()->body;
 }
 
 int main(int argc, char ** argv) {
index 55a9145..5f89bdf 100644 (file)
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <topi/elemwise.h>
 #include <gtest/gtest.h>
 
index a3720cd..73a6245 100644 (file)
@@ -32,7 +32,7 @@
 #include <gtest/gtest.h>
 #include <topi/generic/injective.h>
 #include <tvm/build_module.h>
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <tvm/packed_func_ext.h>
 #include <tvm/relay/analysis.h>
 #include <tvm/relay/expr.h>
index ce16e23..47dfc3e 100644 (file)
@@ -43,7 +43,7 @@ namespace topi {
  *
  * \return A Tensor whose op member is a broadcast operation
  */
-inline tvm::Tensor broadcast_to(const tvm::Tensor& t,
+inline tvm::top::Tensor broadcast_to(const tvm::top::Tensor& t,
                                 const tvm::Array<tvm::PrimExpr>& output_shape,
                                 std::string name = "T_broadcast_to",
                                 std::string tag = kBroadcast) {
@@ -58,7 +58,7 @@ inline tvm::Tensor broadcast_to(const tvm::Tensor& t,
   auto l = [&](tvm::Array<tvm::Var> ovars) {
     return t(detail::InputIndexFromBroadcast(ovars, t, bh.vars2, bh.all_vars));
   };
-  return tvm::compute(
+  return tvm::top::compute(
       tvm::Array<tvm::PrimExpr>(bh.common_shape.begin(), bh.common_shape.end()),
       l,
       name,
@@ -70,44 +70,44 @@ inline tvm::Tensor broadcast_to(const tvm::Tensor& t,
                             const tvm::PrimExpr& b) {                 \
     ComputeRule;                                                      \
   }                                                                   \
-  inline tvm::Tensor Name(const tvm::Tensor& A,                       \
-                          const tvm::Tensor& B,                       \
-                          std::string name = "T_" #Name,              \
-                          std::string tag = kBroadcast) {             \
+  inline tvm::top::Tensor Name(const tvm::top::Tensor& A,             \
+                               const tvm::top::Tensor& B,             \
+                               std::string name = "T_" #Name,         \
+                               std::string tag = kBroadcast) {        \
     auto l = [](tvm::PrimExpr a, tvm::PrimExpr b) { ComputeRule; };   \
     return detail::WithBroadcast(l, A, B, name, tag);                 \
   }                                                                   \
-  inline tvm::Tensor Name(const tvm::Tensor& A,                       \
+  inline tvm::top::Tensor Name(const tvm::top::Tensor& A,             \
                           const tvm::PrimExpr& B,                     \
                           std::string name = "T_" #Name,              \
                           std::string tag = kElementWise) {           \
-    auto l = [](tvm::PrimExpr a, tvm::PrimExpr b) { ComputeRule; }; \
-    return compute(A->shape, [&](const ::tvm::Array<::tvm::Var>& i) { \
+    auto l = [](tvm::PrimExpr a, tvm::PrimExpr b) { ComputeRule; };   \
+    return tvm::top::compute(A->shape, [&](const ::tvm::Array<::tvm::Var>& i) { \
         return l(A(i), B);                                            \
       }, name, tag);                                                  \
   }                                                                   \
-  inline tvm::Tensor Name(const tvm::PrimExpr& A,                     \
-                          const tvm::Tensor& B,                       \
-                          std::string name = "T_" #Name,              \
-                          std::string tag = kElementWise) {           \
+  inline tvm::top::Tensor Name(const tvm::PrimExpr& A,                \
+                               const tvm::top::Tensor& B,             \
+                               std::string name = "T_" #Name,         \
+                               std::string tag = kElementWise) {      \
     auto l = [&](tvm::PrimExpr a, tvm::PrimExpr b) { ComputeRule; };  \
-    return compute(B->shape, [&](const ::tvm::Array<::tvm::Var>& i) { \
+    return tvm::top::compute(B->shape, [&](const ::tvm::Array<::tvm::Var>& i) { \
         return l(A, B(i));                                            \
       }, name, tag);                                                  \
   }
 
 
 #define TOPI_DEFINE_OP_OVERLOAD(Name, OpName)                       \
-  inline tvm::Tensor Name(const tvm::Tensor& A,                     \
-                          const tvm::Tensor& B) {                   \
+  inline tvm::top::Tensor Name(const tvm::top::Tensor& A,           \
+                              const tvm::top::Tensor& B) {          \
     return topi::OpName(A, B);                                      \
   }                                                                 \
-  inline tvm::Tensor Name(const tvm::PrimExpr& A,                   \
-                          const tvm::Tensor& B) {                   \
+  inline tvm::top::Tensor Name(const tvm::PrimExpr& A,              \
+                               const tvm::top::Tensor& B) {         \
     return topi::OpName(A, B);                                      \
   }                                                                 \
-  inline tvm::Tensor Name(const tvm::Tensor& A,                     \
-                          const tvm::PrimExpr& B) {                 \
+  inline tvm::top::Tensor Name(const tvm::top::Tensor& A,           \
+                               const tvm::PrimExpr& B) {            \
     return topi::OpName(A, B);                                      \
   }
 
index c644903..44685fc 100644 (file)
 #ifndef TOPI_CONTRIB_CUBLAS_H_
 #define TOPI_CONTRIB_CUBLAS_H_
 
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "topi/detail/extern.h"
 
 namespace topi {
 namespace contrib {
 using namespace tvm;
+using namespace tvm::top;
 using namespace topi::detail;
 /*!
 * \brief Create an op that multiplies lhs and rhs with cuBLAS
index a61499d..062ad40 100644 (file)
 #ifndef TOPI_CONTRIB_ROCBLAS_H_
 #define TOPI_CONTRIB_ROCBLAS_H_
 
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "topi/detail/extern.h"
 
 namespace topi {
 namespace contrib {
 using namespace tvm;
+using namespace tvm::top;
 /*!
 * \brief Create an op that multiplies lhs and rhs with rocBLAS
 *
index 781258a..637a861 100644 (file)
@@ -24,7 +24,7 @@
 #ifndef TOPI_CUDA_DENSE_H_
 #define TOPI_CUDA_DENSE_H_
 
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 #include "topi/tags.h"
 #include "topi/detail/array_utils.h"
@@ -34,6 +34,7 @@
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace cuda {
 /*!
@@ -47,10 +48,10 @@ namespace cuda {
 *
 * \return Tensor with shape [batch, out_dim]
 */
-inline tvm::Tensor dense_cuda(const Target& target,
-                              const tvm::Tensor& data,
-                              const tvm::Tensor& weight,
-                              const tvm::Tensor& bias,
+inline tvm::top::Tensor dense_cuda(const Target& target,
+                              const tvm::top::Tensor& data,
+                              const tvm::top::Tensor& weight,
+                              const tvm::top::Tensor& bias,
                               const DataType& out_dtype) {
   CHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data";
   CHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight";
@@ -66,7 +67,7 @@ inline tvm::Tensor dense_cuda(const Target& target,
     CHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported.";
     auto mm = topi::contrib::cublas_matmul(data, weight, false, true);
     if (bias.defined()) {
-      mm = tvm::compute({ batch, out_dim },
+      mm = tvm::top::compute({ batch, out_dim },
                         [&](Var i, Var j) {
                           return mm(i, j) + bias(j);
                         }, "tensor", kBroadcast);
index 663bc1f..5e42126 100644 (file)
 
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace cuda {
 
@@ -39,7 +40,7 @@ namespace cuda {
  *
  * \param sch The schedule to update.
  * \param out The tensor representing the injective op.
- * 
+ *
  * \return The updated schedule.
  */
 inline Schedule schedule_injective_from_existing(Schedule sch, const Tensor& out) {
@@ -67,7 +68,7 @@ inline Schedule schedule_injective(const Target &target, const Array<Tensor>& ou
     out_ops.push_back(t->op);
   }
   auto s = create_schedule(out_ops);
-  tvm::schedule::AutoInlineInjective(s);
+  tvm::top::AutoInlineInjective(s);
   for (auto out : outs) {
     schedule_injective_from_existing(s, out);
   }
index 3025786..708f8d5 100644 (file)
 #ifndef TOPI_CUDA_NORMALIZATION_H_
 #define TOPI_CUDA_NORMALIZATION_H_
 
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 #include "topi/tags.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 namespace cuda {
 /*!
 * \brief Create a CUDA schedule for LRN
index df4ab33..d2a5c1f 100644 (file)
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
 #include "topi/detail/array_utils.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace cuda {
 
index a82b363..2445674 100644 (file)
 
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace cuda {
 /*!
index 33be899..6f12de0 100644 (file)
 
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace cuda {
 
@@ -50,9 +51,9 @@ inline Schedule schedule_softmax(const Target &target, const Array<Tensor>& outs
   auto s = create_schedule(out_ops);
 
   auto softmax = outs[0];
-  tvm::Tensor max_elem;
-  tvm::Tensor expsum;
-  tvm::Tensor exp;
+  tvm::top::Tensor max_elem;
+  tvm::top::Tensor expsum;
+  tvm::top::Tensor exp;
   bool has_exp = false;
 
   auto tag = softmax->op.as<ComputeOpNode>()->tag;
index 93e7c3e..0c0feec 100644 (file)
 #ifndef TOPI_DETAIL_ARRAY_UTILS_H_
 #define TOPI_DETAIL_ARRAY_UTILS_H_
 
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 
 namespace topi {
 namespace detail {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
  * \brief Search an array for a specific item
index 2d326e7..2e644ee 100644 (file)
@@ -29,7 +29,7 @@
 #include <string>
 
 #include "tvm/ir_pass.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 #include "topi/detail/constant_utils.h"
 
@@ -100,7 +100,7 @@ inline BroadcastHelper BroadcastShape(const tvm::Array<tvm::PrimExpr>& shape1,
 
 inline tvm::Array<tvm::PrimExpr> InputIndexFromBroadcast(
     const tvm::Array<tvm::Var>& ovars,
-    const tvm::Tensor& T,
+    const tvm::top::Tensor& T,
     const std::deque<tvm::Var>& my_vars,
     const std::deque<tvm::Var>& all_vars) {
   tvm::Array<tvm::PrimExpr> ivars;
@@ -127,9 +127,9 @@ inline tvm::Array<tvm::PrimExpr> InputIndexFromBroadcast(
 }
 
 template <typename FBinaryExpr>
-inline tvm::Tensor WithBroadcast(FBinaryExpr op,
-                                 const tvm::Tensor& A,
-                                 const tvm::Tensor& B,
+inline tvm::top::Tensor WithBroadcast(FBinaryExpr op,
+                                 const tvm::top::Tensor& A,
+                                 const tvm::top::Tensor& B,
                                  const std::string& name = "tensor",
                                  const std::string& tag = "") {
   auto bh = BroadcastShape(A->shape, B->shape);
@@ -137,7 +137,7 @@ inline tvm::Tensor WithBroadcast(FBinaryExpr op,
     return op(A(InputIndexFromBroadcast(ovars, A, bh.vars1, bh.all_vars)),
               B(InputIndexFromBroadcast(ovars, B, bh.vars2, bh.all_vars)));
   };
-  return tvm::compute(
+  return tvm::top::compute(
       tvm::Array<tvm::PrimExpr>(bh.common_shape.begin(), bh.common_shape.end()),
       l,
       name,
index e6de76f..2100493 100644 (file)
@@ -33,6 +33,7 @@
 namespace topi {
 namespace detail {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
  * \brief Test whether the given Expr is a constant integer
index 8bdda80..717ce4d 100644 (file)
@@ -24,7 +24,7 @@
 #ifndef TOPI_DETAIL_EXTERN_H_
 #define TOPI_DETAIL_EXTERN_H_
 
-#include <tvm/operation.h>
+#include <tvm/top/operation.h>
 #include <vector>
 #include <string>
 
@@ -32,6 +32,7 @@
 namespace topi {
 namespace detail {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
  * \brief Construct a buffer to pass to an external function
index d573a3f..5a77db6 100644 (file)
 #ifndef TOPI_DETAIL_FUSE_H_
 #define TOPI_DETAIL_FUSE_H_
 
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 
 namespace topi {
 namespace detail {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
  * \brief Fuse all of the given args
index ec757e9..12b1541 100644 (file)
@@ -32,6 +32,7 @@
 namespace topi {
 namespace detail {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
  * \brief Get padding size for each side given padding height and width
index 5526a7d..c8da45d 100644 (file)
 
 #include <vector>
 
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 
 namespace topi {
 namespace detail {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
 * \brief Flatten the indices to 1D
index e52452e..e306880 100644 (file)
@@ -28,6 +28,7 @@
 namespace topi {
 namespace detail {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
  * \brief Check whether input shape has dimension of size 0;
index df7cff0..46515e7 100644 (file)
@@ -33,6 +33,7 @@
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 // Unary intrinsic operators
 #define TOPI_DECLARE_UNARY_OP(OpName)                           \
index 9dff8d5..540e4a1 100644 (file)
 
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace generic {
 /*!
@@ -66,7 +67,7 @@ inline Schedule default_schedule_auto_inline(const Target& target, Array<Tensor>
   }
   auto s = create_schedule(out_ops);
   auto x = outs[0];
-  tvm::schedule::AutoInlineInjective(s);
+  tvm::top::AutoInlineInjective(s);
   auto axis = s[x]->op.as<ComputeOpNode>()->axis;
   if (axis.size() > 0) {
     detail::Fuse(s[x], axis);
index 03e362c..ea1e7b6 100644 (file)
 
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 #include "injective.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace generic {
 /*!
@@ -49,7 +50,7 @@ inline Schedule schedule_extern(const Target& target, Array<Tensor> outs) {
   }
   auto s = create_schedule(out_ops);
 
-  tvm::schedule::AutoInlineInjective(s);
+  tvm::top::AutoInlineInjective(s);
   for (auto out : outs) {
     if (out->op->IsInstance<ExternOpNode>()) {
       continue;
index fa7df4c..7df2e15 100644 (file)
 
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace generic {
 
@@ -39,7 +40,7 @@ namespace generic {
  *
  * \param sch The schedule to update.
  * \param out The tensor representing the injective op.
- * 
+ *
  * \return The updated schedule.
  */
 inline Schedule schedule_injective_from_existing(Schedule sch, const Tensor& out) {
@@ -61,7 +62,7 @@ inline Schedule schedule_injective(const Target &target, const Array<Tensor>& ou
     out_ops.push_back(t->op);
   }
   auto s = create_schedule(out_ops);
-  tvm::schedule::AutoInlineInjective(s);
+  tvm::top::AutoInlineInjective(s);
   auto x = outs[0];
   schedule_injective_from_existing(s, x);
 
index f169ec9..7c1bad3 100644 (file)
 #include "topi/elemwise.h"
 #include "topi/detail/ravel_unravel.h"
 #include "topi/detail/constant_utils.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 
 namespace topi {
 namespace image {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
  * \brief Sample a point in a tensor using bilinear interpolation.
index 3f65c75..b86c00c 100644 (file)
 #include "topi/detail/constant_utils.h"
 #include "tvm/ir.h"
 #include "tvm/ir_pass.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 namespace detail {
 
 template <typename T>
@@ -61,11 +62,11 @@ tvm::PrimExpr Map(const tvm::Array<tvm::PrimExpr>& exprs, T op) {
  * \return A Tensor whose op member is the relu operation
  */
 template <typename T>
-inline tvm::Tensor relu(const tvm::Tensor& t,
+inline tvm::top::Tensor relu(const tvm::top::Tensor& t,
                         T threshold = static_cast<T>(0),
                         std::string name = "T_relu",
                         std::string tag = kElementWise) {
-  return tvm::compute(
+  return tvm::top::compute(
       t->shape,
       [&](const tvm::Array<tvm::Var>& i) {
         auto threshold_const = tvm::make_const(t->dtype, threshold);
@@ -85,11 +86,11 @@ inline tvm::Tensor relu(const tvm::Tensor& t,
 *
 * \return A Tensor whose op member is the leaky relu operation
 */
-inline tvm::Tensor leaky_relu(const tvm::Tensor& t,
+inline tvm::top::Tensor leaky_relu(const tvm::top::Tensor& t,
                               double alpha = 0.1,
                               std::string name = "T_leaky_relu",
                               std::string tag = kElementWise) {
-  return tvm::compute(
+  return tvm::top::compute(
     t->shape,
     [&](const tvm::Array<tvm::Var>& i) {
       auto value = t(i);
@@ -111,8 +112,8 @@ inline tvm::Tensor leaky_relu(const tvm::Tensor& t,
  *
  * \return A Tensor whose op member is the parametric relu operation
  */
-inline tvm::Tensor prelu(const tvm::Tensor &x,
-                         const tvm::Tensor &slope,
+inline tvm::top::Tensor prelu(const tvm::top::Tensor &x,
+                         const tvm::top::Tensor &slope,
                          const int axis = 1,
                          std::string name = "T_prelu",
                          std::string tag = kBroadcast) {
@@ -122,7 +123,7 @@ inline tvm::Tensor prelu(const tvm::Tensor &x,
         topi::detail::GetConstInt(x->shape[axis]))
         << "Wrong slope shape received.";
 
-  return tvm::compute(x->shape,
+  return tvm::top::compute(x->shape,
                      [&](const tvm::Array<tvm::Var> &indices) {
                         auto xval = x(indices);
                         return tvm::ir::SelectNode::make(
@@ -171,7 +172,7 @@ inline tvm::Tensor prelu(const tvm::Tensor &x,
  *
  *
  */
-inline tvm::Tensor pad(const tvm::Tensor& t,
+inline tvm::top::Tensor pad(const tvm::top::Tensor& t,
                        const tvm::Array<tvm::PrimExpr>& pad_before,
                        tvm::Array<tvm::PrimExpr> pad_after = tvm::Array<tvm::PrimExpr>(),
                        PrimExpr pad_value = PrimExpr(),
@@ -251,7 +252,7 @@ inline tvm::Tensor pad(const tvm::Tensor& t,
     }
     return t(indices);
   };
-  return tvm::compute(output_shape, l, name, tag);
+  return tvm::top::compute(output_shape, l, name, tag);
 }
 
 /*!
@@ -274,8 +275,8 @@ inline tvm::Tensor pad(const tvm::Tensor& t,
  * \return A Tensor whose op member is the 2-D convolution operation (NCHW
  * layout)
  */
-inline tvm::Tensor conv2d_nchw(const tvm::Tensor& I,
-                               const tvm::Tensor& W,
+inline tvm::top::Tensor conv2d_nchw(const tvm::top::Tensor& I,
+                               const tvm::top::Tensor& W,
                                int pad_h = 0,
                                int pad_w = 0,
                                int stride_h = 1,
@@ -303,7 +304,7 @@ inline tvm::Tensor conv2d_nchw(const tvm::Tensor& I,
         T(b, i, stride_h * h + kh, stride_w * w + kw) * W(o, i, kh, kw),
         {i, kh, kw});
   };
-  return tvm::compute(output_shape, l, name, tag);
+  return tvm::top::compute(output_shape, l, name, tag);
 }
 
 /*!
@@ -325,8 +326,8 @@ inline tvm::Tensor conv2d_nchw(const tvm::Tensor& I,
  * \return A Tensor whose op member is the 2-D convolution operation
  * (HWCN layout)
  */
-inline tvm::Tensor conv2d_hwcn(const tvm::Tensor& I,
-                               const tvm::Tensor& W,
+inline tvm::top::Tensor conv2d_hwcn(const tvm::top::Tensor& I,
+                               const tvm::top::Tensor& W,
                                int pad_h = 0,
                                int pad_w = 0,
                                int stride_h = 1,
@@ -352,7 +353,7 @@ inline tvm::Tensor conv2d_hwcn(const tvm::Tensor& I,
         T(stride_h * h + kh, stride_w * w + kw, i, b) * W(kh, kw, i, o),
         {i, kh, kw});
   };
-  return tvm::compute(output_shape, l, name, tag);
+  return tvm::top::compute(output_shape, l, name, tag);
 }
 
 
@@ -376,8 +377,8 @@ inline tvm::Tensor conv2d_hwcn(const tvm::Tensor& I,
  * \return A Tensor whose op member is the 2-D depthwise convolution operation
  * (NCHW layout)
  */
-inline tvm::Tensor depthwise_conv2d_nchw(const tvm::Tensor& I,
-                                         const tvm::Tensor& W,
+inline tvm::top::Tensor depthwise_conv2d_nchw(const tvm::top::Tensor& I,
+                                         const tvm::top::Tensor& W,
                                          int pad_h = 0,
                                          int pad_w = 0,
                                          int stride_h = 1,
@@ -406,11 +407,11 @@ inline tvm::Tensor depthwise_conv2d_nchw(const tvm::Tensor& I,
                     W(indexdiv(i, pCM), indexmod(o, pCM), kh, kw),
                     {i, kh, kw});
   };
-  return tvm::compute(output_shape, l, name, tag);
+  return tvm::top::compute(output_shape, l, name, tag);
 }
 
-inline tvm::Tensor depthwise_conv2d_nhwc(const tvm::Tensor& I,
-                                         const tvm::Tensor& W,
+inline tvm::top::Tensor depthwise_conv2d_nhwc(const tvm::top::Tensor& I,
+                                         const tvm::top::Tensor& W,
                                          int pad_h = 0,
                                          int pad_w = 0,
                                          int stride_h = 1,
@@ -439,7 +440,7 @@ inline tvm::Tensor depthwise_conv2d_nhwc(const tvm::Tensor& I,
                     W(kh, kw, indexdiv(i, pCM), indexmod(o, pCM)),
                     {kh, kw, i});
   };
-  return tvm::compute(output_shape, l, name, tag);
+  return tvm::top::compute(output_shape, l, name, tag);
 }
 
 /*!
@@ -462,8 +463,8 @@ inline tvm::Tensor depthwise_conv2d_nhwc(const tvm::Tensor& I,
  * \return A Tensor whose op member is the 2-D groupconvolution operation
  * (NCHW layout)
  */
-inline tvm::Tensor group_conv2d_ngchw(const tvm::Tensor& I,
-                                      const tvm::Tensor& W,
+inline tvm::top::Tensor group_conv2d_ngchw(const tvm::top::Tensor& I,
+                                      const tvm::top::Tensor& W,
                                       int pad_h = 0,
                                       int pad_w = 0,
                                       int stride_h = 1,
@@ -498,7 +499,7 @@ inline tvm::Tensor group_conv2d_ngchw(const tvm::Tensor& I,
         I(b, g, i, stride_h * h + kh, stride_w * w + kw) * W(g, i, o, kh, kw),
         {i, kh, kw});
   };
-  return tvm::compute(output_shape, l, name, tag);
+  return tvm::top::compute(output_shape, l, name, tag);
 }
 
 }  // namespace topi
index 60b4175..a3bd96d 100644 (file)
 #include <string>
 
 #include "topi/tags.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 
 namespace topi {
 namespace nn {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
 * \brief Creates an operation that calculates matrix multiplication in batch.
@@ -41,8 +42,8 @@ using namespace tvm;
 *
 * \return Tensor with shape [batch, M, N]
 */
-inline tvm::Tensor batch_matmul(const tvm::Tensor& x,
-                                const tvm::Tensor& y) {
+inline tvm::top::Tensor batch_matmul(const tvm::top::Tensor& x,
+                                const tvm::top::Tensor& y) {
   CHECK_EQ(x->shape.size(), 3) << "batch_matmul requires 3-D data";
   CHECK_EQ(y->shape.size(), 3) << "batch_matmul requires 3-D data";
 
@@ -52,7 +53,7 @@ inline tvm::Tensor batch_matmul(const tvm::Tensor& x,
   auto N = y->shape[1];
 
   auto k = tvm::reduce_axis(Range(0, K), "k");
-  auto result = tvm::compute(
+  auto result = tvm::top::compute(
       { batch, M, N },
       [&](Var b, Var i, Var j) {
         return tvm::sum(x(b, i, k) * y(b, j, k), { k });
index 148d892..2d6f47c 100644 (file)
@@ -29,7 +29,7 @@
 #include "topi/tags.h"
 #include "topi/broadcast.h"
 #include "topi/transform.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 
 namespace topi {
@@ -43,7 +43,9 @@ namespace nn {
 * \param axis The axis to add the bias to.
 * \return Tensor with shape [batch, in_dim]
 */
-inline tvm::Tensor bias_add(const tvm::Tensor& data, const tvm::Tensor& bias, int axis) {
+inline tvm::top::Tensor bias_add(const tvm::top::Tensor& data,
+                                 const tvm::top::Tensor& bias,
+                                 int axis) {
   int data_ndim = data->shape.size();
   if (axis < 0) {
     axis += data_ndim;
index e2af3ae..7c92034 100644 (file)
@@ -26,7 +26,7 @@
 
 #include <string>
 
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/ir_pass.h"
 #include "topi/tags.h"
 #include "topi/detail/constant_utils.h"
@@ -34,6 +34,7 @@
 namespace topi {
 namespace nn {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
 * \brief Binarization and bit-packing along a certain axis.
@@ -46,7 +47,7 @@ using namespace tvm;
 *
 * \return Output tensor with dtype uint32
 */
-inline tvm::Tensor binarize_pack(const tvm::Tensor& data,
+inline tvm::top::Tensor binarize_pack(const tvm::top::Tensor& data,
                                  int axis,
                                  std::string name = "PackedInput",
                                  std::string tag = "binarize_pack") {
@@ -62,7 +63,7 @@ inline tvm::Tensor binarize_pack(const tvm::Tensor& data,
                      ishape[i]);
   }
 
-  return tvm::compute(
+  return tvm::top::compute(
     oshape,
     [&](const Array<Var>& indices) {
       Array<PrimExpr> start_idx;
@@ -98,8 +99,8 @@ inline tvm::Tensor binarize_pack(const tvm::Tensor& data,
 *
 * \return Tensor with shape [batch, out_dim], dtype is float32
 */
-inline tvm::Tensor binary_dense(const tvm::Tensor& data,
-                                const tvm::Tensor& weight) {
+inline tvm::top::Tensor binary_dense(const tvm::top::Tensor& data,
+                                const tvm::top::Tensor& weight) {
   CHECK_EQ(data->shape.size(), 2) << "binary_dense requires 2-D data";
   CHECK_EQ(weight->shape.size(), 2) << "binary_dense requires 2-D weight";
   CHECK_EQ(data->dtype, DataType::UInt(32)) << "binary_dense requires uint32 data";
@@ -110,13 +111,13 @@ inline tvm::Tensor binary_dense(const tvm::Tensor& data,
   auto out_dim = weight->shape[0];
 
   auto k = tvm::reduce_axis(Range(0, in_dim), "k");
-  auto matmul = tvm::compute(
+  auto matmul = tvm::top::compute(
     { batch, out_dim },
     [&](Var i, Var j) {
       return tvm::sum(popcount(data(i, k) ^ weight(j, k)), { k });
     }, "tensor", "binary_dense");
 
-  return tvm::compute(
+  return tvm::top::compute(
     { batch, out_dim },
     [&](Var i, Var j) {
       return 32 * in_dim - 2.0f * matmul(i, j);
index f1bb07a..7cdc8d7 100644 (file)
 #include <string>
 
 #include "topi/tags.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 
 namespace topi {
 namespace nn {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
 * \brief Creates an operation that calculates data * weight^T + bias
@@ -43,9 +44,9 @@ using namespace tvm;
 *
 * \return Tensor with shape [batch, out_dim]
 */
-inline tvm::Tensor dense(const tvm::Tensor& data,
-                         const tvm::Tensor& weight,
-                         const tvm::Tensor& bias,
+inline tvm::top::Tensor dense(const tvm::top::Tensor& data,
+                         const tvm::top::Tensor& weight,
+                         const tvm::top::Tensor& bias,
                          const DataType& out_dtype) {
   CHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data";
   CHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight";
@@ -58,7 +59,7 @@ inline tvm::Tensor dense(const tvm::Tensor& data,
   auto out_dim = weight->shape[0];
 
   auto k = tvm::reduce_axis(Range(0, in_dim), "k");
-  auto matmul = tvm::compute(
+  auto matmul = tvm::top::compute(
     { batch, out_dim },
     [&](Var i, Var j) {
       return tvm::sum(tvm::cast(out_dtype, data(i, k)) *
@@ -66,7 +67,7 @@ inline tvm::Tensor dense(const tvm::Tensor& data,
     }, "tensor", "dense");
 
   if (bias.defined()) {
-    matmul = tvm::compute(
+    matmul = tvm::top::compute(
       { batch, out_dim },
       [&](Var i, Var j) {
         return matmul(i, j) + tvm::cast(out_dtype, bias(j));
index 334b170..6ffb3da 100644 (file)
 
 #include <string>
 
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/ir_pass.h"
 #include "topi/tags.h"
 
 namespace topi {
 namespace nn {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
 * \brief Create a new expression of the logical and of all
@@ -79,7 +80,7 @@ inline Tensor dilate(const Tensor& x,
       (x->shape[i] - 1) * cast(DataType::Int(32), strides[i] + 1)));
   }
 
-  return tvm::compute(
+  return tvm::top::compute(
     out_shape,
     [&](const Array<Var>& indices) {
       Array<PrimExpr> not_zero;
index 6b542f7..de11b6d 100644 (file)
 
 #include "topi/tags.h"
 #include "topi/detail/constant_utils.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 
 
 namespace topi {
 namespace nn {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
 * \brief Flattens the input tensor into a 2-D tensor by collapsing higher dimensions.
@@ -64,7 +65,7 @@ inline Tensor flatten(const Tensor& x,
   }
   std::reverse(extra_shape.begin(), extra_shape.end());
 
-  return tvm::compute(
+  return tvm::top::compute(
     oshape, [&](Var i, Var j) {
       PrimExpr idx = j;
       std::vector<PrimExpr> index;
index 3f60359..6670e6d 100644 (file)
 #include <string>
 #include <algorithm>
 #include "topi/tags.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 namespace topi {
 namespace nn {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
 * \brief L2 normalization inference operator
@@ -59,7 +60,7 @@ inline Tensor l2_normalize(const Tensor& data,
   Tensor sum_value = topi::sum(dot_value, axis, true);
   Tensor expand_sum = topi::broadcast_to(sum_value, input_shape);
   return topi::divide(data,
-                      topi::sqrt(tvm::compute(expand_sum->shape,
+                      topi::sqrt(tvm::top::compute(expand_sum->shape,
                                               [&](const Array<Var>& i){
                                                 return (max(expand_sum(i), eps));
                                               }, name, tag)));
index 0cce997..cd3b9b2 100644 (file)
 #include <string>
 
 #include "topi/tags.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 
 namespace topi {
 namespace nn {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
 * \brief Local response normalization inference operator
@@ -67,21 +68,21 @@ inline Tensor lrn(const Tensor& data,
   auto rxs = tvm::reduce_axis(Range(0, size), "rxs");
   Tensor sqr_sum;
   if (axis == 1) {
-    sqr_sum = tvm::compute(input_shape,
+    sqr_sum = tvm::top::compute(input_shape,
                            [&](Var i, Var l, Var j, Var k) {
                            return tvm::sum(pad_data(i, l + rxs, j, k) *
                                            pad_data(i, l + rxs, j, k),
                                            {rxs});
                            });
   } else if (axis == 3) {
-    sqr_sum = tvm::compute(input_shape,
+    sqr_sum = tvm::top::compute(input_shape,
                            [&](Var i, Var l, Var j, Var k) {
                            return tvm::sum(pad_data(i, l, j, k + rxs) *
                                            pad_data(i, l, j, k + rxs),
                                            {rxs});
                            });
   }
-  auto sqrt_sum_up = tvm::compute(
+  auto sqrt_sum_up = tvm::top::compute(
       input_shape,
       [&](Var i, Var j, Var k, Var l) {
         return tvm::pow(bias +
index 03043ff..4cd2fe1 100644 (file)
 #include <string>
 
 #include "topi/tags.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 
 namespace topi {
 namespace nn {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
 * \brief Scale and shift with NCHW order
@@ -49,7 +50,7 @@ inline Tensor scale_shift_nchw(const Tensor& x,
                                const Tensor& shift,
                                std::string name = "ScaleShift",
                                std::string tag = kBroadcast) {
-  return tvm::compute(
+  return tvm::top::compute(
     x->shape,
     [&](Var b, Var c, Var h, Var w) {
       return x(b, c, h, w) * scale(c) + shift(w);
@@ -72,7 +73,7 @@ inline Tensor scale_shift_nhwc(const Tensor& x,
                                const Tensor& shift,
                                std::string name = "ScaleShift",
                                std::string tag = kBroadcast) {
-  return tvm::compute(
+  return tvm::top::compute(
     x->shape,
     [&](Var b, Var h, Var w, Var c) {
       return x(b, h, w, c) * scale(c) + shift(w);
index a074ee1..ac284a0 100644 (file)
@@ -37,6 +37,7 @@
 namespace topi {
 namespace nn {
 using namespace tvm;
+using namespace tvm::top;
 
 /*! \brief Pooling type */
 enum PoolType : int {
@@ -124,7 +125,7 @@ inline Tensor pool_impl(const Tensor& x,
   if (pool_type == kMaxPool) {
     auto temp = do_pad ? pad(
         x, pad_before, pad_after, tvm::min_value(x->dtype), "pad_temp") : x;
-    return tvm::compute(out_shape, [&](const Array<Var>& output) {
+    return tvm::top::compute(out_shape, [&](const Array<Var>& output) {
       Array<PrimExpr> indices;
       for (const Var& var : output) indices.push_back(var);
       indices.Set(height_axis, output[height_axis] * stride_height + dheight);
@@ -136,7 +137,7 @@ inline Tensor pool_impl(const Tensor& x,
     auto temp = do_pad ? pad(x, pad_before, pad_after, 0, "pad_temp") : x;
 
     // TVM compute for summing the pooling window.
-    auto pool_sum = tvm::compute(out_shape,
+    auto pool_sum = tvm::top::compute(out_shape,
     [&](const Array<Var>& output) {
       Array<PrimExpr> indices;
       for (const Var& var : output) indices.push_back(var);
@@ -146,7 +147,7 @@ inline Tensor pool_impl(const Tensor& x,
     }, "tensor", "pool_sum");
 
     // TVM compute for dividing the reduced window sum by kernel size.
-    return tvm::compute(out_shape,
+    return tvm::top::compute(out_shape,
     [&](const Array<Var>& output) {
       Array<PrimExpr> indices;
       for (const Var& var : output) indices.push_back(var);
@@ -244,7 +245,7 @@ inline Tensor pool_grad_impl(const Tensor& out_grad,
         x, pad_before, pad_after, tvm::min_value(x->dtype), "pad_temp") : x;
 
     auto mp_argmax =
-        tvm::compute(out_shape,
+        tvm::top::compute(out_shape,
                      [&](const Array<Var>& inds) {
                        Array<PrimExpr> window_inds{inds.begin(), inds.end()};
                        window_inds.Set(height_axis, inds[height_axis] * stride_height + dheight);
@@ -256,7 +257,7 @@ inline Tensor pool_grad_impl(const Tensor& out_grad,
 
     auto mp_inds = mp_argmax[0];
 
-    return tvm::compute(
+    return tvm::top::compute(
         x->shape,
         [&](const Array<Var>& inds) {
           Array<PrimExpr> pad_inds {inds.begin(), inds.end()};
@@ -287,7 +288,7 @@ inline Tensor pool_grad_impl(const Tensor& out_grad,
   } else if (pool_type == kAvgPool) {
     auto windowh = tvm::reduce_axis(Range(0, (kernel_height + stride_height - 1) / stride_height));
     auto windoww = tvm::reduce_axis(Range(0, (kernel_width + stride_width - 1) / stride_width));
-    return tvm::compute(
+    return tvm::top::compute(
         x->shape,
         [&](const Array<Var>& inds) {
           PrimExpr pad_h_idx = inds[height_axis] + pad_top;
@@ -512,7 +513,7 @@ inline Tensor adaptive_pool_impl(const Tensor& x,
   out_shape.Set(width_axis, out_width);
 
   if (pool_type == kMaxPool) {
-    return tvm::compute(out_shape, [&](const Array<Var>& output) {
+    return tvm::top::compute(out_shape, [&](const Array<Var>& output) {
       Array<PrimExpr> indices;
       for (const Var& var : output) indices.push_back(var);
       auto i_start_h = start_index(output[height_axis], out_height, height);
@@ -526,7 +527,7 @@ inline Tensor adaptive_pool_impl(const Tensor& x,
       return tvm::max(x(indices), { dheight, dwidth });  // NOLINT(*)
     }, "tensor", "adaptive_pool_max");
   } else if (pool_type == kAvgPool) {
-    auto pool_sum = tvm::compute(out_shape, [&](const Array<Var>& output) {
+    auto pool_sum = tvm::top::compute(out_shape, [&](const Array<Var>& output) {
       Array<PrimExpr> indices;
       for (const Var& var : output) indices.push_back(var);
       auto i_start_h = start_index(output[height_axis], out_height, height);
@@ -542,7 +543,7 @@ inline Tensor adaptive_pool_impl(const Tensor& x,
       return tvm::sum(x(indices), { dheight, dwidth });
     }, "tensor", "adaptive_pool_sum");
 
-    return tvm::compute(out_shape, [&](const Array<Var>& output) {
+    return tvm::top::compute(out_shape, [&](const Array<Var>& output) {
       Array<PrimExpr> indices;
       for (const Var& var : output) indices.push_back(var);
       auto i_start_h = start_index(output[height_axis], out_height, height);
@@ -696,7 +697,7 @@ inline Tensor pool_impl_nd(const Tensor& x,
   if (pool_type == kMaxPool) {
     auto temp = do_pad ? pad(
         x, pad_before, pad_after, tvm::min_value(x->dtype), "pad_temp") : x;
-    return tvm::compute(out_shape, [&](const Array<Var>& output) {
+    return tvm::top::compute(out_shape, [&](const Array<Var>& output) {
       Array<PrimExpr> indices;
       for (const Var& var : output) indices.push_back(var);
 
@@ -712,7 +713,7 @@ inline Tensor pool_impl_nd(const Tensor& x,
     auto temp = do_pad ? pad(x, pad_before, pad_after, 0, "pad_temp") : x;
 
     // TVM compute for summing the pooling window.
-    auto pool_sum = tvm::compute(out_shape,
+    auto pool_sum = tvm::top::compute(out_shape,
     [&](const Array<Var>& output) {
       Array<PrimExpr> indices;
       for (const Var& var : output) indices.push_back(var);
@@ -725,7 +726,7 @@ inline Tensor pool_impl_nd(const Tensor& x,
     }, "tensor", "pool_sum");
 
     // TVM compute for dividing the reduced window sum by kernel size.
-    return tvm::compute(out_shape,
+    return tvm::top::compute(out_shape,
     [&](const Array<Var>& output) {
       Array<PrimExpr> indices;
       for (const Var& var : output) indices.push_back(var);
index 58ecc95..72e1745 100644 (file)
 
 #include "topi/reduction.h"
 #include "topi/tags.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 
 namespace topi {
 namespace nn {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
 * \brief Softmax activation
@@ -109,14 +110,14 @@ inline Tensor softmax(const Tensor &x,
     return exp(indices) / expsum(non_reduce_indices);
   };
 
-  auto max_elem = tvm::compute(reduced_shape, _compute_max);
-  auto exp = tvm::compute(input_shape, [&](const Array<Var> &indices) {
+  auto max_elem = tvm::top::compute(reduced_shape, _compute_max);
+  auto exp = tvm::top::compute(input_shape, [&](const Array<Var> &indices) {
       return _compute_exp(max_elem, indices);
   });
-  auto expsum = tvm::compute(reduced_shape, [&](const Array<Var> &indices) {
+  auto expsum = tvm::top::compute(reduced_shape, [&](const Array<Var> &indices) {
       return _compute_expsum(exp, indices);
   });
-  return tvm::compute(input_shape, [&](const Array<Var> &indices) {
+  return tvm::top::compute(input_shape, [&](const Array<Var> &indices) {
       return _normalize(exp, expsum, indices);
   }, name, tag, attrs);
 }
@@ -139,16 +140,16 @@ inline Tensor log_softmax(const Tensor& x,
   PrimExpr n = x->shape[1];
 
   auto k = tvm::reduce_axis(Range(0, n), "k");
-  auto max_elem = tvm::compute(
+  auto max_elem = tvm::top::compute(
     { m }, [&](Var i) {
       return tvm::max(x(i, k), Array<IterVar>{ k }); });
   k = tvm::reduce_axis(Range(0, n), "k");
 
-  auto expsum = tvm::compute(
+  auto expsum = tvm::top::compute(
     { m }, [&](Var i) {
       return tvm::sum(tvm::exp(x(i, k) - max_elem(i)), { k }); });
 
-  return tvm::compute(
+  return tvm::top::compute(
     x->shape, [&](Var i, Var j) {
       return x(i, j) - max_elem(i) - tvm::log(expsum(i));
     }, name, tag);
index b6230c7..f624f36 100644 (file)
@@ -34,6 +34,7 @@
 namespace topi {
 namespace nn {
 using namespace tvm;
+using namespace tvm::top;
 using namespace topi::image;
 
 /*!
index ac843b1..197ef2b 100644 (file)
 #include "topi/transform.h"
 #include "topi/detail/ravel_unravel.h"
 #include "topi/detail/constant_utils.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 /*! \brief The operation to use for CommReduce */
 using FReduce = std::function<PrimExpr(PrimExpr source, const Array<IterVar>& axis)>;
@@ -167,7 +168,7 @@ inline Tensor DoCommReduce(const Tensor& data,
     return func(data(eval_range), r_axes);
   };
 
-  return tvm::compute(target_shape, compute, data->op->name + "_red", kCommReduce);
+  return tvm::top::compute(target_shape, compute, data->op->name + "_red", kCommReduce);
 }
 
 /*!
@@ -251,11 +252,11 @@ inline Tensor CommReduceIdx(const Tensor& data,
     return func({ idx, data(eval_range) }, reduce_axes, nullptr);
   };
 
-  auto temp_idx_val = tvm::compute(target_shape, compute,
+  auto temp_idx_val = tvm::top::compute(target_shape, compute,
                                    data->op->name + "_red_temp", kCommReduceIdx);
   auto temp_idx = temp_idx_val[0];
   auto temp_val = temp_idx_val[1];
-  return tvm::compute(
+  return tvm::top::compute(
     target_shape,
     [&temp_idx](const Array<Var>& indices) { return temp_idx(indices); },
     data->op->name + "_red",
index 7eb9f9a..0029748 100644 (file)
@@ -24,7 +24,7 @@
 #ifndef TOPI_ROCM_DENSE_H_
 #define TOPI_ROCM_DENSE_H_
 
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 #include "topi/tags.h"
 #include "topi/detail/array_utils.h"
@@ -35,6 +35,7 @@
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace rocm {
 /*!
@@ -48,10 +49,10 @@ namespace rocm {
 *
 * \return Tensor with shape [batch, out_dim]
 */
-inline tvm::Tensor dense_rocm(const Target& target,
-                              const tvm::Tensor& data,
-                              const tvm::Tensor& weight,
-                              const tvm::Tensor& bias,
+inline tvm::top::Tensor dense_rocm(const Target& target,
+                              const tvm::top::Tensor& data,
+                              const tvm::top::Tensor& weight,
+                              const tvm::top::Tensor& bias,
                               const DataType& out_dtype) {
   CHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data";
   CHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight";
@@ -67,7 +68,7 @@ inline tvm::Tensor dense_rocm(const Target& target,
     CHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported.";
     auto mm = topi::contrib::rocblas_matmul(data, weight, false, true);
     if (bias.defined()) {
-      mm = tvm::compute({ batch, out_dim },
+      mm = tvm::top::compute({ batch, out_dim },
                         [&](Var i, Var j) {
                           return mm(i, j) + bias(j);
                         }, "tensor", kBroadcast);
index 7b367b7..b2999f4 100644 (file)
 
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 
 #include "topi/cuda/injective.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace rocm {
 
@@ -41,7 +42,7 @@ namespace rocm {
  *
  * \param sch The schedule to update.
  * \param out The tensor representing the injective op.
- * 
+ *
  * \return The updated schedule.
  */
 inline Schedule schedule_injective_from_existing(Schedule sch, const Tensor& out) {
index c6ffd32..7dde9dc 100644 (file)
 #ifndef TOPI_ROCM_NORMALIZATION_H_
 #define TOPI_ROCM_NORMALIZATION_H_
 
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 #include "topi/tags.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 namespace rocm {
 /*!
 * \brief Create a rocm schedule for LRN
index b0ab7cd..14f0aa0 100644 (file)
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
 #include "topi/detail/array_utils.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 
 #include "topi/cuda/pooling.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace rocm {
 
index 22e7c7d..4b788ee 100644 (file)
 
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 
 #include "topi/cuda/reduction.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace rocm {
 /*!
index fbc67ea..43f2731 100644 (file)
 
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 
 #include "topi/cuda/softmax.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace rocm {
 
index 66e2773..41a64eb 100644 (file)
 #include "topi/detail/ravel_unravel.h"
 #include "topi/detail/constant_utils.h"
 #include "topi/detail/tensor_utils.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 #include "tvm/data_layout.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 using namespace topi::detail;
 
 /*!
@@ -1041,8 +1042,8 @@ inline Tensor gather_nd(const Tensor& data,
  *
  * \return A Tensor whose op member is the matmul operation
  */
-inline tvm::Tensor matmul(const tvm::Tensor& A,
-                           const tvm::Tensor& B,
+inline tvm::top::Tensor matmul(const tvm::top::Tensor& A,
+                           const tvm::top::Tensor& B,
                            bool trans_a = false,
                            bool trans_b = false,
                            std::string name = "T_matmul",
@@ -1054,7 +1055,7 @@ inline tvm::Tensor matmul(const tvm::Tensor& A,
     return tvm::sum((trans_a ? A[k][i] : A[i][k]) * (trans_b ? B[j][k] : B[k][j]),
                     {k});
   };
-  return tvm::compute(output_shape, l, name, tag);
+  return tvm::top::compute(output_shape, l, name, tag);
 }
 
 /*!
@@ -1069,7 +1070,7 @@ inline tvm::Tensor matmul(const tvm::Tensor& A,
  * \return A Tensor computing the result
  */
 inline Tensor tensordot(const Tensor& A,
-                        const tvm::Tensor& B,
+                        const tvm::top::Tensor& B,
                         int axes = 2,
                         std::string name = "T_tensordot",
                         std::string tag = kMatMul) {
@@ -1124,7 +1125,7 @@ inline Tensor tensordot(const Tensor& A,
  * \return A Tensor computing the result
  */
 inline Tensor tensordot(const Tensor& A,
-                        const tvm::Tensor& B,
+                        const tvm::top::Tensor& B,
                         Array<PrimExpr> A_axes,
                         Array<PrimExpr> B_axes,
                         std::string name = "T_tensordot",
index df3fade..c5ddea9 100644 (file)
 #include "topi/reduction.h"
 #include "topi/tags.h"
 #include "topi/transform.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/expr_operator.h"
 
 namespace topi {
 namespace vision {
 using namespace tvm;
+using namespace tvm::top;
 
 /*!
 * \brief Reorg operation
@@ -60,7 +61,7 @@ inline Tensor reorg(const Tensor &data,
   int w_in = GetConstInt(input_shape[3]);
   int out_c = c_in / (stride * stride);
 
-  auto out = tvm::compute(input_shape,
+  auto out = tvm::top::compute(input_shape,
                           [&](Var b, Var k, Var j, Var i) {
                           return data(b * stride * stride,
                                       indexmod(k, out_c) * stride * stride,
index 1158749..578360e 100644 (file)
 
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace x86 {
 /*!
index 294871b..e3ac011 100644 (file)
 
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace x86 {
 /*!
@@ -54,7 +55,7 @@ inline Schedule MakeDefaultSchedule(const Target &target,
   auto axis = s[x]->op.as<ComputeOpNode>()->axis;
 
   if (auto_inline) {
-    tvm::schedule::AutoInlineInjective(s);
+    tvm::top::AutoInlineInjective(s);
     if (axis.size() > 0) {
       detail::Fuse(s[x], axis);
     }
index 7cb79ae..12c413e 100644 (file)
 
 #include "topi/tags.h"
 #include "topi/detail/fuse.h"
-#include "tvm/operation.h"
+#include "tvm/top/operation.h"
 #include "tvm/build_module.h"
 
 namespace topi {
 using namespace tvm;
+using namespace tvm::top;
 
 namespace x86 {
 
@@ -39,7 +40,7 @@ namespace x86 {
  *
  * \param sch The schedule to update.
  * \param out The tensor representing the injective op.
- * 
+ *
  * \return The updated schedule.
  */
 inline Schedule schedule_injective_from_existing(Schedule sch, const Tensor& out) {
@@ -69,7 +70,7 @@ inline Schedule schedule_injective(const Target &target, const Array<Tensor>& ou
     out_ops.push_back(t->op);
   }
   auto s = create_schedule(out_ops);
-  tvm::schedule::AutoInlineInjective(s);
+  tvm::top::AutoInlineInjective(s);
 
   auto x = outs[0];
   schedule_injective_from_existing(s, x);
index 21e61cf..8197e89 100644 (file)
@@ -93,7 +93,7 @@ Array<Integer> ArrayOrInt(TVMArgValue arg) {
 inline bool IsTensorType(TVMArgValue arg) {
   return (arg.type_code() == kTVMObjectHandle &&
           static_cast<Object*>(
-              arg.value().v_handle)->IsInstance<tvm::TensorNode>());
+              arg.value().v_handle)->IsInstance<tvm::top::TensorNode>());
 }
 
 
@@ -109,13 +109,13 @@ TVM_REGISTER_GLOBAL("topi.TEST_create_target")
       bool lhs_is_tensor = IsTensorType(args[0]);                       \
       bool rhs_is_tensor = IsTensorType(args[1]);                       \
       if (lhs_is_tensor && rhs_is_tensor) {                             \
-        *rv = Op(args[0].operator tvm::Tensor(),                        \
-                 args[1].operator tvm::Tensor());                       \
+        *rv = Op(args[0].operator tvm::top::Tensor(),                        \
+                 args[1].operator tvm::top::Tensor());                       \
       } else if (!lhs_is_tensor && rhs_is_tensor) {                     \
         *rv = Op(args[0].operator tvm::PrimExpr(),                          \
-                 args[1].operator tvm::Tensor());                       \
+                 args[1].operator tvm::top::Tensor());                       \
       } else if (lhs_is_tensor && !rhs_is_tensor) {                     \
-        *rv = Op(args[0].operator tvm::Tensor(),                        \
+        *rv = Op(args[0].operator tvm::top::Tensor(),                        \
                  args[1].operator tvm::PrimExpr());                         \
       } else if (!lhs_is_tensor && !rhs_is_tensor) {                    \
         *rv = Op(args[0].operator tvm::PrimExpr(),                          \
@@ -757,7 +757,7 @@ TVM_REGISTER_GLOBAL("topi.util.is_empty_shape")
 
 /*! \brief Builder function for instantiating schedules. */
 using FTVMScheduleBuilder = std::function<
-  tvm::Schedule(const tvm::Target& target, const tvm::Array<tvm::Tensor>& outs)>;
+  tvm::top::Schedule(const tvm::Target& target, const tvm::Array<tvm::top::Tensor>& outs)>;
 
 /*!
  * \brief Helper function for registering generic functions matching the
@@ -826,7 +826,7 @@ TVM_REGISTER_GENERIC_FUNC(schedule_binary_dense)
 
 /*! \brief Builder function for instantiating schedules from existing schedules. */
 using FTVMScheduleFromExistingBuilder = std::function<
-  tvm::Schedule(tvm::Schedule sch, const tvm::Tensor& out)>;
+  tvm::top::Schedule(tvm::top::Schedule sch, const tvm::top::Tensor& out)>;
 
 /*!
  * \brief Helper function for registering generic functions matching the
@@ -850,10 +850,10 @@ TVM_REGISTER_GENERIC_FUNC(schedule_injective_from_existing)
   topi::cuda::schedule_injective_from_existing));
 
 /*! \brief Builder function for instantiating dense ops. */
-using FTVMDenseOpBuilder = std::function<tvm::Tensor(const Target& target,
-                                                     const tvm::Tensor& data,
-                                                     const tvm::Tensor& weight,
-                                                     const tvm::Tensor& bias,
+using FTVMDenseOpBuilder = std::function<tvm::top::Tensor(const Target& target,
+                                                     const tvm::top::Tensor& data,
+                                                     const tvm::top::Tensor& weight,
+                                                     const tvm::top::Tensor& bias,
                                                      const DataType& out_dtype)>;
 
 /*!
@@ -879,9 +879,9 @@ inline PackedFunc WrapDenseOp(FTVMDenseOpBuilder builder) {
 
 TVM_REGISTER_GENERIC_FUNC(dense)
 .set_default(WrapDenseOp([](const Target& target,
-                            const tvm::Tensor& data,
-                            const tvm::Tensor& weight,
-                            const tvm::Tensor& bias,
+                            const tvm::top::Tensor& data,
+                            const tvm::top::Tensor& weight,
+                            const tvm::top::Tensor& bias,
                             const DataType& out_dtype) {
   return topi::nn::dense(data, weight, bias, out_dtype);
 }))