Bring up namespace te -- Tensor expression language DSL.
src/node/*.cc
src/ir/*.cc
src/arith/*.cc
- src/top/*.cc
+ src/te/*.cc
src/autotvm/*.cc
src/tir/*.cc
src/driver/*.cc
namespace tvm {
// forward declare Tensor
-namespace top {
+namespace te {
class Tensor;
}
namespace arith {
* \return The domain that covers all the calls or provides within the given statement.
*/
Domain DomainTouched(Stmt body,
- const top::Tensor &tensor,
+ const te::Tensor &tensor,
bool consider_calls,
bool consider_provides);
#include <tvm/runtime/packed_func.h>
#include <tvm/target/target.h>
#include <tvm/support/with.h>
-#include <tvm/top/schedule_pass.h>
+#include <tvm/te/schedule_pass.h>
#include <tvm/tir/lowered_func.h>
#include <string>
* \return The lowered function.
*/
TVM_DLL Array<tir::LoweredFunc> lower(
- top::Schedule sch,
- const Array<top::Tensor>& args,
+ te::Schedule sch,
+ const Array<te::Tensor>& args,
const std::string& name,
- const std::unordered_map<top::Tensor, tir::Buffer>& binds,
+ const std::unordered_map<te::Tensor, tir::Buffer>& binds,
const BuildConfig& config);
/*!
 * \brief Split host/device functions and run the necessary passes before build
#ifndef TVM_RELAY_OP_ATTR_TYPES_H_
#define TVM_RELAY_OP_ATTR_TYPES_H_
-#include <tvm/top/tensor.h>
-#include <tvm/top/schedule.h>
+#include <tvm/te/tensor.h>
+#include <tvm/te/schedule.h>
#include <tvm/relay/type.h>
#include <tvm/relay/expr.h>
#include <tvm/target/target.h>
* \return The output compute description of the operator.
*/
using FTVMCompute = runtime::TypedPackedFunc<
- Array<top::Tensor>(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+ Array<te::Tensor>(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target)>;
* \return schedule The computation schedule.
*/
using FTVMSchedule = runtime::TypedPackedFunc<
- top::Schedule(const Attrs& attrs,
- const Array<top::Tensor>& outs,
+ te::Schedule(const Attrs& attrs,
+ const Array<te::Tensor>& outs,
const Target& target)>;
/*!
using FTVMAlterOpLayout = runtime::TypedPackedFunc<
Expr(const Attrs& attrs,
const Array<Expr>& args,
- const Array<top::Tensor>& tinfos)>;
+ const Array<te::Tensor>& tinfos)>;
/*!
* \brief Convert the layout of operators or replace the
using FTVMConvertOpLayout = runtime::TypedPackedFunc<
Expr(const Attrs& attrs,
const Array<Expr>& args,
- const Array<top::Tensor>& tinfos,
+ const Array<te::Tensor>& tinfos,
const std::string& desired_layout)>;
/*!
* \brief Legalizes an expression with another expression. This function will be
using Shape = Array<IndexExpr>;
using FShapeFunc = runtime::TypedPackedFunc<
- Array<top::Tensor>(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+ Array<te::Tensor>(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Array<IndexExpr>& out_ndims)>;
} // namespace relay
*/
/*!
- * \file tvm/top/operation.h
+ * \file tvm/te/operation.h
* \brief Operation node can generate one or multiple Tensors
*/
-#ifndef TVM_TOP_OPERATION_H_
-#define TVM_TOP_OPERATION_H_
+#ifndef TVM_TE_OPERATION_H_
+#define TVM_TE_OPERATION_H_
#include <tvm/arith/analyzer.h>
-#include <tvm/top/tensor.h>
-#include <tvm/top/schedule.h>
+#include <tvm/te/tensor.h>
+#include <tvm/te/schedule.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/op.h>
#include <vector>
#include <unordered_map>
-
-
namespace tvm {
-namespace top {
+/*! \brief Tensor expression language DSL. */
+namespace te {
/*!
* \brief Temporary data structure to store union
inline const OperationNode* Operation::operator->() const {
return static_cast<const OperationNode*>(get());
}
-} // namespace top
+} // namespace te
} // namespace tvm
-#endif // TVM_TOP_OPERATION_H_
+#endif // TVM_TE_OPERATION_H_
*/
/*!
- * \file tvm/top/schedule.h
+ * \file tvm/te/schedule.h
* \brief Define a schedule.
*/
// Acknowledgement: Many schedule primitives originate from Halide and Loopy.
-#ifndef TVM_TOP_SCHEDULE_H_
-#define TVM_TOP_SCHEDULE_H_
+#ifndef TVM_TE_SCHEDULE_H_
+#define TVM_TE_SCHEDULE_H_
#include <tvm/tir/expr.h>
-#include <tvm/top/tensor.h>
-#include <tvm/top/tensor_intrin.h>
-
+#include <tvm/te/tensor.h>
+#include <tvm/te/tensor_intrin.h>
#include <string>
#include <unordered_map>
-
namespace tvm {
-namespace top {
+namespace te {
// Node container for Stage
class StageNode;
// Node container for Schedule
inline const IterVarAttrNode* IterVarAttr::operator->() const {
return static_cast<const IterVarAttrNode*>(get());
}
-} // namespace top
+} // namespace te
} // namespace tvm
-#endif // TVM_TOP_SCHEDULE_H_
+#endif // TVM_TE_SCHEDULE_H_
*/
/*!
- * \file tvm/top/schedule_pass.h
+ * \file tvm/te/schedule_pass.h
* \brief Collection of Schedule pass functions.
*
* These passes works on the schedule hyper-graph
* and infers information such as bounds, check conditions
* read/write dependencies between the IterVar
*/
-#ifndef TVM_TOP_SCHEDULE_PASS_H_
-#define TVM_TOP_SCHEDULE_PASS_H_
+#ifndef TVM_TE_SCHEDULE_PASS_H_
+#define TVM_TE_SCHEDULE_PASS_H_
-#include <tvm/top/schedule.h>
+#include <tvm/te/schedule.h>
namespace tvm {
-namespace top {
+namespace te {
/*!
* \brief Infer the bound of all iteration variables relates to the schedule.
*/
TVM_DLL void AutoInlineInjective(Schedule sch);
-} // namespace top
+} // namespace te
} // namespace tvm
-#endif // TVM_TOP_SCHEDULE_PASS_H_
+#endif // TVM_TE_SCHEDULE_PASS_H_
*/
/*!
- * \file tvm/top/tensor.h
+ * \file tvm/te/tensor.h
* \brief Dataflow tensor object
*/
-#ifndef TVM_TOP_TENSOR_H_
-#define TVM_TOP_TENSOR_H_
+#ifndef TVM_TE_TENSOR_H_
+#define TVM_TE_TENSOR_H_
#include <tvm/node/container.h>
#include <tvm/arith/bound.h>
#include <utility>
#include <type_traits>
-
-
namespace tvm {
-namespace top {
+namespace te {
using arith::IntSet;
using namespace tvm::tir;
DEFINE_OVERLOAD_SLICE_BINARY_OP(>); // NOLINT(*)
DEFINE_OVERLOAD_SLICE_BINARY_OP(<); // NOLINT(*)
-} // namespace top
+} // namespace te
} // namespace tvm
namespace std {
template <>
-struct hash<::tvm::top::Operation> : public ::tvm::ObjectHash {
+struct hash<::tvm::te::Operation> : public ::tvm::ObjectHash {
};
template <>
-struct hash<::tvm::top::Tensor> {
- std::size_t operator()(const ::tvm::top::Tensor& k) const {
+struct hash<::tvm::te::Tensor> {
+ std::size_t operator()(const ::tvm::te::Tensor& k) const {
::tvm::ObjectHash hasher;
if (k.defined() && k->op.defined()) {
return hasher(k->op);
}
};
} // namespace std
-#endif // TVM_TOP_TENSOR_H_
+#endif // TVM_TE_TENSOR_H_
*/
/*!
- * \file tvm/top/tensor_intrin.h
+ * \file tvm/te/tensor_intrin.h
* \brief Tensor intrinsic operations.
*/
-#ifndef TVM_TOP_TENSOR_INTRIN_H_
-#define TVM_TOP_TENSOR_INTRIN_H_
+#ifndef TVM_TE_TENSOR_INTRIN_H_
+#define TVM_TE_TENSOR_INTRIN_H_
-#include <tvm/top/tensor.h>
+#include <tvm/te/tensor.h>
#include <tvm/tir/buffer.h>
#include <string>
-
namespace tvm {
-namespace top {
+namespace te {
// Internal node container of tensor intrinsics.
class TensorIntrinNode;
return static_cast<const TensorIntrinCallNode*>(get());
}
-} // namespace top
+} // namespace te
} // namespace tvm
-#endif // TVM_TOP_TENSOR_INTRIN_H_
+#endif // TVM_TE_TENSOR_INTRIN_H_
#ifndef TVM_TIR_IR_PASS_H_
#define TVM_TIR_IR_PASS_H_
-#include <tvm/top/schedule.h>
+#include <tvm/te/schedule.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/buffer.h>
#include <tvm/tir/lowered_func.h>
* \return Transformed stmt.
*/
Stmt StorageFlatten(Stmt stmt,
- Map<top::Tensor, Buffer> extern_buffer,
+ Map<te::Tensor, Buffer> extern_buffer,
int cache_line_size,
bool create_bound_attribute = false);
* \return Transformed stmt.
*/
Stmt RewriteForTensorCore(Stmt stmt,
- top::Schedule schedule,
- Map<top::Tensor, Buffer> extern_buffer);
+ te::Schedule schedule,
+ Map<te::Tensor, Buffer> extern_buffer);
/*!
* \brief Verify if there is any argument bound to compact buffer.
#include <tvm/tir/expr.h>
#include <tvm/runtime/registry.h>
-#include <tvm/top/tensor.h>
+#include <tvm/te/tensor.h>
namespace tvm {
namespace arith {
*/
#include <dmlc/memory_io.h>
#include <tvm/tir/expr.h>
-#include <tvm/top/tensor.h>
+#include <tvm/te/tensor.h>
#include <tvm/runtime/registry.h>
#include <tvm/node/serialization.h>
*/
#include <tvm/tir/expr.h>
#include <tvm/tir/expr.h>
-#include <tvm/top/tensor.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/tensor.h>
+#include <tvm/te/operation.h>
#include <tvm/tir/buffer.h>
-#include <tvm/top/schedule.h>
+#include <tvm/te/schedule.h>
#include <tvm/runtime/registry.h>
#include <tvm/driver/driver.h>
.set_body_method(&BijectiveLayout::BackwardShape);
} // namespace tir
-namespace top {
+namespace te {
TVM_REGISTER_GLOBAL("_Tensor")
.set_body_typed(TensorNode::make);
TVM_REGISTER_GLOBAL("_ScheduleRFactor")
.set_body_method(&Schedule::rfactor);
-} // namespace top
+} // namespace te
TVM_REGISTER_GLOBAL("_CommReducerCombine")
.set_body_method<tir::CommReducer>(&tir::CommReducerNode::operator());
TVM_REGISTER_GLOBAL("ir_pass.RewriteForTensorCore")
.set_body_typed
([](const Stmt& stmt,
- const top::Schedule& schedule,
- const Map<top::Tensor, Buffer>& extern_buffer) {
+ const te::Schedule& schedule,
+ const Map<te::Tensor, Buffer>& extern_buffer) {
return RewriteForTensorCore(stmt, schedule, extern_buffer);
});
* \file api_schedule.cc
*/
#include <tvm/tir/expr.h>
-#include <tvm/top/tensor.h>
-#include <tvm/top/schedule.h>
-#include <tvm/top/schedule_pass.h>
+#include <tvm/te/tensor.h>
+#include <tvm/te/schedule.h>
+#include <tvm/te/schedule_pass.h>
#include <tvm/runtime/registry.h>
-#include "../top/schedule/graph.h"
+#include "../te/schedule/graph.h"
namespace tvm {
-namespace top {
+namespace te {
TVM_REGISTER_GLOBAL("schedule.AutoInlineElemWise")
.set_body_typed(AutoInlineElemWise);
REGISTER_SCHEDULE_PASS(ScanGetBody);
REGISTER_SCHEDULE_PASS(ScanFixPointAnalysis);
-} // namespace top
+} // namespace te
} // namespace tvm
* \file api_test.cc
*/
#include <tvm/tir/expr.h>
-#include <tvm/top/tensor.h>
+#include <tvm/te/tensor.h>
#include <tvm/ir/attrs.h>
#include <tvm/runtime/registry.h>
#include <tvm/ir/env_func.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/ir_pass.h>
#include <tvm/tir/stmt_functor.h>
-#include <tvm/top/tensor.h>
+#include <tvm/te/tensor.h>
#include <tvm/runtime/registry.h>
#include <unordered_set>
// Find Read region of the tensor in the stmt.
class FuncTouchedDomain final : public StmtExprVisitor {
public:
- FuncTouchedDomain(const top::Tensor &tensor, bool consider_calls, bool consider_provides)
+ FuncTouchedDomain(const te::Tensor &tensor, bool consider_calls, bool consider_provides)
: tensor_(tensor), consider_calls_(consider_calls), consider_provides_(consider_provides) {}
Domain Find(const Stmt& stmt) {
}
}
- const top::Tensor &tensor_;
+ const te::Tensor &tensor_;
bool consider_calls_, consider_provides_;
std::vector<std::vector<IntSet> > bounds_;
std::unordered_map<const VarNode*, IntSet> dom_map_;
};
Domain DomainTouched(Stmt stmt,
- const top::Tensor &tensor,
+ const te::Tensor &tensor,
bool consider_calls,
bool consider_provides) {
return FuncTouchedDomain(tensor, consider_calls, consider_provides).Find(stmt);
#include <tvm/tir/stmt_functor.h>
#include <tvm/target/codegen.h>
#include <tvm/tir/lowered_func.h>
-#include <tvm/top/schedule.h>
+#include <tvm/te/schedule.h>
#include <map>
#include <string>
#include <unordered_map>
namespace tvm {
namespace contrib {
-using namespace top;
+using namespace te;
using namespace tir;
/*!
* \brief A base class to generate Hybrid Script.
*/
#include <dmlc/thread_local.h>
#include <tvm/driver/driver.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tvm/tir/ir_pass.h>
#include <tvm/target/codegen.h>
#include <tvm/runtime/registry.h>
data_alignment, offset_factor, buffer_type);
}
-void GetBinds(const Array<top::Tensor>& args,
+void GetBinds(const Array<te::Tensor>& args,
bool compact,
- const std::unordered_map<top::Tensor, tir::Buffer>& binds,
- Map<top::Tensor, tir::Buffer>* out_binds,
+ const std::unordered_map<te::Tensor, tir::Buffer>& binds,
+ Map<te::Tensor, tir::Buffer>* out_binds,
Array<ObjectRef>* out_arg_list,
const BuildConfig& config) {
*out_binds = binds;
* \param config The build configuration.
* \return The built Stmt.
*/
-tir::Stmt BuildStmt(top::Schedule sch,
- const Array<top::Tensor>& args,
- const std::unordered_map<top::Tensor, tir::Buffer>& binds,
+tir::Stmt BuildStmt(te::Schedule sch,
+ const Array<te::Tensor>& args,
+ const std::unordered_map<te::Tensor, tir::Buffer>& binds,
bool loop_partition,
Array<ObjectRef> *out_arg_list,
const BuildConfig& config) {
sch = sch.normalize();
// Phase 0
- auto bounds = top::InferBound(sch);
- auto stmt = top::ScheduleOps(sch, bounds, false);
+ auto bounds = te::InferBound(sch);
+ auto stmt = te::ScheduleOps(sch, bounds, false);
stmt = tir::InjectPrefetch(stmt);
bool compact = tir::VerifyCompactBuffer(stmt);
- Map<top::Tensor, tir::Buffer> out_binds;
+ Map<te::Tensor, tir::Buffer> out_binds;
GetBinds(args, compact, binds, &out_binds, out_arg_list, config);
// Phase 1
return stmt;
}
-Array<LoweredFunc> lower(top::Schedule sch,
- const Array<top::Tensor>& args,
+Array<LoweredFunc> lower(te::Schedule sch,
+ const Array<te::Tensor>& args,
const std::string& name,
- const std::unordered_map<top::Tensor, tir::Buffer>& binds,
+ const std::unordered_map<te::Tensor, tir::Buffer>& binds,
const BuildConfig& config) {
Array<ObjectRef> out_arg_list;
auto stmt = BuildStmt(sch, args, binds, true, &out_arg_list, config);
// and are only used in minimum cases where they are clearly marked.
//
// Rationale: convert from IterVar and te::Tensor
-#include <tvm/top/tensor.h>
+#include <tvm/te/tensor.h>
#include <tvm/tir/expr.h>
namespace tvm {
if (ptr->IsInstance<tir::IterVarNode>()) {
return tir::IterVar(ptr)->var;
}
- if (ptr->IsInstance<top::TensorNode>()) {
- return top::Tensor(ptr)();
+ if (ptr->IsInstance<te::TensorNode>()) {
+ return te::Tensor(ptr)();
}
CHECK(ObjectTypeChecker<PrimExpr>::Check(ptr.get()))
<< "Expect type " << ObjectTypeChecker<PrimExpr>::TypeName()
 * \brief Internal compilation engine.
*/
#include <tvm/ir/type_functor.h>
-#include <tvm/top/schedule.h>
-#include <tvm/top/operation.h>
-#include <tvm/top/schedule_pass.h>
+#include <tvm/te/schedule.h>
+#include <tvm/te/operation.h>
+#include <tvm/te/schedule_pass.h>
#include <tvm/runtime/registry.h>
#include <tvm/relay/attrs/device_copy.h>
#include <tvm/relay/analysis.h>
// The getter to get schedule from compile engine.
// Get schedule from functor.
class ScheduleGetter :
- public ExprFunctor<Array<top::Tensor>(const Expr&)> {
+ public ExprFunctor<Array<te::Tensor>(const Expr&)> {
public:
explicit ScheduleGetter(Target target)
: target_(target), device_copy_op_(Op::Get("device_copy")) {}
- std::pair<top::Schedule, CachedFunc> Create(const Function& prim_func) {
+ std::pair<te::Schedule, CachedFunc> Create(const Function& prim_func) {
static auto fschedule =
Op::GetAttr<FTVMSchedule>("FTVMSchedule");
auto cache_node = make_object<CachedFuncNode>();
cache_node->target = target_;
for (Var param : prim_func->params) {
- Array<tvm::top::Tensor> inputs;
+ Array<tvm::te::Tensor> inputs;
if (const auto* ttype = param->checked_type().as<TensorTypeNode>()) {
- tvm::top::Tensor tensor = tvm::top::placeholder(
+ tvm::te::Tensor tensor = tvm::te::placeholder(
GetShape(ttype->shape), ttype->dtype);
cache_node->inputs.push_back(tensor);
inputs.push_back(tensor);
const auto* ttype = field.as<TensorTypeNode>();
// TODO(@icemelon): Allow recursive tuple
CHECK(ttype != nullptr);
- tvm::top::Tensor tensor = tvm::top::placeholder(
+ tvm::te::Tensor tensor = tvm::te::placeholder(
GetShape(ttype->shape), ttype->dtype);
cache_node->inputs.push_back(tensor);
inputs.push_back(tensor);
// Fusion over tupled results may leave identity relationships
// between inputs and outputs, and those should not be scheduled.
// Hence schedule only non PlaceholderOp outputs.
- tvm::Array<top::Tensor> tensor_outs;
+ tvm::Array<te::Tensor> tensor_outs;
for (const auto& tensor : cache_node->outputs) {
- if (!tensor->op.as<top::PlaceholderOpNode>()) {
+ if (!tensor->op.as<te::PlaceholderOpNode>()) {
tensor_outs.push_back(tensor);
}
}
- top::Schedule schedule;
+ te::Schedule schedule;
// No need to register schedule for device copy op.
if (master_attrs_.as<DeviceCopyAttrs>() == nullptr) {
schedule =
return std::make_pair(schedule, cfunc);
}
- Array<top::Tensor> VisitExpr(const Expr& expr) {
+ Array<te::Tensor> VisitExpr(const Expr& expr) {
auto it = memo_.find(expr);
if (it != memo_.end()) {
return it->second;
} else {
- Array<top::Tensor> res = ExprFunctor::VisitExpr(expr);
+ Array<te::Tensor> res = ExprFunctor::VisitExpr(expr);
memo_[expr] = res;
return res;
}
}
- Array<top::Tensor> VisitExpr_(const VarNode* op) final {
+ Array<te::Tensor> VisitExpr_(const VarNode* op) final {
LOG(FATAL) << "Free variable " << op->name_hint();
return {};
}
- Array<top::Tensor> VisitExpr_(const ConstantNode* op) final {
+ Array<te::Tensor> VisitExpr_(const ConstantNode* op) final {
using tir::make_const;
CHECK(op->is_scalar());
void* data = op->data->data;
DataType dtype = DataType(op->data->dtype);
- auto value = top::compute({}, [&](const Array<tvm::tir::Var>&) {
+ auto value = te::compute({}, [&](const Array<tvm::tir::Var>&) {
if (dtype == DataType::Int(32)) {
return make_const(dtype, static_cast<const int32_t*>(data)[0]);
} else if (dtype == DataType::Int(64)) {
return {value};
}
- Array<top::Tensor> VisitExpr_(const CallNode* call_node) final {
+ Array<te::Tensor> VisitExpr_(const CallNode* call_node) final {
static auto fcompute =
Op::GetAttr<FTVMCompute>("FTVMCompute");
static auto fpattern =
Op::GetAttr<TOpPattern>("TOpPattern");
- Array<top::Tensor> inputs;
+ Array<te::Tensor> inputs;
int count_tuple = 0;
for (Expr arg : call_node->args) {
if (arg->checked_type().as<TupleTypeNode>()) {
++count_tuple;
}
- for (top::Tensor tensor : VisitExpr(arg)) {
+ for (te::Tensor tensor : VisitExpr(arg)) {
inputs.push_back(tensor);
}
}
CHECK(call_node->op.as<OpNode>())
<< "Primitive function only allows call into primitive ops";
Op op = Downcast<Op>(call_node->op);
- Array<top::Tensor> outputs;
+ Array<te::Tensor> outputs;
// Skip fcompute for device copy operators as it is not registered.
if (op == device_copy_op_) {
const auto* copy_input = inputs[0].operator->();
- outputs.push_back(top::TensorNode::make(copy_input->shape, copy_input->dtype,
- top::Operation(), 0));
+ outputs.push_back(te::TensorNode::make(copy_input->shape, copy_input->dtype,
+ te::Operation(), 0));
} else {
outputs = fcompute[op](call_node->attrs, inputs,
call_node_type, target_);
return outputs;
}
- Array<top::Tensor> VisitExpr_(const FunctionNode* op) final {
+ Array<te::Tensor> VisitExpr_(const FunctionNode* op) final {
LOG(FATAL) << "Do not support sub function";
- return Array<top::Tensor>();
+ return Array<te::Tensor>();
}
- Array<top::Tensor> VisitExpr_(const LetNode* op) final {
- Array<top::Tensor> val = VisitExpr(op->value);
+ Array<te::Tensor> VisitExpr_(const LetNode* op) final {
+ Array<te::Tensor> val = VisitExpr(op->value);
CHECK(!memo_.count(op->var));
memo_[op->var] = val;
return VisitExpr(op->body);
}
- Array<top::Tensor> VisitExpr_(const TupleNode* op) final {
- Array<top::Tensor> fields;
+ Array<te::Tensor> VisitExpr_(const TupleNode* op) final {
+ Array<te::Tensor> fields;
for (Expr field : op->fields) {
CHECK(field->checked_type().as<TensorTypeNode>())
<< "Only allow Tuple of Tensor";
- Array<top::Tensor> res = VisitExpr(field);
+ Array<te::Tensor> res = VisitExpr(field);
CHECK_EQ(res.size(), 1);
fields.push_back(res[0]);
}
return fields;
}
- Array<top::Tensor> VisitExpr_(const TupleGetItemNode* op) final {
+ Array<te::Tensor> VisitExpr_(const TupleGetItemNode* op) final {
const auto* tuple_type = op->tuple->type_as<TupleTypeNode>();
- Array<top::Tensor> tuple = VisitExpr(op->tuple);
+ Array<te::Tensor> tuple = VisitExpr(op->tuple);
CHECK_EQ(tuple_type->fields.size(), tuple.size());
CHECK_GE(op->index, 0);
CHECK_LT(static_cast<size_t>(op->index), tuple.size());
Attrs master_attrs_;
int master_op_pattern_{0};
std::ostringstream readable_name_stream_;
- std::unordered_map<Expr, Array<top::Tensor>, ObjectHash, ObjectEqual> memo_;
- Array<top::Operation> scalars_;
+ std::unordered_map<Expr, Array<te::Tensor>, ObjectHash, ObjectEqual> memo_;
+ Array<te::Operation> scalars_;
// Cache device copy op for equivalence checking to reduce registry lookup
// overhead for each invocation of call node when retrieving schedules.
const Op& device_copy_op_;
};
// Creates shape function from functor.
-class MakeShapeFunc : public ExprFunctor<Array<top::Tensor>(const Expr&)> {
+class MakeShapeFunc : public ExprFunctor<Array<te::Tensor>(const Expr&)> {
public:
MakeShapeFunc() {}
- std::pair<top::Schedule, CachedFunc> Create(const Function& prim_func) {
+ std::pair<te::Schedule, CachedFunc> Create(const Function& prim_func) {
for (auto param : prim_func->params) {
param_states_[param] = kNoNeed;
- Array<tvm::top::Tensor> data_inputs;
- Array<tvm::top::Tensor> shape_inputs;
+ Array<tvm::te::Tensor> data_inputs;
+ Array<tvm::te::Tensor> shape_inputs;
auto add_placeholder = [&data_inputs, &shape_inputs](const TensorTypeNode* ttype) {
// Add data placeholder
Shape shape = GetShape(ttype->shape);
- tvm::top::Tensor data_tensor = tvm::top::placeholder(shape, ttype->dtype);
+ tvm::te::Tensor data_tensor = tvm::te::placeholder(shape, ttype->dtype);
data_inputs.push_back(data_tensor);
// Add shape placeholder
int64_t ndim = shape.size();
if (ndim > 0) {
sshape.push_back(tvm::Integer(ndim));
}
- tvm::top::Tensor shape_tensor = tvm::top::placeholder(sshape, DataType::Int(64));
+ tvm::te::Tensor shape_tensor = tvm::te::placeholder(sshape, DataType::Int(64));
shape_inputs.push_back(shape_tensor);
};
CachedFunc cfunc(cache_node);
// generate schedule for shape func
- Array<top::Operation> out_ops;
+ Array<te::Operation> out_ops;
for (auto t : cache_node->outputs) {
out_ops.push_back(t->op);
}
- auto schedule = top::create_schedule(out_ops);
- tvm::top::AutoInlineInjective(schedule);
+ auto schedule = te::create_schedule(out_ops);
+ tvm::te::AutoInlineInjective(schedule);
for (const auto& scalar : scalars_) {
auto scalar_op = scalar->op;
if (schedule->Contain(scalar_op)) {
return std::make_pair(schedule, cfunc);
}
- Array<top::Tensor> VisitExpr(const Expr& expr) {
+ Array<te::Tensor> VisitExpr(const Expr& expr) {
auto it = memo_.find(expr);
if (it != memo_.end()) {
return it->second;
} else {
- Array<top::Tensor> res = ExprFunctor::VisitExpr(expr);
+ Array<te::Tensor> res = ExprFunctor::VisitExpr(expr);
if (expr.as<VarNode>() == nullptr) {
// Do not memoize vars because shape functions could use either the data
// or the shape of a var each time.
}
}
- Array<top::Tensor> VisitExpr_(const VarNode* var_node) final {
+ Array<te::Tensor> VisitExpr_(const VarNode* var_node) final {
auto var = GetRef<Var>(var_node);
auto it = param_states_.find(var);
if (it == param_states_.end()) {
}
}
- Array<top::Tensor> VisitExpr_(const ConstantNode* op) final {
+ Array<te::Tensor> VisitExpr_(const ConstantNode* op) final {
using tir::make_const;
CHECK(data_dependants_.size());
CHECK(op->is_scalar());
if (data_dependant) {
void* data = op->data->data;
DataType dtype = DataType(op->data->dtype);
- auto value = tvm::top::compute({}, [&](const Array<tvm::tir::Var>&) {
+ auto value = tvm::te::compute({}, [&](const Array<tvm::tir::Var>&) {
if (dtype == DataType::Int(32)) {
return make_const(dtype, static_cast<const int32_t*>(data)[0]);
} else if (dtype == DataType::Int(64)) {
scalars_.push_back(value);
return {value};
} else {
- auto value = tvm::top::compute({}, [&](const Array<tvm::tir::Var>&) {
+ auto value = tvm::te::compute({}, [&](const Array<tvm::tir::Var>&) {
return tir::make_const(DataType::Int(64), 0);
}, "shape_const", topi::kBroadcast);
scalars_.push_back(value);
}
}
- Array<top::Tensor> VisitExpr_(const CallNode* call_node) final {
+ Array<te::Tensor> VisitExpr_(const CallNode* call_node) final {
static auto fshape_func = Op::GetAttr<FShapeFunc>("FShapeFunc");
static auto tshape_data_dependant = Op::GetAttr<TShapeDataDependant>(
"TShapeDataDependant");
data_dependants_.push_back(tshape_data_dependant[op]);
// Visit all inputs
- Array<top::Tensor> inputs;
+ Array<te::Tensor> inputs;
int count_tuple = 0;
for (Expr arg : call_node->args) {
if (arg->checked_type().as<TupleTypeNode>()) {
++count_tuple;
}
- for (top::Tensor tensor : VisitExpr(arg)) {
+ for (te::Tensor tensor : VisitExpr(arg)) {
inputs.push_back(tensor);
}
}
return outputs;
}
- Array<top::Tensor> VisitExpr_(const FunctionNode* op) final {
+ Array<te::Tensor> VisitExpr_(const FunctionNode* op) final {
LOG(FATAL) << "Do not support sub function";
- return Array<top::Tensor>();
+ return Array<te::Tensor>();
}
- Array<top::Tensor> VisitExpr_(const LetNode* op) final {
- Array<top::Tensor> val = VisitExpr(op->value);
+ Array<te::Tensor> VisitExpr_(const LetNode* op) final {
+ Array<te::Tensor> val = VisitExpr(op->value);
CHECK(!memo_.count(op->var));
memo_[op->var] = val;
return VisitExpr(op->body);
}
- Array<top::Tensor> VisitExpr_(const TupleNode* op) final {
- Array<top::Tensor> fields;
+ Array<te::Tensor> VisitExpr_(const TupleNode* op) final {
+ Array<te::Tensor> fields;
for (Expr field : op->fields) {
CHECK(field->checked_type().as<TensorTypeNode>())
<< "Only allow Tuple of Tensor";
- Array<top::Tensor> res = VisitExpr(field);
+ Array<te::Tensor> res = VisitExpr(field);
CHECK_EQ(res.size(), 1);
fields.push_back(res[0]);
}
/*! \brief Map from parameter to its shape function usage state */
std::unordered_map<Expr, int, ObjectHash, ObjectEqual> param_states_;
/*! \brief Map from parameter to list of data placeholder */
- std::unordered_map<Expr, Array<top::Tensor>, ObjectHash, ObjectEqual> param_data_;
+ std::unordered_map<Expr, Array<te::Tensor>, ObjectHash, ObjectEqual> param_data_;
/*! \brief Map from parameter to list of shape placeholder */
- std::unordered_map<Expr, Array<top::Tensor>, ObjectHash, ObjectEqual> param_shapes_;
+ std::unordered_map<Expr, Array<te::Tensor>, ObjectHash, ObjectEqual> param_shapes_;
/*! \brief Memoized visit result */
- std::unordered_map<Expr, Array<top::Tensor>, ObjectHash, ObjectEqual> memo_;
+ std::unordered_map<Expr, Array<te::Tensor>, ObjectHash, ObjectEqual> memo_;
/*! \brief Stack of data dependencies for shape function */
std::vector<bool> data_dependants_;
/*! \brief Scalars used in the shape function */
- Array<top::Tensor> scalars_;
+ Array<te::Tensor> scalars_;
};
class CompileEngineImpl : public CompileEngineNode {
* \return Pair of schedule and cache.
* The funcs field in cache is not yet populated.
*/
- std::pair<top::Schedule, CachedFunc> CreateSchedule(
+ std::pair<te::Schedule, CachedFunc> CreateSchedule(
const Function& source_func, const Target& target) {
return ScheduleGetter(target).Create(source_func);
}
cache_node->func_name = GetUniqueName(cache_node->func_name);
// NOTE: array will copy on write.
- Array<top::Tensor> all_args = cache_node->inputs;
- for (top::Tensor arg : cache_node->outputs) {
+ Array<te::Tensor> all_args = cache_node->inputs;
+ for (te::Tensor arg : cache_node->outputs) {
all_args.push_back(arg);
}
// lower the function
spair.first, all_args, cache_node->func_name, key->source_func);
} else {
tvm::BuildConfig bcfg = BuildConfig::Create();
- std::unordered_map<top::Tensor, tir::Buffer> binds;
+ std::unordered_map<te::Tensor, tir::Buffer> binds;
cache_node->funcs = tvm::lower(spair.first, all_args, cache_node->func_name, binds, bcfg);
}
value->cached_func = CachedFunc(cache_node);
cache_node->func_name = GetUniqueName(cache_node->func_name);
cache_node->target = key->target;
- Array<top::Tensor> all_args = cache_node->inputs;
- for (top::Tensor arg : cache_node->outputs) {
+ Array<te::Tensor> all_args = cache_node->inputs;
+ for (te::Tensor arg : cache_node->outputs) {
all_args.push_back(arg);
}
tvm::BuildConfig bcfg = BuildConfig::Create();
- std::unordered_map<top::Tensor, tir::Buffer> binds;
+ std::unordered_map<te::Tensor, tir::Buffer> binds;
cache_node->funcs = tvm::lower(spair.first, all_args, cache_node->func_name, binds, bcfg);
value->cached_func = CachedFunc(cache_node);
return value;
/*! \brief Function name */
std::string func_name;
/* \brief The inputs to the function */
- tvm::Array<top::Tensor> inputs;
+ tvm::Array<te::Tensor> inputs;
/* \brief The outputs to the function */
- tvm::Array<top::Tensor> outputs;
+ tvm::Array<te::Tensor> outputs;
/*! \brief The lowered functions to support the function. */
tvm::Array<tir::LoweredFunc> funcs;
/*! \brief Parameter usage states in the shape function. */
#include <tvm/driver/driver.h>
#include <tvm/target/codegen.h>
#include <tvm/tir/ir_pass.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <typeinfo>
#include <string>
* \brief A compiler from relay::Module to the VM byte code.
*/
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tvm/ir/error.h>
#include <tvm/relay/expr_functor.h>
#include <tvm/relay/interpreter.h>
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<top::Tensor>& inputs,
- const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
+ [](const Attrs& attrs, const Array<te::Tensor>& inputs,
+ const Type& out_dtype, const Target& target) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.set_attr<TOpIsStateful>("TOpIsStateful", false)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<top::Tensor>& inputs,
- const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
+ [](const Attrs& attrs, const Array<te::Tensor>& inputs,
+ const Type& out_dtype, const Target& target) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.set_attr<FInferCorrectLayout>("FInferCorrectLayout",
ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<top::Tensor>& inputs,
- const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
+ [](const Attrs& attrs, const Array<te::Tensor>& inputs,
+ const Type& out_dtype, const Target& target) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.set_attr<FInferCorrectLayout>("FInferCorrectLayout",
ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<top::Tensor>& inputs,
- const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
+ [](const Attrs& attrs, const Array<te::Tensor>& inputs,
+ const Type& out_dtype, const Target& target) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.set_attr<FInferCorrectLayout>("FInferCorrectLayout",
ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<top::Tensor>& inputs,
- const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
- Array<top::Tensor> outputs;
+ [](const Attrs& attrs, const Array<te::Tensor>& inputs,
+ const Type& out_dtype, const Target& target) -> Array<te::Tensor> {
+ Array<te::Tensor> outputs;
for (size_t i = 0; i < inputs.size(); ++i) {
outputs.push_back(topi::identity(inputs[i]));
}
.set_attr<FInferCorrectLayout>("FInferCorrectLayout",
ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<top::Tensor>& inputs,
- const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
+ [](const Attrs& attrs, const Array<te::Tensor>& inputs,
+ const Type& out_dtype, const Target& target) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.set_attr<FInferCorrectLayout>("FInferCorrectLayout",
ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<top::Tensor>& inputs,
- const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
+ [](const Attrs& attrs, const Array<te::Tensor>& inputs,
+ const Type& out_dtype, const Target& target) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
TVM_REGISTER_NODE_TYPE(DebugAttrs);
-Array<top::Tensor> DebugCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> DebugCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
- return Array<top::Tensor>{ topi::identity(inputs[0]) };
+ return Array<te::Tensor>{ topi::identity(inputs[0]) };
}
RELAY_REGISTER_OP("debug")
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<top::Tensor>& inputs,
- const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
+ [](const Attrs& attrs, const Array<te::Tensor>& inputs,
+ const Type& out_dtype, const Target& target) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<top::Tensor>& inputs,
- const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
+ [](const Attrs& attrs, const Array<te::Tensor>& inputs,
+ const Type& out_dtype, const Target& target) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<top::Tensor>& inputs,
- const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
+ [](const Attrs& attrs, const Array<te::Tensor>& inputs,
+ const Type& out_dtype, const Target& target) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<top::Tensor>& inputs,
- const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
+ [](const Attrs& attrs, const Array<te::Tensor>& inputs,
+ const Type& out_dtype, const Target& target) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.set_attr<TNonComputational>("TNonComputational", true)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute",
- [](const Attrs& attrs, const Array<top::Tensor>& inputs,
- const Type& out_dtype, const Target& target) -> Array<top::Tensor> {
+ [](const Attrs& attrs, const Array<te::Tensor>& inputs,
+ const Type& out_dtype, const Target& target) -> Array<te::Tensor> {
return {topi::identity(inputs[0])};
});
.add_argument("bias", "1D Tensor", "Bias.")
.set_support_level(1)
.add_type_rel("BiasAdd", BiasAddRel)
-.set_attr<FTVMCompute>("FTVMCompute", [](const Attrs& attrs, const Array<top::Tensor>& inputs,
+.set_attr<FTVMCompute>("FTVMCompute", [](const Attrs& attrs, const Array<te::Tensor>& inputs,
const Type& out_type, const Target& target) {
const auto* param = attrs.as<BiasAddAttrs>();
- return tvm::Array<tvm::top::Tensor>{topi::nn::bias_add(inputs[0], inputs[1], param->axis)};
+ return tvm::Array<tvm::te::Tensor>{topi::nn::bias_add(inputs[0], inputs[1], param->axis)};
});
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>(
"FTVMCompute", [](const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* param = attrs.as<LeakyReluAttrs>();
- return Array<top::Tensor>{ topi::leaky_relu(inputs[0], param->alpha) };
+ return Array<te::Tensor>{ topi::leaky_relu(inputs[0], param->alpha) };
});
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", PReluInferCorrectLayout<PReluAttrs>)
.set_attr<FTVMCompute>(
"FTVMCompute", [](const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* param = attrs.as<PReluAttrs>();
- return Array<top::Tensor>{ topi::prelu(inputs[0], inputs[1], param->axis)};
+ return Array<te::Tensor>{ topi::prelu(inputs[0], inputs[1], param->axis)};
});
.set_support_level(1)
.add_type_rel("Identity", IdentityRel)
.set_attr<FTVMCompute>("FTVMCompute", [](const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* param = attrs.as<SoftmaxAttrs>();
CHECK(param != nullptr);
- return Array<top::Tensor>{ topi::nn::softmax(inputs[0], param->axis) };
+ return Array<te::Tensor>{ topi::nn::softmax(inputs[0], param->axis) };
});
.set_support_level(1)
.add_type_rel("Identity", IdentityRel)
.set_attr<FTVMCompute>("FTVMCompute", [](const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* param = attrs.as<SoftmaxAttrs>();
CHECK(param != nullptr);
CHECK(param->axis == -1 || param->axis == static_cast<int32_t>(inputs[0].ndim()) - 1)
<< "log_softmax currently only works on last dimension";
- return Array<top::Tensor>{ topi::nn::log_softmax(inputs[0]) };
+ return Array<te::Tensor>{ topi::nn::log_softmax(inputs[0]) };
});
.add_type_rel("BatchFlatten", BatchFlattenRel)
.set_attr<FTVMCompute>(
"FTVMCompute", [](const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
- return Array<top::Tensor>{ topi::nn::flatten(inputs[0]) };
+ return Array<te::Tensor>{ topi::nn::flatten(inputs[0]) };
});
.add_type_rel("Identity", IdentityRel)
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout)
.set_attr<FTVMCompute>("FTVMCompute", [](const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
- return Array<top::Tensor>{ topi::relu(inputs[0], 0.0f) };
+ return Array<te::Tensor>{ topi::relu(inputs[0], 0.0f) };
});
return true;
}
-Array<top::Tensor> PadCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> PadCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* param = attrs.as<PadAttrs>();
pad_after.push_back(pad_width[i][1]);
}
const auto* out_ttype = out_type.as<TensorTypeNode>();
- return Array<top::Tensor>{ topi::pad(inputs[0], pad_before, pad_after,
+ return Array<te::Tensor>{ topi::pad(inputs[0], pad_before, pad_after,
tvm::tir::make_const(out_ttype->dtype, param->pad_value),
"T_pad",
topi::kElementWise,
}
template<typename AttrType, topi::nn::PoolType mode>
-Array<top::Tensor> Pool2DCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> Pool2DCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
static const Layout kNCHW("NCHW");
}
if (mode == topi::nn::kAvgPool) {
bool count_include_pad = reinterpret_cast<const AvgPool2DAttrs*>(param)->count_include_pad;
- return Array<top::Tensor>{
+ return Array<te::Tensor>{
topi::nn::pool(inputs[0], pool_size, strides, padding,
mode, ceil_mode, layout.name(), count_include_pad)};
} else {
- return Array<top::Tensor>{
+ return Array<te::Tensor>{
topi::nn::pool(inputs[0], pool_size, strides, padding,
mode, ceil_mode, layout.name())};
}
template<topi::nn::PoolType mode>
-Array<top::Tensor> GlobalPool2DCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> GlobalPool2DCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
static const Layout kNCHW("NCHW");
CHECK(inputs[0].ndim() == 4U || inputs[0].ndim() == 5U)
<< "Pool2D only support 4-D input (e.g., NCHW)"
<< " or 5-D input (last dimension is a split of channel)";
- return Array<top::Tensor>{
+ return Array<te::Tensor>{
topi::nn::global_pool(inputs[0], mode, layout.name()) };
}
}
template<topi::nn::PoolType mode>
-Array<top::Tensor> AdaptivePool2DCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> AdaptivePool2DCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
static const Layout kNCHW("NCHW");
output_height = output_size[0];
output_width = output_size[1];
}
- return Array<top::Tensor>{
+ return Array<te::Tensor>{
topi::nn::adaptive_pool(inputs[0], Array<IndexExpr>{ output_height, output_width },
mode, layout.name()) };
}
}
template <typename AttrType, topi::nn::PoolType mode>
-Array<top::Tensor> Pool2DGradCompute(const Attrs& attrs, const Array<top::Tensor>& inputs,
+Array<te::Tensor> Pool2DGradCompute(const Attrs& attrs, const Array<te::Tensor>& inputs,
const Type& out_type, const Target& target) {
static const Layout kNCHW("NCHW");
const auto* param = attrs.as<AttrType>();
}
if (mode == topi::nn::kAvgPool) {
bool count_include_pad = reinterpret_cast<const AvgPool2DAttrs*>(param)->count_include_pad;
- return Array<top::Tensor>{topi::nn::pool_grad(inputs[0], inputs[1], pool_size, strides, padding,
+ return Array<te::Tensor>{topi::nn::pool_grad(inputs[0], inputs[1], pool_size, strides, padding,
mode, ceil_mode, layout.name(), count_include_pad)};
} else {
- return Array<top::Tensor>{topi::nn::pool_grad(inputs[0], inputs[1], pool_size, strides, padding,
+ return Array<te::Tensor>{topi::nn::pool_grad(inputs[0], inputs[1], pool_size, strides, padding,
mode, ceil_mode, layout.name())};
}
}
template<typename AttrType, topi::nn::PoolType mode>
-Array<top::Tensor> Pool1DCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> Pool1DCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
static const Layout kNCW("NCW");
if (mode == topi::nn::kAvgPool) {
bool count_include_pad = reinterpret_cast<const AvgPool1DAttrs*>(param)->count_include_pad;
- return Array<top::Tensor>{
+ return Array<te::Tensor>{
topi::nn::pool1d(inputs[0], pool_size, strides, padding,
mode, ceil_mode, layout.name(), count_include_pad)};
} else {
- return Array<top::Tensor>{
+ return Array<te::Tensor>{
topi::nn::pool1d(inputs[0], pool_size, strides, padding,
mode, ceil_mode, layout.name())};
}
template<typename AttrType, topi::nn::PoolType mode>
-Array<top::Tensor> Pool3DCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> Pool3DCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
static const Layout kNCDHW("NCDHW");
}
if (mode == topi::nn::kAvgPool) {
bool count_include_pad = reinterpret_cast<const AvgPool3DAttrs*>(param)->count_include_pad;
- return Array<top::Tensor>{
+ return Array<te::Tensor>{
topi::nn::pool3d(inputs[0], pool_size, strides, padding,
mode, ceil_mode, layout.name(), count_include_pad)};
} else {
- return Array<top::Tensor>{
+ return Array<te::Tensor>{
topi::nn::pool3d(inputs[0], pool_size, strides, padding,
mode, ceil_mode, layout.name())};
}
#define RELAY_BINARY_COMPUTE(FTOPI) \
[] (const Attrs& attrs, \
- const Array<top::Tensor>& inputs, \
+ const Array<te::Tensor>& inputs, \
const Type& out_type, \
- const Target& target) -> Array<top::Tensor> { \
+ const Target& target) -> Array<te::Tensor> { \
CHECK_EQ(inputs.size(), 2U); \
return {FTOPI(inputs[0], inputs[1])}; \
} \
}
template<typename F>
-Array<top::Tensor> ReduceCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> ReduceCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target,
F f) {
.add_argument("data", "Tensor", "The input tensor.")
-Array<top::Tensor> ArgMaxCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> ArgMaxCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
return ReduceCompute(attrs, inputs, out_type, target, topi::argmax);
.set_attr<TOpPattern>("TOpPattern", kCommReduce);
-Array<top::Tensor> ArgMinCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> ArgMinCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
return ReduceCompute(attrs, inputs, out_type, target, topi::argmin);
.set_attr<FTVMCompute>("FTVMCompute", ArgMinCompute)
.set_attr<TOpPattern>("TOpPattern", kCommReduce);
-Array<top::Tensor> SumCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> SumCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
return ReduceCompute(attrs, inputs, out_type, target, topi::sum);
.set_attr<TOpPattern>("TOpPattern", kCommReduce);
-Array<top::Tensor> AllCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> AllCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
return ReduceCompute(attrs, inputs, out_type, target, topi::all);
.set_attr<TOpPattern>("TOpPattern", kCommReduce);
-Array<top::Tensor> AnyCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> AnyCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
return ReduceCompute(attrs, inputs, out_type, target, topi::any);
.set_attr<TOpPattern>("TOpPattern", kCommReduce);
-Array<top::Tensor> MaxCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> MaxCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
return ReduceCompute(attrs, inputs, out_type, target, topi::max);
.set_attr<TOpPattern>("TOpPattern", kCommReduce);
-Array<top::Tensor> MinCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> MinCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
return ReduceCompute(attrs, inputs, out_type, target, topi::min);
.set_attr<TOpPattern>("TOpPattern", kCommReduce);
-Array<top::Tensor> ProdCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> ProdCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
return ReduceCompute(attrs, inputs, out_type, target, topi::prod);
.set_attr<TOpPattern>("TOpPattern", kCommReduce);
-Array<top::Tensor> MeanCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> MeanCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
IndexExpr count = tir::make_const(inputs[0]->dtype, 1);
return true;
}
-Array<top::Tensor> VarianceCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> VarianceCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
IndexExpr count = tir::make_const(inputs[0]->dtype, 1);
return true;
}
-Array<top::Tensor> CastCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> CastCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const CastAttrs *param = attrs.as<CastAttrs>();
}
-Array<top::Tensor> CastLikeCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> CastLikeCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
return { topi::cast(inputs[0], inputs[1]->dtype) };
.set_attr<FInferCorrectLayout>("FInferCorrectLayout", ElemwiseArbitraryLayout);
-Array<top::Tensor> ReinterpretCompute(const Attrs& attrs, const Array<top::Tensor>& inputs,
+Array<te::Tensor> ReinterpretCompute(const Attrs& attrs, const Array<te::Tensor>& inputs,
const Type& out_type, const Target& target) {
const CastAttrs* param = attrs.as<CastAttrs>();
CHECK(param != nullptr);
return true;
}
-Array<top::Tensor> ExpandDimsCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> ExpandDimsCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const ExpandDimsAttrs *param = attrs.as<ExpandDimsAttrs>();
// relay.concatenate
TVM_REGISTER_NODE_TYPE(ConcatenateAttrs);
-Array<top::Tensor> ConcatenateCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> ConcatenateCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const ConcatenateAttrs *param = attrs.as<ConcatenateAttrs>();
return true;
}
-Array<top::Tensor> StackCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> StackCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const StackAttrs *param = attrs.as<StackAttrs>();
return true;
}
-Array<top::Tensor> TransposeCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> TransposeCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* param = attrs.as<TransposeAttrs>();
CHECK(param != nullptr);
- return Array<top::Tensor>{ topi::transpose(inputs[0], param->axes) };
+ return Array<te::Tensor>{ topi::transpose(inputs[0], param->axes) };
}
Expr MakeTranspose(Expr data,
return true;
}
-Array<top::Tensor> ReshapeCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> ReshapeCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* out_ttype = out_type.as<TensorTypeNode>();
return true;
}
-Array<top::Tensor> TakeCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> TakeCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* param = attrs.as<TakeAttrs>();
CHECK(param != nullptr);
if (!param->axis.defined()) {
- return Array<top::Tensor>{ topi::take(inputs[0], inputs[1], param->mode) };
+ return Array<te::Tensor>{ topi::take(inputs[0], inputs[1], param->mode) };
} else {
- return Array<top::Tensor>{ topi::take(inputs[0], inputs[1], param->axis, param->mode) };
+ return Array<te::Tensor>{ topi::take(inputs[0], inputs[1], param->axis, param->mode) };
}
}
return true;
}
-Array<top::Tensor> FullCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> FullCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* out_ttype = out_type.as<TensorTypeNode>();
return true;
}
-Array<top::Tensor> FullLikeCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> FullLikeCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
return { topi::full_like(inputs[0], inputs[1]()) };
}
}
-inline top::Tensor DynamicArange(const top::Tensor& start,
- const top::Tensor& stop,
- const top::Tensor& step,
+inline te::Tensor DynamicArange(const te::Tensor& start,
+ const te::Tensor& stop,
+ const te::Tensor& step,
tvm::DataType dtype,
std::string name = "tensor",
std::string tag = topi::kInjective) {
tvm::PrimExpr num_elem = tvm::tir::Var("num_elem");
- return top::compute({num_elem}, [&](const Array<tvm::tir::Var>& indices) {
+ return te::compute({num_elem}, [&](const Array<tvm::tir::Var>& indices) {
return tvm::cast(dtype, start[0] + step[0] * indices[0]);
}, name, tag);
}
-Array<top::Tensor> ArangeCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> ArangeCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const ArangeAttrs* param = attrs.as<ArangeAttrs>();
- top::Tensor start = inputs[0];
- top::Tensor stop = inputs[1];
- top::Tensor step = inputs[2];
+ te::Tensor start = inputs[0];
+ te::Tensor stop = inputs[1];
+ te::Tensor step = inputs[2];
Array<tvm::PrimExpr> empty = {0};
return { DynamicArange(start, stop, step, param->dtype) };
}
return true;
}
-Array<top::Tensor> RepeatCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> RepeatCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const RepeatAttrs *param = attrs.as<RepeatAttrs>();
return true;
}
-Array<top::Tensor> TileCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> TileCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const TileAttrs *param = attrs.as<TileAttrs>();
return true;
}
-Array<top::Tensor> ReverseCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> ReverseCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const ReverseAttrs *param = attrs.as<ReverseAttrs>();
return CallNode::make(op, {condition, x, y});
}
-Array<top::Tensor> WhereCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> WhereCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
return { topi::where(inputs[0], inputs[1], inputs[2]) };
return true;
}
-Array<top::Tensor> SqueezeCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> SqueezeCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const SqueezeAttrs *param = attrs.as<SqueezeAttrs>();
return CallNode::make(op, {data, collapse_type}, Attrs(), {});
}
-Array<top::Tensor> CollapseSumLikeCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> CollapseSumLikeCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* out_ttype = out_type.as<TensorTypeNode>();
return CallNode::make(op, {data}, Attrs(attrs), {});
}
-Array<top::Tensor> BroadCastToCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> BroadCastToCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
auto ioattrs = attrs.as<InitOpAttrs>();
return CallNode::make(op, {data, broadcast_type}, Attrs(), {});
}
-Array<top::Tensor> BroadCastToLikeCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> BroadCastToLikeCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* out_ttype = out_type.as<TensorTypeNode>();
return CallNode::make(op, {data}, Attrs(attrs), {});
}
-Array<top::Tensor> StridedSliceCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> StridedSliceCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const StridedSliceAttrs *param = attrs.as<StridedSliceAttrs>();
CHECK(param != nullptr);
- return Array<top::Tensor>{
+ return Array<te::Tensor>{
topi::strided_slice(inputs[0], param->begin, param->end, param->strides)
};
}
return true;
}
-Array<top::Tensor> SplitCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> SplitCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto param = attrs.as<SplitAttrs>();
if (const IntImmNode* sections = param->indices_or_sections.as<IntImmNode>()) {
int64_t num_sections = sections->value;
- return Array<top::Tensor>{
+ return Array<te::Tensor>{
topi::split_sections(inputs[0], num_sections, param->axis) };
} else {
auto indices = Downcast<Array<Integer> >(param->indices_or_sections);
- return Array<top::Tensor>{ topi::split(inputs[0], indices, param->axis) };
+ return Array<te::Tensor>{ topi::split(inputs[0], indices, param->axis) };
}
}
return CallNode::make(op, {data, shape_like}, Attrs(attrs), {});
}
-Array<top::Tensor> SliceLikeCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> SliceLikeCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* param = attrs.as<SliceLikeAttrs>();
<< topi::GetConstInt(src_shape[axis]);
}
}
- return Array<top::Tensor>{
+ return Array<te::Tensor>{
topi::strided_slice(inputs[0],
GetIntArray(begin_idx),
GetIntArray(end_idx),
// relay.layout_transform
TVM_REGISTER_NODE_TYPE(LayoutTransformAttrs);
-Array<top::Tensor> LayoutTransformCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> LayoutTransformCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* param = attrs.as<LayoutTransformAttrs>();
CHECK(param != nullptr);
- return Array<top::Tensor>{
+ return Array<te::Tensor>{
topi::layout_transform(inputs[0], param->src_layout, param->dst_layout)
};
}
return true;
}
-Array<top::Tensor> GatherNDCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> GatherNDCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
return { topi::gather_nd(inputs[0], inputs[1]) };
return true;
}
-Array<top::Tensor> SequenceMaskCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> SequenceMaskCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* param = attrs.as<SequenceMaskAttrs>();
CHECK(param != nullptr);
- return Array<top::Tensor>{
+ return Array<te::Tensor>{
topi::sequence_mask(inputs[0], inputs[1], param->mask_value, param->axis) };
}
return true;
}
-Array<top::Tensor> OneHotCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> OneHotCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* param = attrs.as<OneHotAttrs>();
CHECK(param != nullptr);
- return Array<top::Tensor> {
+ return Array<te::Tensor> {
topi::one_hot(inputs[0],
inputs[1](),
inputs[2](),
#define RELAY_UNARY_COMPUTE(FTOPI) \
[] (const Attrs& attrs, \
- const Array<top::Tensor>& inputs, \
+ const Array<te::Tensor>& inputs, \
const Type& out_type, \
- const Target& target) -> Array<top::Tensor> { \
+ const Target& target) -> Array<te::Tensor> { \
return {FTOPI(inputs[0])}; \
} \
return true;
}
-Array<top::Tensor> ShapeOfCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> ShapeOfCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
CHECK_EQ(inputs.size(), 1);
return true;
}
-Array<top::Tensor> NdarraySizeCompute(const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+Array<te::Tensor> NdarraySizeCompute(const Attrs& attrs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
CHECK_EQ(inputs.size(), 1);
const auto* param = attrs.as<NdarraySizeAttrs>();
CHECK(param != nullptr);
- return Array<top::Tensor>{topi::ndarray_size(inputs[0], param->dtype)};
+ return Array<te::Tensor>{topi::ndarray_size(inputs[0], param->dtype)};
}
TVM_REGISTER_GLOBAL("relay.op.contrib._make.ndarray_size")
.set_attrs_type<YoloReorgAttrs>()
.add_type_rel("YoloReorg", YoloReorgRel)
.set_attr<FTVMCompute>("FTVMCompute", [](const Attrs& attrs,
- const Array<top::Tensor>& inputs,
+ const Array<te::Tensor>& inputs,
const Type& out_type,
const Target& target) {
const auto* params = attrs.as<YoloReorgAttrs>();
CHECK(params != nullptr);
- return Array<top::Tensor>{ topi::vision::reorg(inputs[0], params->stride) };
+ return Array<te::Tensor>{ topi::vision::reorg(inputs[0], params->stride) };
});
} // namespace relay
#include <tvm/relay/op_attr_types.h>
#include <tvm/relay/attrs/transform.h>
#include <tvm/relay/transform.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tuple>
#include <vector>
#include <functional>
Expr new_e;
bool modified = false;
if (falter_layout.count(op)) {
- tvm::Array<tvm::top::Tensor> tinfos;
+ tvm::Array<tvm::te::Tensor> tinfos;
for (auto expr : ref_call->args) {
auto ttype = expr->type_as<TensorTypeNode>();
- tinfos.push_back(tvm::top::placeholder(ttype->shape, ttype->dtype));
+ tinfos.push_back(tvm::te::placeholder(ttype->shape, ttype->dtype));
}
Expr altered_value = falter_layout[op](ref_call->attrs, new_args, tinfos);
if (altered_value.defined()) {
#include <tvm/relay/op_attr_types.h>
#include <tvm/relay/attrs/transform.h>
#include <tvm/relay/transform.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tuple>
#include <vector>
#include <functional>
Expr new_e;
bool modified = false;
if (fconvert_layout.count(op)) {
- tvm::Array<tvm::top::Tensor> tinfos;
+ tvm::Array<tvm::te::Tensor> tinfos;
for (auto expr : ref_call->args) {
auto ttype = expr->type_as<TensorTypeNode>();
- tinfos.push_back(tvm::top::placeholder(ttype->shape, ttype->dtype));
+ tinfos.push_back(tvm::te::placeholder(ttype->shape, ttype->dtype));
}
Expr altered_value =
fconvert_layout[op](ref_call->attrs, new_args, tinfos, operator->()->desired_layout_);
*/
#include <tvm/ir/type_functor.h>
#include <tvm/tir/lowered_func.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tvm/relay/expr_functor.h>
#include <tvm/relay/analysis.h>
#include <tvm/relay/transform.h>
* shape, dtype or layout to another op or a sequence of ops.
*/
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tvm/relay/expr_functor.h>
#include <tvm/relay/op_attr_types.h>
#include <tvm/relay/transform.h>
* \brief Compute Op.
* \file compute_op.cc
*/
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tvm/arith/analyzer.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/ir_pass.h>
#include "../../arith/interval_set.h"
namespace tvm {
-namespace top {
+namespace te {
using namespace tir;
TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
if (this->body[0]->IsInstance<tir::ReduceNode>()) {
    // Specially handle reduce so the replaced op
    // still shares all the components
- PrimExpr new_reduce = top::ReplaceTensor(this->body[0], rmap);
+ PrimExpr new_reduce = te::ReplaceTensor(this->body[0], rmap);
if (!new_reduce.same_as(this->body[0])) {
const tir::ReduceNode* r = new_reduce.as<tir::ReduceNode>();
for (size_t k = 0; k < this->body.size(); ++k) {
}
} else {
arr = UpdateArray(this->body, [&rmap] (const PrimExpr& e) {
- return top::ReplaceTensor(e, rmap);
+ return te::ReplaceTensor(e, rmap);
});
}
if (!arr.same_as(this->body)) {
update_state[self->axis[i]] = 1;
}
// find which iter var is related to reduction and which is related to axis.
- top::PassDownBitMaskOr(stage, &update_state);
+ te::PassDownBitMaskOr(stage, &update_state);
auto leaf_iter_vars = stage->leaf_iter_vars;
// find the first loop that is related to reduction.
size_t begin_loop = leaf_iter_vars.size();
update, body);
}
-} // namespace top
+} // namespace te
} // namespace tvm
* \brief Helper utilities to implement compute_op.
* \file compute_op.h
*/
-#ifndef TVM_TOP_OPERATION_COMPUTE_OP_H_
-#define TVM_TOP_OPERATION_COMPUTE_OP_H_
+#ifndef TVM_TE_OPERATION_COMPUTE_OP_H_
+#define TVM_TE_OPERATION_COMPUTE_OP_H_
#include <tvm/tir/expr.h>
#include <tvm/tir/expr.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <vector>
#include <unordered_map>
namespace tvm {
-namespace top {
+namespace te {
// loop nest structure for general compute
// This is the loop nest structure used in compute.
// Does not include the loop body.
const ComputeLoopNest& n,
Stmt body,
Stmt update);
-} // namespace top
+} // namespace te
} // namespace tvm
-#endif // TVM_TOP_OPERATION_COMPUTE_OP_H_
+#endif // TVM_TE_OPERATION_COMPUTE_OP_H_
#include "op_util.h"
namespace tvm {
-namespace top {
+namespace te {
using namespace tir;
Stmt MakeCrossThreadReduction(
body = Substitute(body, value_map);
return MergeNest(nest, body);
}
-} // namespace top
+} // namespace te
} // namespace tvm
* \brief External computation rule.
* \file extern_op.cc
*/
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tvm/arith/analyzer.h>
#include <tvm/tir/expr.h>
#include <unordered_set>
#include "op_util.h"
namespace tvm {
-namespace top {
+namespace te {
using namespace tir;
// ExternOpNode
TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
}
return ret;
}
-} // namespace top
+} // namespace te
} // namespace tvm
* \brief Hybrid computation rule.
* \file hybrid_op.cc
*/
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tvm/arith/analyzer.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/stmt_functor.h>
#include "hybrid_op.h"
namespace tvm {
-namespace top {
+namespace te {
using namespace tir;
// HybridOpNode
TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
n->attrs = std::move(attrs);
n->inputs = std::move(inputs);
n->outputs = std::move(outputs);
- n->axis = top::GatherLoopVars(body);
+ n->axis = te::GatherLoopVars(body);
n->body = std::move(body);
Operation res = Operation(n);
return res;
const std::unordered_map<Tensor, Tensor> &rmap) const {
CHECK_EQ(self.operator->(), this);
auto n = make_object<HybridOpNode>(*this);
- n->body = top::ReplaceTensor(this->body, rmap);
+ n->body = te::ReplaceTensor(this->body, rmap);
for (size_t i = 0; i < n->inputs.size(); ++i) {
Tensor t = n->inputs[i];
if (rmap.count(t)) {
* This is a major difference that HybridOpNode is NOT the same as
* ExternOpNode.
* */
- ret = top::ReplaceTensor(ret, rmap);
- ret = top::ReplaceProvideTensor(ret, rmap);
+ ret = te::ReplaceTensor(ret, rmap);
+ ret = te::ReplaceProvideTensor(ret, rmap);
- ret = top::ApplySchedule(stage, dom_map, ret);
+ ret = te::ApplySchedule(stage, dom_map, ret);
return ret;
}
Stmt ret = repl(stmt);
return repl.found ? ret : stmt;
}
-} // namespace top
+} // namespace te
} // namespace tvm
* \brief Helper utilities to implement hybrid_op.
* \file hybrid_op.h
*/
-#ifndef TVM_TOP_OPERATION_HYBRID_OP_H_
-#define TVM_TOP_OPERATION_HYBRID_OP_H_
+#ifndef TVM_TE_OPERATION_HYBRID_OP_H_
+#define TVM_TE_OPERATION_HYBRID_OP_H_
#include <tvm/tir/expr.h>
-#include <tvm/top/schedule.h>
+#include <tvm/te/schedule.h>
#include <unordered_map>
#include <unordered_set>
#include "../../tir/pass/arg_binder.h"
namespace tvm {
-namespace top {
+namespace te {
/*!
* \brief Find all the iteration variables in the given statement body.
const std::unordered_map<IterVar, Range> &dom_map,
const std::unordered_map<IterVar, IterVar> &rebased, Stmt stmt);
-} // namespace top
+} // namespace te
} // namespace tvm
-#endif // TVM_TOP_OPERATION_HYBRID_OP_H_
+#endif // TVM_TE_OPERATION_HYBRID_OP_H_
#include <tvm/tir/expr.h>
#include <tvm/tir/ir_pass.h>
#include <tvm/tir/stmt_functor.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <string>
#include "op_util.h"
#include "../schedule/message_passing.h"
#include "../../arith/compute_expr.h"
namespace tvm {
-namespace top {
+namespace te {
using namespace arith;
using namespace tir;
}
}
// message passing to get offset of root iter vars.
- top::PassUpIndex(stage, dom_map, &value_map);
+ te::PassUpIndex(stage, dom_map, &value_map);
return nest;
}
}
}
-} // namespace top
+} // namespace te
} // namespace tvm
* \file op_util.h
* \brief Common utility used in operator construction.
*/
-#ifndef TVM_TOP_OPERATION_OP_UTIL_H_
-#define TVM_TOP_OPERATION_OP_UTIL_H_
+#ifndef TVM_TE_OPERATION_OP_UTIL_H_
+#define TVM_TE_OPERATION_OP_UTIL_H_
#include <tvm/tir/expr.h>
-#include <tvm/top/schedule.h>
+#include <tvm/te/schedule.h>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "../schedule/message_passing.h"
namespace tvm {
-namespace top {
+namespace te {
using tir::MergeNest;
*/
tir::ForType IterVarTypeToForType(IterVarType iter_type);
-} // namespace top
+} // namespace te
} // namespace tvm
-#endif // TVM_TOP_OPERATION_OP_UTIL_H_
+#endif // TVM_TE_OPERATION_OP_UTIL_H_
* \brief Placeholder op.
* \file placeholder_op.cc
*/
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
namespace tvm {
-namespace top {
+namespace te {
// PlaceholderOpNode
TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
bool debug_keep_trivial_loop) const {
return Stmt();
}
-} // namespace top
+} // namespace te
} // namespace tvm
* \brief Scan Operator.
* \file scan_op.cc
*/
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/ir_pass.h>
#include "op_util.h"
#include "../schedule/graph.h"
namespace tvm {
-namespace top {
+namespace te {
using namespace tir;
TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
MakeBoundCheck(stage, dom_map, vmap, false, empty)));
return MergeNest(nest, provide);
}
-} // namespace top
+} // namespace te
} // namespace tvm
* \brief Tensor Compute Op.
* \file tensor_compute_op.cc
*/
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tvm/arith/analyzer.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/ir_pass.h>
#include "../../arith/compute_expr.h"
namespace tvm {
-namespace top {
+namespace te {
using namespace tir;
// TensorComputeOpNode
TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
body = MergeNest(input_bind_nest, body);
body = tir::Substitute(body, vmap);
body = MergeNest(binder.asserts(), body);
- body = top::Substitute(body, n.main_vmap);
+ body = te::Substitute(body, n.main_vmap);
Stmt ret = MergeNest(nest, body);
return ret;
} else {
n.init_nest.begin(), n.init_nest.begin() + tloc + 1);
init_nest.emplace_back(MakeIfNest(n.init_predicates));
Stmt init = MergeNest(output_bind_nest, this->intrin->reduce_init);
- init = top::Substitute(init, n.init_vmap);
+ init = te::Substitute(init, n.init_vmap);
init = MergeNest(init_nest, init);
// The update
Stmt update = MergeNest(output_bind_nest, this->intrin->reduce_update);
update = MergeNest(input_bind_nest, update);
update = tir::Substitute(update, vmap);
update = MergeNest(binder.asserts(), update);
- update = top::Substitute(update, n.main_vmap);
+ update = te::Substitute(update, n.main_vmap);
update = MergeNest(update_nest, update);
return MergeNest(common, SeqStmt::Flatten(init, update));
} else {
update = MergeNest(input_bind_nest, update);
update = tir::Substitute(update, vmap);
update = MergeNest(binder.asserts(), update);
- update = top::Substitute(update, n.main_vmap);
+ update = te::Substitute(update, n.main_vmap);
update = MergeNest(update_nest, update);
return MergeNest(common, update);
}
}
}
-} // namespace top
+} // namespace te
} // namespace tvm
#include "../schedule/message_passing.h"
namespace tvm {
-namespace top {
+namespace te {
using namespace tir;
}
CHECK(found_point);
// Get domain of the tensorized scope.
- top::PassUpDomain(stage, dom_map, &up_state);
+ te::PassUpDomain(stage, dom_map, &up_state);
// Get domains of inputs
std::unordered_map<Tensor, TensorDom> in_dom;
std::unordered_map<const VarNode*, IntSet> temp_dmap;
body = MergeNest(input_bind_nest, body);
body = tir::Substitute(body, vmap);
body = MergeNest(binder.asserts(), body);
- body = top::Substitute(body, n.main_vmap);
+ body = te::Substitute(body, n.main_vmap);
return MergeNest(nest, body);
} else {
// Need to split reduction
n.init_nest.begin(), n.init_nest.begin() + tloc + 1);
init_nest.emplace_back(MakeIfNest(n.init_predicates));
Stmt init = MergeNest(output_bind_nest, intrin->reduce_init);
- init = top::Substitute(init, n.init_vmap);
+ init = te::Substitute(init, n.init_vmap);
init = MergeNest(init_nest, init);
// The update
Stmt update = MergeNest(output_bind_nest, intrin->reduce_update);
update = MergeNest(input_bind_nest, update);
update = tir::Substitute(update, vmap);
update = MergeNest(binder.asserts(), update);
- update = top::Substitute(update, n.main_vmap);
+ update = te::Substitute(update, n.main_vmap);
update = MergeNest(update_nest, update);
return MergeNest(common, SeqStmt::Flatten(init, update));
} else {
update = MergeNest(input_bind_nest, update);
update = tir::Substitute(update, vmap);
update = MergeNest(binder.asserts(), update);
- update = top::Substitute(update, n.main_vmap);
+ update = te::Substitute(update, n.main_vmap);
update = MergeNest(update_nest, update);
return MergeNest(common, update);
}
intrin,
&vrange);
});
-} // namespace top
+} // namespace te
} // namespace tvm
/*!
* \file auto_inline_elem_wise.cc
*/
-#include <tvm/top/schedule_pass.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/schedule_pass.h>
+#include <tvm/te/operation.h>
#include <tvm/tir/expr_functor.h>
namespace tvm {
-namespace top {
+namespace te {
using namespace tir;
}
}
-} // namespace top
+} // namespace te
} // namespace tvm
* \file bound.cc
* \brief The bound inference logic.
*/
-#include <tvm/top/schedule_pass.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/schedule_pass.h>
+#include <tvm/te/operation.h>
#include <tvm/tir/ir_pass.h>
#include <unordered_map>
#include <unordered_set>
#include "../../runtime/thread_storage_scope.h"
namespace tvm {
-namespace top {
+namespace te {
using runtime::StorageRank;
using runtime::StorageScope;
return Map<IterVar, Range>(ret.begin(), ret.end());
}
-} // namespace top
+} // namespace te
} // namespace tvm
*/
#include <tvm/tir/expr.h>
#include <tvm/tir/stmt_functor.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <utility>
#include <unordered_set>
#include <unordered_map>
#include "graph.h"
namespace tvm {
-namespace top {
+namespace te {
// key to specific tensor dimension.
struct TensorDimKey {
tir::FunctionRef f;
return !operator==(other);
}
};
-} // namespace top
+} // namespace te
} // namespace tvm
namespace std {
template <>
-struct hash<::tvm::top::TensorDimKey> {
- std::size_t operator()(const ::tvm::top::TensorDimKey& k) const {
+struct hash<::tvm::te::TensorDimKey> {
+ std::size_t operator()(const ::tvm::te::TensorDimKey& k) const {
size_t lhs = ::tvm::ObjectHash()(k.f);
size_t rhs = static_cast<size_t>(k.value_index) << 16UL |
static_cast<size_t>(k.dim);
namespace tvm {
-namespace top {
+namespace te {
// construct a read graph that gives readers of each operation
// that the root depend on
return ret;
}
-} // namespace top
+} // namespace te
} // namespace tvm
* \file graph.h
* \brief Utilities to get information about schedule graph.
*/
-#ifndef TVM_TOP_SCHEDULE_GRAPH_H_
-#define TVM_TOP_SCHEDULE_GRAPH_H_
+#ifndef TVM_TE_SCHEDULE_GRAPH_H_
+#define TVM_TE_SCHEDULE_GRAPH_H_
#include <tvm/tir/expr.h>
-#include <tvm/top/schedule.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/schedule.h>
+#include <tvm/te/operation.h>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace tvm {
-namespace top {
+namespace te {
/*!
* \brief data structure of Operation->Tensors it reads
*/
Map<IterVar, PrimExpr> ScanFixPointAnalysis(const Operation& scan);
-} // namespace top
+} // namespace te
} // namespace tvm
-#endif // TVM_TOP_SCHEDULE_GRAPH_H_
+#endif // TVM_TE_SCHEDULE_GRAPH_H_
#include "../../arith/compute_expr.h"
namespace tvm {
-namespace top {
+namespace te {
using namespace tir;
}
return preds;
}
-} // namespace top
+} // namespace te
} // namespace tvm
* \brief Common utilities to do message passing
* on the schedule hyper graph.
*/
-#ifndef TVM_TOP_SCHEDULE_MESSAGE_PASSING_H_
-#define TVM_TOP_SCHEDULE_MESSAGE_PASSING_H_
+#ifndef TVM_TE_SCHEDULE_MESSAGE_PASSING_H_
+#define TVM_TE_SCHEDULE_MESSAGE_PASSING_H_
#include <tvm/tir/expr.h>
-#include <tvm/top/schedule.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/schedule.h>
+#include <tvm/te/operation.h>
#include <tvm/arith/analyzer.h>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace tvm {
-namespace top {
+namespace te {
/*!
* \brief Downward inference of domain of each IterVar.
* Caller set the range of the root, then the function
bool skip_ivar_domain,
const std::unordered_set<IterVar>& skip_iter);
-} // namespace top
+} // namespace te
} // namespace tvm
-#endif // TVM_TOP_SCHEDULE_MESSAGE_PASSING_H_
+#endif // TVM_TE_SCHEDULE_MESSAGE_PASSING_H_
/*!
* \file schedule_dataflow_rewrite.cc
*/
-#include <tvm/top/schedule.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/schedule.h>
+#include <tvm/te/operation.h>
#include <tvm/tir/stmt_functor.h>
#include <tvm/tir/ir_pass.h>
#include <unordered_set>
#include "../../arith/compute_expr.h"
namespace tvm {
-namespace top {
+namespace te {
// find first occurrence location in leaf
template<typename T>
size_t FindNodeRef(ArrayNode* array_node, const T& v) {
dom_map[iv] = iv->dom;
analyzer.Bind(iv->var, iv->dom);
}
- top::PassDownDomain(orig_stage, &dom_map, &analyzer, true);
+ te::PassDownDomain(orig_stage, &dom_map, &analyzer, true);
{
// The source->cache
std::unordered_map<IterVar, PrimExpr> value_map;
for (IterVar iv : compute->axis) {
value_map[iv] = iv->var;
}
- top::PassDownIndex(orig_stage, dom_map, &value_map, true);
+ te::PassDownIndex(orig_stage, dom_map, &value_map, true);
for (IterVar iv : orig_stage->leaf_iter_vars) {
if (red_axis.count(iv)) continue;
args.push_back(value_map.at(iv));
// Find touched reduction axis.
std::unordered_map<IterVar, int> touch_map;
touch_map[axis] = 1;
- top::PassUpBitMaskOr(reduce_stage, &touch_map, true);
- top::PassDownBitMaskOr(reduce_stage, &touch_map, true);
+ te::PassUpBitMaskOr(reduce_stage, &touch_map, true);
+ te::PassDownBitMaskOr(reduce_stage, &touch_map, true);
// skip reduction iteration.
std::unordered_set<IterVar> skip_bound_check;
// Verify normal axis are not touched.
}
analyzer.Bind(iv->var, iv->dom);
}
- top::PassDownDomain(reduce_stage, &dom_map, &analyzer, true);
+ te::PassDownDomain(reduce_stage, &dom_map, &analyzer, true);
for (IterVar iv : reduce_stage->leaf_iter_vars) {
if (touch_map.count(iv)) {
Range dom = dom_map.at(iv);
}
}
}
- top::PassUpIndex(reduce_stage, dom_map, &value_map, true);
+ te::PassUpIndex(reduce_stage, dom_map, &value_map, true);
std::vector<PrimExpr> predicates = MakeBoundCheck(
reduce_stage, dom_map, value_map, true, skip_bound_check);
reduce_stage->relations = Array<IterVarRelation>();
return factor_tensors;
}
-} // namespace top
+} // namespace te
} // namespace tvm
/*!
* \file schedule_lang.cc
*/
-#include <tvm/top/schedule.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/schedule.h>
+#include <tvm/te/operation.h>
#include <unordered_set>
#include "graph.h"
namespace tvm {
-namespace top {
+namespace te {
// find first occurrence location in leaf
template<typename T>
self->InitCache();
const auto& op2stage_cache = self->op2stage_cache_;
// Get the ops.
- Array<Operation> ops = top::GetSubGraph(
+ Array<Operation> ops = te::GetSubGraph(
RemapTensor(self, outputs),
RemapTensor(self, inputs),
include_inputs);
auto n = make_object<ScheduleNode>();
Schedule sch(n);
n->outputs = ops;
- auto g = top::CreateReadGraph(n->outputs);
- Array<Operation> post_order = top::PostDFSOrder(n->outputs, g);
+ auto g = te::CreateReadGraph(n->outputs);
+ Array<Operation> post_order = te::PostDFSOrder(n->outputs, g);
// output set.
std::unordered_set<Operation> output_set;
for (Operation x : ops) {
auto* op = static_cast<const ScheduleNode*>(node.get());
p->stream << "schedule(" << op << ")";
});
-} // namespace top
+} // namespace te
} // namespace tvm
#include <tvm/tir/expr.h>
#include <tvm/tir/ir_pass.h>
#include <tvm/tir/stmt_functor.h>
-#include <tvm/top/operation.h>
-#include <tvm/top/schedule_pass.h>
+#include <tvm/te/operation.h>
+#include <tvm/te/schedule_pass.h>
#include <utility>
#include <unordered_map>
#include <unordered_set>
#include "../../tir/pass/ir_util.h"
namespace tvm {
-namespace top {
+namespace te {
using namespace tir;
return post_proc(std::move(body));
}
-} // namespace top
+} // namespace te
} // namespace tvm
/*!
* \file tensor.cc
*/
-#include <tvm/top/tensor.h>
-#include <tvm/top/operation.h>
-#include <tvm/top/tensor_intrin.h>
+#include <tvm/te/tensor.h>
+#include <tvm/te/operation.h>
+#include <tvm/te/tensor_intrin.h>
#include <memory>
namespace tvm {
-namespace top {
+namespace te {
IterVar thread_axis(Range dom, std::string tag) {
return IterVarNode::make(
TVM_REGISTER_NODE_TYPE(TensorIntrinCallNode);
-} // namespace top
+} // namespace te
} // namespace tvm
Stmt ret = StmtMutator::VisitStmt_(op);
op = ret.as<AttrStmtNode>();
if (op && op->attr_key == attr::prefetch_scope) {
- top::Tensor ts = Downcast<top::Tensor>(op->node);
+ te::Tensor ts = Downcast<te::Tensor>(op->node);
CHECK_NE(loop_nest_.size(), 0U);
Domain domain = DomainTouched(op->body, ts, true, false);
Region region;
#include <tvm/arith/analyzer.h>
#include <tvm/tir/expr.h>
#include <tvm/tir/stmt.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tvm/tir/stmt_functor.h>
#include <tvm/tir/op.h>
#include <tvm/tir/ir_pass.h>
class StorageFlattener : public StmtExprMutator {
public:
- explicit StorageFlattener(Map<top::Tensor, Buffer> extern_buffer,
+ explicit StorageFlattener(Map<te::Tensor, Buffer> extern_buffer,
int cache_line_size, bool create_bound_attributes,
IRVisitorWithAnalyzer* bounded_analyzer)
: bounded_analyzer_(bounded_analyzer),
storage_scope_[op->node.get()] = op->value.as<StringImmNode>()->value;
return this->VisitStmt(op->body);
} else if (op->attr_key == attr::double_buffer_scope &&
- op->node->IsInstance<top::OperationNode>()) {
- auto func = Downcast<top::Operation>(op->node);
+ op->node->IsInstance<te::OperationNode>()) {
+ auto func = Downcast<te::Operation>(op->node);
Stmt body = this->VisitStmt(op->body);
for (int i = 0; i < func->num_outputs(); ++i) {
TensorKey key{func, i};
} else if (op->attr_key == attr::buffer_bind_scope) {
return HandleBufferBindScope(op);
} else if (op->attr_key == attr::buffer_dim_align) {
- auto tensor = Downcast<top::Tensor>(op->node);
+ auto tensor = Downcast<te::Tensor>(op->node);
const CallNode* tuple = op->value.as<CallNode>();
CHECK(tuple && tuple->is_intrinsic(intrinsic::tvm_tuple));
TensorKey key{tensor->op, tensor->value_index};
Array<ObjectRef> arr = Downcast<Array<ObjectRef> > (op->node);
CHECK_EQ(arr.size(), 2U);
const BufferNode* buffer = arr[0].as<BufferNode>();
- const top::TensorNode* tensor = arr[1].as<top::TensorNode>();
+ const te::TensorNode* tensor = arr[1].as<te::TensorNode>();
const CallNode* tuple = op->value.as<CallNode>();
CHECK(buffer && tensor);
CHECK(tuple && tuple->is_intrinsic(intrinsic::tvm_tuple));
bool create_bound_attributes_{false};
};
-Stmt StorageFlatten(Stmt stmt, Map<top::Tensor, Buffer> extern_buffer,
+Stmt StorageFlatten(Stmt stmt, Map<te::Tensor, Buffer> extern_buffer,
int cache_line_size, bool create_bound_attributes) {
IRVisitorWithAnalyzer bounded_analyzer;
bounded_analyzer(stmt);
// IR Passes for TensorCore CodeGen
#include <tvm/tir/expr.h>
#include <tvm/tir/stmt.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tvm/tir/stmt_functor.h>
#include <tvm/tir/op.h>
#include <tvm/tir/ir_pass.h>
namespace tvm {
namespace tir {
-using namespace top;
+using namespace te;
using runtime::StorageRank;
using runtime::StorageScope;
using runtime::ThreadScope;
storage_scope_[op->node.get()] = op->value.as<StringImmNode>()->value;
this->VisitStmt(op->body);
} else if (op->attr_key == attr::buffer_dim_align) {
- top::Tensor tensor = Downcast<top::Tensor>(op->node);
+ te::Tensor tensor = Downcast<te::Tensor>(op->node);
const CallNode* tuple = op->value.as<CallNode>();
CHECK(tuple && tuple->is_intrinsic(intrinsic::tvm_tuple));
auto& vinfo = dim_align_[TensorKey{tensor->op, tensor->value_index}];
Stmt VisitStmt_(const AttrStmtNode* op) final {
Stmt stmt = StmtExprMutator::VisitStmt_(op);
if (op->attr_key == attr::realize_scope) {
- auto node = op->node.as<top::OperationNode>();
+ auto node = op->node.as<te::OperationNode>();
if (node != nullptr) {
if (!frag_reg_.count(node->name)) {
return stmt;
buffer_node->offset_factor = 1;
Buffer buffer(buffer_node);
- ObjectPtr<top::TensorNode> tensor_node = make_object<top::TensorNode>();
+ ObjectPtr<te::TensorNode> tensor_node = make_object<te::TensorNode>();
tensor_node->value_index = key.value_index;
- tensor_node->op = Downcast<top::Operation>(key.f);
+ tensor_node->op = Downcast<te::Operation>(key.f);
tensor_node->shape = shape;
tensor_node->dtype = datatype;
Tensor tensor(tensor_node);
#include <tvm/tir/expr.h>
#include <tvm/tir/stmt.h>
#include <tvm/tir/stmt_functor.h>
-#include <tvm/top/tensor.h>
+#include <tvm/te/tensor.h>
#include <unordered_map>
#include <dmlc/logging.h>
#include <gtest/gtest.h>
#include <topi/cuda/injective.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tvm/runtime/registry.h>
#include <tvm/driver/driver.h>
TEST(BuildModule, Basic) {
using namespace tvm;
- using namespace tvm::top;
+ using namespace tvm::te;
auto n = var("n");
Array<PrimExpr> shape;
shape.push_back(n);
*/
using namespace tvm;
- using namespace tvm::top;
+ using namespace tvm::te;
const runtime::PackedFunc* pf = runtime::Registry::Get("module._Enabled");
bool enabled = (*pf)("cuda");
if (!enabled) {
#include <dmlc/logging.h>
#include <gtest/gtest.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
TEST(Expr, Basic) {
using namespace tvm;
#include <dmlc/logging.h>
#include <gtest/gtest.h>
#include <tvm/tir/ir_pass.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
TEST(IRSIMPLIFY, MinMax) {
- auto x = tvm::top::var("x");
+ auto x = tvm::te::var("x");
auto e1 = (tvm::max(x, 1) - tvm::max(x, 1)) ;
auto e1s = tvm::tir::CanonicalSimplify(e1);
CHECK(tvm::tir::is_zero(e1s));
}
TEST(IRSIMPLIFY, Mul) {
- auto x = tvm::top::var("x");
+ auto x = tvm::te::var("x");
auto e = (x * x) - (x * x) ;
auto es = tvm::tir::CanonicalSimplify(e);
CHECK(tvm::tir::is_zero(es));
#include <gtest/gtest.h>
#include <tvm/driver/driver.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/type.h>
#include <tvm/relay/analysis.h>
*/
#include <gtest/gtest.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/type.h>
#include <tvm/relay/analysis.h>
#include <tvm/relay/type.h>
#include <tvm/runtime/packed_func.h>
#include <tvm/runtime/registry.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
TVM_REGISTER_GLOBAL("schedule")
.set_body([](tvm::TVMArgs args, tvm::TVMRetValue* rv) {
#include <dmlc/logging.h>
#include <gtest/gtest.h>
#include <tvm/tir/ir_pass.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
TEST(SimplePasses, HasSideEffect) {
using namespace tvm;
- auto n = top::var("n");
+ auto n = te::var("n");
Array<PrimExpr> shape;
shape.push_back(n);
- auto A = top::placeholder(shape, DataType::Float(32), "A");
+ auto A = te::placeholder(shape, DataType::Float(32), "A");
CHECK(!tvm::tir::HasSideEffect(A[0]));
}
#include <dmlc/logging.h>
#include <gtest/gtest.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
TEST(Tensor, Basic) {
using namespace tvm;
- using namespace tvm::top;
+ using namespace tvm::te;
Var m("m"), n("n"), l("l");
TEST(Tensor, Reduce) {
using namespace tvm;
- using namespace tvm::top;
+ using namespace tvm::te;
Var m("m"), n("n"), l("l");
- top::Tensor A = top::placeholder({m, l}, DataType::Float(32), "A");
- top::Tensor B = top::placeholder({n, l}, DataType::Float(32), "B");
+ te::Tensor A = te::placeholder({m, l}, DataType::Float(32), "A");
+ te::Tensor B = te::placeholder({n, l}, DataType::Float(32), "B");
IterVar rv = reduce_axis(Range{0, l}, "k");
- auto C = top::compute({m, n}, [&](Var i, Var j) {
+ auto C = te::compute({m, n}, [&](Var i, Var j) {
return sum(max(1 + A[i][rv] + 1, B[j][rv]), {rv});
}, "C");
- LOG(INFO) << C->op.as<top::ComputeOpNode>()->body;
+ LOG(INFO) << C->op.as<te::ComputeOpNode>()->body;
}
int main(int argc, char ** argv) {
* under the License.
*/
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <topi/elemwise.h>
#include <gtest/gtest.h>
#include <gtest/gtest.h>
#include <topi/generic/injective.h>
#include <tvm/driver/driver.h>
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <tvm/relay/analysis.h>
#include <tvm/relay/expr.h>
#include <tvm/relay/transform.h>
#ifndef TOPI_BROADCAST_H_
#define TOPI_BROADCAST_H_
+#include <topi/detail/broadcast.h>
+#include <topi/detail/constant_utils.h>
+#include <topi/tags.h>
+
#include <string>
#include <algorithm>
-#include "topi/detail/broadcast.h"
-#include "topi/detail/constant_utils.h"
-#include "topi/tags.h"
namespace topi {
*
* \return A Tensor whose op member is a broadcast operation
*/
-inline tvm::top::Tensor broadcast_to(const tvm::top::Tensor& t,
+inline tvm::te::Tensor broadcast_to(const tvm::te::Tensor& t,
const tvm::Array<tvm::PrimExpr>& output_shape,
std::string name = "T_broadcast_to",
std::string tag = kBroadcast) {
auto l = [&](tvm::Array<tvm::tir::Var> ovars) {
return t(detail::InputIndexFromBroadcast(ovars, t, bh.vars2, bh.all_vars));
};
- return tvm::top::compute(
+ return tvm::te::compute(
tvm::Array<tvm::PrimExpr>(bh.common_shape.begin(), bh.common_shape.end()),
l,
name,
const tvm::PrimExpr& b) { \
ComputeRule; \
} \
- inline tvm::top::Tensor Name(const tvm::top::Tensor& A, \
- const tvm::top::Tensor& B, \
- std::string name = "T_" #Name, \
- std::string tag = kBroadcast) { \
+ inline tvm::te::Tensor Name(const tvm::te::Tensor& A, \
+ const tvm::te::Tensor& B, \
+ std::string name = "T_" #Name, \
+ std::string tag = kBroadcast) { \
auto l = [](tvm::PrimExpr a, tvm::PrimExpr b) { ComputeRule; }; \
return detail::WithBroadcast(l, A, B, name, tag); \
} \
- inline tvm::top::Tensor Name(const tvm::top::Tensor& A, \
- const tvm::PrimExpr& B, \
- std::string name = "T_" #Name, \
- std::string tag = kElementWise) { \
+ inline tvm::te::Tensor Name(const tvm::te::Tensor& A, \
+ const tvm::PrimExpr& B, \
+ std::string name = "T_" #Name, \
+ std::string tag = kElementWise) { \
auto l = [](tvm::PrimExpr a, tvm::PrimExpr b) { ComputeRule; }; \
- return tvm::top::compute(A->shape, [&](const ::tvm::Array<::tvm::tir::Var>& i) { \
+ return tvm::te::compute(A->shape, [&](const ::tvm::Array<::tvm::tir::Var>& i) { \
return l(A(i), B); \
}, name, tag); \
} \
- inline tvm::top::Tensor Name(const tvm::PrimExpr& A, \
- const tvm::top::Tensor& B, \
- std::string name = "T_" #Name, \
- std::string tag = kElementWise) { \
+ inline tvm::te::Tensor Name(const tvm::PrimExpr& A, \
+ const tvm::te::Tensor& B, \
+ std::string name = "T_" #Name, \
+ std::string tag = kElementWise) { \
auto l = [&](tvm::PrimExpr a, tvm::PrimExpr b) { ComputeRule; }; \
- return tvm::top::compute(B->shape, [&](const ::tvm::Array<::tvm::tir::Var>& i) { \
+ return tvm::te::compute(B->shape, [&](const ::tvm::Array<::tvm::tir::Var>& i) { \
return l(A, B(i)); \
}, name, tag); \
}
#define TOPI_DEFINE_OP_OVERLOAD(Name, OpName) \
- inline tvm::top::Tensor Name(const tvm::top::Tensor& A, \
- const tvm::top::Tensor& B) { \
+ inline tvm::te::Tensor Name(const tvm::te::Tensor& A, \
+ const tvm::te::Tensor& B) { \
return topi::OpName(A, B); \
} \
- inline tvm::top::Tensor Name(const tvm::PrimExpr& A, \
- const tvm::top::Tensor& B) { \
+ inline tvm::te::Tensor Name(const tvm::PrimExpr& A, \
+ const tvm::te::Tensor& B) { \
return topi::OpName(A, B); \
} \
- inline tvm::top::Tensor Name(const tvm::top::Tensor& A, \
- const tvm::PrimExpr& B) { \
+ inline tvm::te::Tensor Name(const tvm::te::Tensor& A, \
+ const tvm::PrimExpr& B) { \
return topi::OpName(A, B); \
}
#ifndef TOPI_CONTRIB_CUBLAS_H_
#define TOPI_CONTRIB_CUBLAS_H_
-#include "tvm/top/operation.h"
-#include "topi/detail/extern.h"
+#include <tvm/te/operation.h>
+#include <topi/detail/extern.h>
namespace topi {
namespace contrib {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
using namespace topi::detail;
/*!
* \brief Create an op that multiplies lhs and rhs with cuBLAS
#ifndef TOPI_CONTRIB_ROCBLAS_H_
#define TOPI_CONTRIB_ROCBLAS_H_
-#include "tvm/top/operation.h"
+#include <tvm/te/operation.h>
#include "topi/detail/extern.h"
namespace topi {
namespace contrib {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Create an op that multiplies lhs and rhs with rocBLAS
*
#ifndef TOPI_CUDA_DENSE_H_
#define TOPI_CUDA_DENSE_H_
-#include "tvm/top/operation.h"
-#include "tvm/top/schedule_pass.h"
-#include "tvm/target/generic_func.h"
-#include "topi/tags.h"
-#include "topi/detail/array_utils.h"
-#include "topi/nn/dense.h"
-#include "topi/contrib/cublas.h"
-#include "topi/generic/extern.h"
+#include <tvm/te/operation.h>
+#include <tvm/te/schedule_pass.h>
+#include <tvm/target/generic_func.h>
+#include <topi/tags.h>
+#include <topi/detail/array_utils.h>
+#include <topi/nn/dense.h>
+#include <topi/contrib/cublas.h>
+#include <topi/generic/extern.h>
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace cuda {
/*!
*
* \return Tensor with shape [batch, out_dim]
*/
-inline tvm::top::Tensor dense_cuda(const Target& target,
- const tvm::top::Tensor& data,
- const tvm::top::Tensor& weight,
- const tvm::top::Tensor& bias,
+inline tvm::te::Tensor dense_cuda(const Target& target,
+ const tvm::te::Tensor& data,
+ const tvm::te::Tensor& weight,
+ const tvm::te::Tensor& bias,
const DataType& out_dtype) {
CHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data";
CHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight";
CHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported.";
auto mm = topi::contrib::cublas_matmul(data, weight, false, true);
if (bias.defined()) {
- mm = tvm::top::compute({ batch, out_dim },
+ mm = tvm::te::compute({ batch, out_dim },
[&](Var i, Var j) {
return mm(i, j) + bias(j);
}, "tensor", kBroadcast);
s[dense].compute_at(s[out], s[out]->op.as<ComputeOpNode>()->axis[1]);
}
s[out].bind(s[out]->op.as<ComputeOpNode>()->axis[0],
- tvm::top::thread_axis(Range(), "blockIdx.y"));
+ tvm::te::thread_axis(Range(), "blockIdx.y"));
s[out].bind(s[out]->op.as<ComputeOpNode>()->axis[1],
- tvm::top::thread_axis(Range(), "blockIdx.x"));
+ tvm::te::thread_axis(Range(), "blockIdx.x"));
auto tx = s[dense]->op.as<ComputeOpNode>()->reduce_axis[0];
- auto thread_x = tvm::top::thread_axis(Range(), "threadIdx.x");
+ auto thread_x = tvm::te::thread_axis(Range(), "threadIdx.x");
s[dense].bind(tx, thread_x);
s[dense_f].compute_at(s[dense], tx);
s[dense].set_store_predicate(static_cast<PrimExpr>(thread_x) == 0);
#ifndef TOPI_CUDA_INJECTIVE_H_
#define TOPI_CUDA_INJECTIVE_H_
-#include "topi/tags.h"
-#include "topi/detail/fuse.h"
-#include "tvm/top/operation.h"
-#include "tvm/top/schedule_pass.h"
-#include "tvm/target/generic_func.h"
+#include <tvm/te/operation.h>
+#include <tvm/te/schedule_pass.h>
+#include <tvm/target/generic_func.h>
+#include <topi/tags.h>
+#include <topi/detail/fuse.h>
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace cuda {
out_ops.push_back(t->op);
}
auto s = create_schedule(out_ops);
- tvm::top::AutoInlineInjective(s);
+ tvm::te::AutoInlineInjective(s);
for (auto out : outs) {
schedule_injective_from_existing(s, out);
}
#ifndef TOPI_CUDA_NORMALIZATION_H_
#define TOPI_CUDA_NORMALIZATION_H_
-#include "tvm/top/operation.h"
-#include "tvm/top/schedule_pass.h"
-#include "tvm/target/generic_func.h"
-#include "topi/tags.h"
+#include <tvm/te/operation.h>
+#include <tvm/te/schedule_pass.h>
+#include <tvm/target/generic_func.h>
+#include <topi/tags.h>
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace cuda {
/*!
* \brief Create a CUDA schedule for LRN
}
Schedule s = create_schedule(out_ops);
int num_thread = 64;
- IterVar block_x = tvm::top::thread_axis(Range(), "blockIdx.x");
- IterVar thread_x = tvm::top::thread_axis(Range(0, num_thread), "threadIdx.x");
+ IterVar block_x = tvm::te::thread_axis(Range(), "blockIdx.x");
+ IterVar thread_x = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.x");
Tensor lrn = outs[0];
Tensor sqr_sum_up = lrn->op->InputTensors()[1];
Tensor sqr_sum = sqr_sum_up->op->InputTensors()[0];
traverse(outs[0]->op);
int num_thread = 64;
Tensor l2_normalize = outs[0];
- IterVar block_x = tvm::top::thread_axis(Range(), "blockIdx.x");
- IterVar thread_x = tvm::top::thread_axis(Range(0, num_thread), "threadIdx.x");
+ IterVar block_x = tvm::te::thread_axis(Range(), "blockIdx.x");
+ IterVar thread_x = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.x");
IterVar xto, xti;
s[l2_normalize].split_by_nparts(l2_normalize->op.as<ComputeOpNode>()->axis[1],
num_thread, &xto, &xti);
#ifndef TOPI_CUDA_POOLING_H_
#define TOPI_CUDA_POOLING_H_
-#include "topi/tags.h"
-#include "topi/detail/fuse.h"
-#include "topi/detail/array_utils.h"
-#include "tvm/top/operation.h"
-#include "tvm/top/schedule_pass.h"
-#include "tvm/target/generic_func.h"
+#include <tvm/te/operation.h>
+#include <tvm/te/schedule_pass.h>
+#include <tvm/target/generic_func.h>
+#include <topi/tags.h>
+#include <topi/detail/fuse.h>
+#include <topi/detail/array_utils.h>
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace cuda {
auto fused = detail::Fuse(s[out], s[out]->op.as<ComputeOpNode>()->axis);
IterVar bx, tx;
s[out].split(fused, num_thread, &bx, &tx);
- s[out].bind(bx, tvm::top::thread_axis(Range(), "blockIdx.x"));
- s[out].bind(tx, tvm::top::thread_axis(Range(), "threadIdx.x"));
+ s[out].bind(bx, tvm::te::thread_axis(Range(), "blockIdx.x"));
+ s[out].bind(tx, tvm::te::thread_axis(Range(), "threadIdx.x"));
if (detail::contains(s->outputs, pool->op)) {
s[OL].compute_at(s[out], tx);
} else {
auto _schedule = [&](const Tensor& pool) {
auto num_thread = 8;
- auto block_x = tvm::top::thread_axis(Range(), "blockIdx.x");
- auto block_y = tvm::top::thread_axis(Range(), "blockIdx.y");
- auto thread_x = tvm::top::thread_axis(Range(0, num_thread), "threadIdx.x");
- auto thread_y = tvm::top::thread_axis(Range(0, num_thread), "threadIdx.y");
+ auto block_x = tvm::te::thread_axis(Range(), "blockIdx.x");
+ auto block_y = tvm::te::thread_axis(Range(), "blockIdx.y");
+ auto thread_x = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.x");
+ auto thread_y = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.y");
Tensor out;
Tensor OL;
if (detail::contains(s->outputs, pool->op)) {
#ifndef TOPI_CUDA_REDUCTION_H_
#define TOPI_CUDA_REDUCTION_H_
-#include "topi/tags.h"
-#include "topi/detail/fuse.h"
-#include "tvm/top/operation.h"
-#include "tvm/top/schedule_pass.h"
-#include "tvm/target/generic_func.h"
+#include <tvm/te/operation.h>
+#include <tvm/te/schedule_pass.h>
+#include <tvm/target/generic_func.h>
+#include <topi/tags.h>
+#include <topi/detail/fuse.h>
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace cuda {
/*!
// Don't know why.
num_thread = 16;
}
- block_x = tvm::top::thread_axis(Range(), "blockIdx.x");
- thread_x = tvm::top::thread_axis(Range(0, num_thread), "threadIdx.x");
- thread_y = tvm::top::thread_axis(Range(0, num_thread), "threadIdx.y");
+ block_x = tvm::te::thread_axis(Range(), "blockIdx.x");
+ thread_x = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.x");
+ thread_y = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.y");
} else {
all_reduce = true;
num_thread = target->max_num_threads;
- thread_x = tvm::top::thread_axis(Range(0, num_thread), "threadIdx.x");
+ thread_x = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.x");
}
auto fused_reduce = detail::Fuse(out_stage, out_stage->op.as<ComputeOpNode>()->reduce_axis);
#ifndef TOPI_CUDA_SOFTMAX_H_
#define TOPI_CUDA_SOFTMAX_H_
-#include "topi/tags.h"
-#include "topi/detail/fuse.h"
-#include "tvm/top/operation.h"
-#include "tvm/top/schedule_pass.h"
-#include "tvm/target/generic_func.h"
+#include <tvm/te/operation.h>
+#include <tvm/te/schedule_pass.h>
+#include <tvm/target/generic_func.h>
+#include <topi/tags.h>
+#include <topi/detail/fuse.h>
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace cuda {
auto s = create_schedule(out_ops);
auto softmax = outs[0];
- tvm::top::Tensor max_elem;
- tvm::top::Tensor expsum;
- tvm::top::Tensor exp;
+ tvm::te::Tensor max_elem;
+ tvm::te::Tensor expsum;
+ tvm::te::Tensor exp;
bool has_exp = false;
auto tag = softmax->op.as<ComputeOpNode>()->tag;
}
int num_thread = 64;
- auto block_x = tvm::top::thread_axis(Range(), "blockIdx.x");
- auto thread_x = tvm::top::thread_axis(Range(0, num_thread), "threadIdx.x");
+ auto block_x = tvm::te::thread_axis(Range(), "blockIdx.x");
+ auto thread_x = tvm::te::thread_axis(Range(0, num_thread), "threadIdx.x");
if (has_exp) {
s[exp].bind(exp->op.as<ComputeOpNode>()->axis[0], block_x);
#ifndef TOPI_DETAIL_ARRAY_UTILS_H_
#define TOPI_DETAIL_ARRAY_UTILS_H_
-#include "tvm/top/operation.h"
+#include <tvm/te/operation.h>
namespace topi {
namespace detail {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Search an array for a specific item
#ifndef TOPI_DETAIL_BROADCAST_H_
#define TOPI_DETAIL_BROADCAST_H_
+#include <tvm/te/operation.h>
+#include <topi/detail/constant_utils.h>
+
#include <algorithm>
#include <deque>
#include <string>
-#include "tvm/tir/ir_pass.h"
-#include "tvm/top/operation.h"
-#include "tvm/tir/op.h"
-#include "topi/detail/constant_utils.h"
-
namespace topi {
namespace detail {
inline tvm::Array<tvm::PrimExpr> InputIndexFromBroadcast(
const tvm::Array<tvm::tir::Var>& ovars,
- const tvm::top::Tensor& T,
+ const tvm::te::Tensor& T,
const std::deque<tvm::tir::Var>& my_vars,
const std::deque<tvm::tir::Var>& all_vars) {
tvm::Array<tvm::PrimExpr> ivars;
}
template <typename FBinaryExpr>
-inline tvm::top::Tensor WithBroadcast(FBinaryExpr op,
- const tvm::top::Tensor& A,
- const tvm::top::Tensor& B,
+inline tvm::te::Tensor WithBroadcast(FBinaryExpr op,
+ const tvm::te::Tensor& A,
+ const tvm::te::Tensor& B,
const std::string& name = "tensor",
const std::string& tag = "") {
auto bh = BroadcastShape(A->shape, B->shape);
return op(A(InputIndexFromBroadcast(ovars, A, bh.vars1, bh.all_vars)),
B(InputIndexFromBroadcast(ovars, B, bh.vars2, bh.all_vars)));
};
- return tvm::top::compute(
+ return tvm::te::compute(
tvm::Array<tvm::PrimExpr>(bh.common_shape.begin(), bh.common_shape.end()),
l,
name,
namespace topi {
namespace detail {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Test whether the given Expr is a constant integer
#ifndef TOPI_DETAIL_EXTERN_H_
#define TOPI_DETAIL_EXTERN_H_
-#include <tvm/top/operation.h>
+#include <tvm/te/operation.h>
#include <vector>
#include <string>
namespace topi {
namespace detail {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Construct a buffer to pass to an external function
#ifndef TOPI_DETAIL_FUSE_H_
#define TOPI_DETAIL_FUSE_H_
-#include "tvm/top/operation.h"
+#include <tvm/te/operation.h>
namespace topi {
namespace detail {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Fuse all of the given args
namespace topi {
namespace detail {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Get padding size for each side given padding height and width
#ifndef TOPI_DETAIL_RAVEL_UNRAVEL_H_
#define TOPI_DETAIL_RAVEL_UNRAVEL_H_
-#include <vector>
+#include <tvm/te/operation.h>
-#include "tvm/top/operation.h"
-#include "tvm/tir/op.h"
+#include <vector>
namespace topi {
namespace detail {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Flatten the indices to 1D
namespace topi {
namespace detail {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Check whether input shape has dimension of size 0;
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
// Unary intrinsic operators
#define TOPI_DECLARE_UNARY_OP(OpName) \
#ifndef TOPI_GENERIC_DEFAULT_H_
#define TOPI_GENERIC_DEFAULT_H_
-#include "topi/tags.h"
-#include "topi/detail/fuse.h"
-#include "tvm/top/operation.h"
-#include "tvm/top/schedule_pass.h"
-#include "tvm/target/generic_func.h"
+#include <tvm/te/operation.h>
+#include <tvm/te/schedule_pass.h>
+#include <tvm/target/generic_func.h>
+#include <topi/tags.h>
+#include <topi/detail/fuse.h>
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace generic {
/*!
}
auto s = create_schedule(out_ops);
auto x = outs[0];
- tvm::top::AutoInlineInjective(s);
+ tvm::te::AutoInlineInjective(s);
auto axis = s[x]->op.as<ComputeOpNode>()->axis;
if (axis.size() > 0) {
detail::Fuse(s[x], axis);
#ifndef TOPI_GENERIC_EXTERN_H_
#define TOPI_GENERIC_EXTERN_H_
-#include "topi/tags.h"
-#include "topi/detail/fuse.h"
-#include "tvm/top/operation.h"
-#include "tvm/top/schedule_pass.h"
-#include "tvm/target/generic_func.h"
-#include "injective.h"
+#include <tvm/te/operation.h>
+#include <tvm/te/schedule_pass.h>
+#include <tvm/target/generic_func.h>
+#include <topi/tags.h>
+#include <topi/detail/fuse.h>
+#include <topi/generic/injective.h>
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace generic {
/*!
}
auto s = create_schedule(out_ops);
- tvm::top::AutoInlineInjective(s);
+ tvm::te::AutoInlineInjective(s);
for (auto out : outs) {
if (out->op->IsInstance<ExternOpNode>()) {
continue;
#ifndef TOPI_GENERIC_INJECTIVE_H_
#define TOPI_GENERIC_INJECTIVE_H_
-#include "topi/tags.h"
-#include "topi/detail/fuse.h"
-#include "tvm/top/operation.h"
-#include "tvm/top/schedule_pass.h"
-#include "tvm/target/generic_func.h"
+#include <tvm/te/operation.h>
+#include <tvm/te/schedule_pass.h>
+#include <tvm/target/generic_func.h>
+#include <topi/tags.h>
+#include <topi/detail/fuse.h>
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace generic {
out_ops.push_back(t->op);
}
auto s = create_schedule(out_ops);
- tvm::top::AutoInlineInjective(s);
+ tvm::te::AutoInlineInjective(s);
auto x = outs[0];
schedule_injective_from_existing(s, x);
#ifndef TOPI_IMAGE_RESIZE_H_
#define TOPI_IMAGE_RESIZE_H_
+#include <tvm/te/operation.h>
+#include <topi/tags.h>
+#include <topi/elemwise.h>
+#include <topi/detail/ravel_unravel.h>
+#include <topi/detail/constant_utils.h>
+
#include <string>
#include <vector>
#include <iterator>
#include <algorithm>
-#include "topi/tags.h"
-#include "topi/elemwise.h"
-#include "topi/detail/ravel_unravel.h"
-#include "topi/detail/constant_utils.h"
-#include "tvm/top/operation.h"
-#include "tvm/tir/op.h"
-
namespace topi {
namespace image {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Sample a point in a tensor using bilinear interpolation.
#ifndef TOPI_NN_H_
#define TOPI_NN_H_
+#include <topi/tags.h>
+#include <topi/detail/constant_utils.h>
+#include <tvm/tir/expr.h>
+#include <tvm/tir/ir_pass.h>
+#include <tvm/tir/op.h>
+#include <tvm/te/operation.h>
+
#include <algorithm>
#include <string>
-#include "topi/tags.h"
-#include "topi/detail/constant_utils.h"
-#include "tvm/tir/expr.h"
-#include "tvm/tir/ir_pass.h"
-#include "tvm/top/operation.h"
-#include "tvm/tir/op.h"
-
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace detail {
template <typename T>
* \return A Tensor whose op member is the relu operation
*/
template <typename T>
-inline tvm::top::Tensor relu(const tvm::top::Tensor& t,
+inline tvm::te::Tensor relu(const tvm::te::Tensor& t,
T threshold = static_cast<T>(0),
std::string name = "T_relu",
std::string tag = kElementWise) {
- return tvm::top::compute(
+ return tvm::te::compute(
t->shape,
[&](const tvm::Array<tvm::tir::Var>& i) {
auto threshold_const = tvm::tir::make_const(t->dtype, threshold);
*
* \return A Tensor whose op member is the leaky relu operation
*/
-inline tvm::top::Tensor leaky_relu(const tvm::top::Tensor& t,
+inline tvm::te::Tensor leaky_relu(const tvm::te::Tensor& t,
double alpha = 0.1,
std::string name = "T_leaky_relu",
std::string tag = kElementWise) {
- return tvm::top::compute(
+ return tvm::te::compute(
t->shape,
[&](const tvm::Array<tvm::tir::Var>& i) {
auto value = t(i);
*
* \return A Tensor whose op member is the parametric relu operation
*/
-inline tvm::top::Tensor prelu(const tvm::top::Tensor &x,
- const tvm::top::Tensor &slope,
+inline tvm::te::Tensor prelu(const tvm::te::Tensor &x,
+ const tvm::te::Tensor &slope,
const int axis = 1,
std::string name = "T_prelu",
std::string tag = kBroadcast) {
topi::detail::GetConstInt(x->shape[axis]))
<< "Wrong slope shape received.";
- return tvm::top::compute(x->shape,
+ return tvm::te::compute(x->shape,
[&](const tvm::Array<tvm::tir::Var> &indices) {
auto xval = x(indices);
return tvm::tir::SelectNode::make(
*
*
*/
-inline tvm::top::Tensor pad(const tvm::top::Tensor& t,
+inline tvm::te::Tensor pad(const tvm::te::Tensor& t,
const tvm::Array<tvm::PrimExpr>& pad_before,
tvm::Array<tvm::PrimExpr> pad_after = tvm::Array<tvm::PrimExpr>(),
PrimExpr pad_value = PrimExpr(),
}
return t(indices);
};
- return tvm::top::compute(output_shape, l, name, tag);
+ return tvm::te::compute(output_shape, l, name, tag);
}
/*!
* \return A Tensor whose op member is the 2-D convolution operation (NCHW
* layout)
*/
-inline tvm::top::Tensor conv2d_nchw(const tvm::top::Tensor& I,
- const tvm::top::Tensor& W,
+inline tvm::te::Tensor conv2d_nchw(const tvm::te::Tensor& I,
+ const tvm::te::Tensor& W,
int pad_h = 0,
int pad_w = 0,
int stride_h = 1,
indexdiv(I->shape[2] - W->shape[2] + 2 * pad_h, stride_h) + 1, // H
indexdiv(I->shape[3] - W->shape[3] + 2 * pad_w, stride_w) + 1 // W
};
- auto i = tvm::top::reduce_axis(tvm::Range{0, I->shape[1]}, "i");
- auto kh = tvm::top::reduce_axis(tvm::Range{0, W->shape[2]}, "kh");
- auto kw = tvm::top::reduce_axis(tvm::Range{0, W->shape[3]}, "kw");
+ auto i = tvm::te::reduce_axis(tvm::Range{0, I->shape[1]}, "i");
+ auto kh = tvm::te::reduce_axis(tvm::Range{0, W->shape[2]}, "kh");
+ auto kw = tvm::te::reduce_axis(tvm::Range{0, W->shape[3]}, "kw");
auto T = (pad_h == 0 && pad_w == 0)
? I
: pad(I, {tvm::PrimExpr(0), tvm::PrimExpr(0), pad_h, pad_w});
T(b, i, stride_h * h + kh, stride_w * w + kw) * W(o, i, kh, kw),
{i, kh, kw});
};
- return tvm::top::compute(output_shape, l, name, tag);
+ return tvm::te::compute(output_shape, l, name, tag);
}
/*!
* \return A Tensor whose op member is the 2-D convolution operation
* (HWCN layout)
*/
-inline tvm::top::Tensor conv2d_hwcn(const tvm::top::Tensor& I,
- const tvm::top::Tensor& W,
+inline tvm::te::Tensor conv2d_hwcn(const tvm::te::Tensor& I,
+ const tvm::te::Tensor& W,
int pad_h = 0,
int pad_w = 0,
int stride_h = 1,
I->shape[2], // B
W->shape[3] // O
};
- auto i = tvm::top::reduce_axis(tvm::Range{0, I->shape[3]}, "i");
- auto kh = tvm::top::reduce_axis(tvm::Range{0, W->shape[0]}, "kh");
- auto kw = tvm::top::reduce_axis(tvm::Range{0, W->shape[1]}, "kw");
+ auto i = tvm::te::reduce_axis(tvm::Range{0, I->shape[3]}, "i");
+ auto kh = tvm::te::reduce_axis(tvm::Range{0, W->shape[0]}, "kh");
+ auto kw = tvm::te::reduce_axis(tvm::Range{0, W->shape[1]}, "kw");
auto T = (pad_h == 0 && pad_w == 0) ? I : pad(I, {pad_h, pad_w});
auto l = [&](tvm::tir::Var b, tvm::tir::Var o, tvm::tir::Var h, tvm::tir::Var w) {
return tvm::sum(
T(stride_h * h + kh, stride_w * w + kw, i, b) * W(kh, kw, i, o),
{i, kh, kw});
};
- return tvm::top::compute(output_shape, l, name, tag);
+ return tvm::te::compute(output_shape, l, name, tag);
}
* \return A Tensor whose op member is the 2-D depthwise convolution operation
* (NCHW layout)
*/
-inline tvm::top::Tensor depthwise_conv2d_nchw(const tvm::top::Tensor& I,
- const tvm::top::Tensor& W,
+inline tvm::te::Tensor depthwise_conv2d_nchw(const tvm::te::Tensor& I,
+ const tvm::te::Tensor& W,
int pad_h = 0,
int pad_w = 0,
int stride_h = 1,
indexdiv(I->shape[2] - W->shape[2] + 2 * pad_h, stride_h) + 1, // H
indexdiv(I->shape[3] - W->shape[3] + 2 * pad_w, stride_w) + 1 // W
};
- auto i = tvm::top::reduce_axis(tvm::Range{0, I->shape[1]}, "i");
- auto kh = tvm::top::reduce_axis(tvm::Range{0, W->shape[2]}, "kh");
- auto kw = tvm::top::reduce_axis(tvm::Range{0, W->shape[3]}, "kw");
+ auto i = tvm::te::reduce_axis(tvm::Range{0, I->shape[1]}, "i");
+ auto kh = tvm::te::reduce_axis(tvm::Range{0, W->shape[2]}, "kh");
+ auto kw = tvm::te::reduce_axis(tvm::Range{0, W->shape[3]}, "kw");
auto T = (pad_h == 0 && pad_w == 0)
? I
: pad(I, {tvm::PrimExpr(0), tvm::PrimExpr(0), pad_h, pad_w});
W(indexdiv(i, pCM), indexmod(o, pCM), kh, kw),
{i, kh, kw});
};
- return tvm::top::compute(output_shape, l, name, tag);
+ return tvm::te::compute(output_shape, l, name, tag);
}
-inline tvm::top::Tensor depthwise_conv2d_nhwc(const tvm::top::Tensor& I,
- const tvm::top::Tensor& W,
+inline tvm::te::Tensor depthwise_conv2d_nhwc(const tvm::te::Tensor& I,
+ const tvm::te::Tensor& W,
int pad_h = 0,
int pad_w = 0,
int stride_h = 1,
indexdiv(I->shape[2] - W->shape[2] + 2 * pad_w, stride_w) + 1, // W
W->shape[3], // O
};
- auto i = tvm::top::reduce_axis(tvm::Range{0, I->shape[3]}, "i");
- auto kh = tvm::top::reduce_axis(tvm::Range{0, W->shape[0]}, "kh");
- auto kw = tvm::top::reduce_axis(tvm::Range{0, W->shape[1]}, "kw");
+ auto i = tvm::te::reduce_axis(tvm::Range{0, I->shape[3]}, "i");
+ auto kh = tvm::te::reduce_axis(tvm::Range{0, W->shape[0]}, "kh");
+ auto kw = tvm::te::reduce_axis(tvm::Range{0, W->shape[1]}, "kw");
auto T = (pad_h == 0 && pad_w == 0)
? I
: pad(I, {tvm::PrimExpr(0), pad_h, pad_w, tvm::PrimExpr(0)});
W(kh, kw, indexdiv(i, pCM), indexmod(o, pCM)),
{kh, kw, i});
};
- return tvm::top::compute(output_shape, l, name, tag);
+ return tvm::te::compute(output_shape, l, name, tag);
}
/*!
* \return A Tensor whose op member is the 2-D groupconvolution operation
* (NCHW layout)
*/
-inline tvm::top::Tensor group_conv2d_ngchw(const tvm::top::Tensor& I,
- const tvm::top::Tensor& W,
+inline tvm::te::Tensor group_conv2d_ngchw(const tvm::te::Tensor& I,
+ const tvm::te::Tensor& W,
int pad_h = 0,
int pad_w = 0,
int stride_h = 1,
indexdiv(I->shape[3] - W->shape[3] + 2 * pad_h, stride_h) + 1, // H
indexdiv(I->shape[4] - W->shape[4] + 2 * pad_w, stride_w) + 1 // W
};
- auto i = tvm::top::reduce_axis(tvm::Range{0, I->shape[2]}, "i");
- auto kh = tvm::top::reduce_axis(tvm::Range{0, W->shape[3]}, "kh");
- auto kw = tvm::top::reduce_axis(tvm::Range{0, W->shape[4]}, "kw");
+ auto i = tvm::te::reduce_axis(tvm::Range{0, I->shape[2]}, "i");
+ auto kh = tvm::te::reduce_axis(tvm::Range{0, W->shape[3]}, "kh");
+ auto kw = tvm::te::reduce_axis(tvm::Range{0, W->shape[4]}, "kw");
auto T = (pad_h == 0 && pad_w == 0)
? I
I(b, g, i, stride_h * h + kh, stride_w * w + kw) * W(g, i, o, kh, kw),
{i, kh, kw});
};
- return tvm::top::compute(output_shape, l, name, tag);
+ return tvm::te::compute(output_shape, l, name, tag);
}
} // namespace topi
#ifndef TOPI_NN_BATCH_MATMUL_H_
#define TOPI_NN_BATCH_MATMUL_H_
-#include <string>
+#include <tvm/te/operation.h>
+#include <topi/tags.h>
-#include "topi/tags.h"
-#include "tvm/top/operation.h"
+#include <string>
namespace topi {
namespace nn {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Creates an operation that calculates matrix multiplication in batch.
*
* \return Tensor with shape [batch, M, N]
*/
-inline tvm::top::Tensor batch_matmul(const tvm::top::Tensor& x,
- const tvm::top::Tensor& y) {
+inline tvm::te::Tensor batch_matmul(const tvm::te::Tensor& x,
+ const tvm::te::Tensor& y) {
CHECK_EQ(x->shape.size(), 3) << "batch_matmul requires 3-D data";
CHECK_EQ(y->shape.size(), 3) << "batch_matmul requires 3-D data";
auto K = x->shape[2];
auto N = y->shape[1];
- auto k = tvm::top::reduce_axis(Range(0, K), "k");
- auto result = tvm::top::compute(
+ auto k = tvm::te::reduce_axis(Range(0, K), "k");
+ auto result = tvm::te::compute(
{ batch, M, N },
[&](Var b, Var i, Var j) {
return tvm::sum(x(b, i, k) * y(b, j, k), { k });
#ifndef TOPI_NN_BIAS_ADD_H_
#define TOPI_NN_BIAS_ADD_H_
-#include <string>
+#include <tvm/te/operation.h>
+#include <topi/tags.h>
+#include <topi/broadcast.h>
+#include <topi/transform.h>
-#include "topi/tags.h"
-#include "topi/broadcast.h"
-#include "topi/transform.h"
-#include "tvm/top/operation.h"
-#include "tvm/tir/op.h"
+#include <string>
namespace topi {
namespace nn {
* \param axis The axis to add the bias to.
* \return Tensor with shape [batch, in_dim]
*/
-inline tvm::top::Tensor bias_add(const tvm::top::Tensor& data,
- const tvm::top::Tensor& bias,
+inline tvm::te::Tensor bias_add(const tvm::te::Tensor& data,
+ const tvm::te::Tensor& bias,
int axis) {
int data_ndim = data->shape.size();
if (axis < 0) {
#ifndef TOPI_NN_BNN_H_
#define TOPI_NN_BNN_H_
-#include <string>
+#include <tvm/te/operation.h>
+#include <tvm/tir/ir_pass.h>
+#include <topi/tags.h>
+#include <topi/detail/constant_utils.h>
-#include "tvm/top/operation.h"
-#include "tvm/tir/ir_pass.h"
-#include "topi/tags.h"
-#include "topi/detail/constant_utils.h"
+#include <string>
namespace topi {
namespace nn {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Binarization and bit-packing along a certain axis.
*
* \return Output tensor with dtype uint32
*/
-inline tvm::top::Tensor binarize_pack(const tvm::top::Tensor& data,
+inline tvm::te::Tensor binarize_pack(const tvm::te::Tensor& data,
int axis,
std::string name = "PackedInput",
std::string tag = "binarize_pack") {
ishape[i]);
}
- return tvm::top::compute(
+ return tvm::te::compute(
oshape,
[&](const Array<Var>& indices) {
Array<PrimExpr> start_idx;
*
* \return Tensor with shape [batch, out_dim], dtype is float32
*/
-inline tvm::top::Tensor binary_dense(const tvm::top::Tensor& data,
- const tvm::top::Tensor& weight) {
+inline tvm::te::Tensor binary_dense(const tvm::te::Tensor& data,
+ const tvm::te::Tensor& weight) {
CHECK_EQ(data->shape.size(), 2) << "binary_dense requires 2-D data";
CHECK_EQ(weight->shape.size(), 2) << "binary_dense requires 2-D weight";
CHECK_EQ(data->dtype, DataType::UInt(32)) << "binary_dense requires uint32 data";
auto in_dim = data->shape[1];
auto out_dim = weight->shape[0];
- auto k = tvm::top::reduce_axis(Range(0, in_dim), "k");
- auto matmul = tvm::top::compute(
+ auto k = tvm::te::reduce_axis(Range(0, in_dim), "k");
+ auto matmul = tvm::te::compute(
{ batch, out_dim },
[&](Var i, Var j) {
return tvm::sum(popcount(data(i, k) ^ weight(j, k)), { k });
}, "tensor", "binary_dense");
- return tvm::top::compute(
+ return tvm::te::compute(
{ batch, out_dim },
[&](Var i, Var j) {
return 32 * in_dim - 2.0f * matmul(i, j);
#ifndef TOPI_NN_DENSE_H_
#define TOPI_NN_DENSE_H_
-#include <string>
+#include <tvm/te/operation.h>
+#include <topi/tags.h>
-#include "topi/tags.h"
-#include "tvm/top/operation.h"
+#include <string>
namespace topi {
namespace nn {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Creates an operation that calculates data * weight^T + bias
*
* \return Tensor with shape [batch, out_dim]
*/
-inline tvm::top::Tensor dense(const tvm::top::Tensor& data,
- const tvm::top::Tensor& weight,
- const tvm::top::Tensor& bias,
+inline tvm::te::Tensor dense(const tvm::te::Tensor& data,
+ const tvm::te::Tensor& weight,
+ const tvm::te::Tensor& bias,
const DataType& out_dtype) {
CHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data";
CHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight";
auto in_dim = data->shape[1];
auto out_dim = weight->shape[0];
- auto k = tvm::top::reduce_axis(Range(0, in_dim), "k");
- auto matmul = tvm::top::compute(
+ auto k = tvm::te::reduce_axis(Range(0, in_dim), "k");
+ auto matmul = tvm::te::compute(
{ batch, out_dim },
[&](Var i, Var j) {
return tvm::sum(tvm::cast(out_dtype, data(i, k)) *
}, "tensor", "dense");
if (bias.defined()) {
- matmul = tvm::top::compute(
+ matmul = tvm::te::compute(
{ batch, out_dim },
[&](Var i, Var j) {
return matmul(i, j) + tvm::cast(out_dtype, bias(j));
#ifndef TOPI_NN_DILATE_H_
#define TOPI_NN_DILATE_H_
-#include <string>
+#include <tvm/te/operation.h>
+#include <tvm/tir/ir_pass.h>
+#include <topi/tags.h>
-#include "tvm/top/operation.h"
-#include "tvm/tir/ir_pass.h"
-#include "topi/tags.h"
+#include <string>
namespace topi {
namespace nn {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Create a new expression of the logical and of all
(x->shape[i] - 1) * cast(DataType::Int(32), strides[i] + 1)));
}
- return tvm::top::compute(
+ return tvm::te::compute(
out_shape,
[&](const Array<Var>& indices) {
Array<PrimExpr> not_zero;
#ifndef TOPI_NN_FLATTEN_H_
#define TOPI_NN_FLATTEN_H_
+#include <tvm/te/operation.h>
+#include <topi/tags.h>
+#include <topi/detail/constant_utils.h>
+
#include <string>
#include <vector>
-#include "topi/tags.h"
-#include "topi/detail/constant_utils.h"
-#include "tvm/top/operation.h"
-#include "tvm/tir/op.h"
-
-
namespace topi {
namespace nn {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Flattens the input tensor into a 2-D tensor by collapsing higher dimensions.
}
std::reverse(extra_shape.begin(), extra_shape.end());
- return tvm::top::compute(
+ return tvm::te::compute(
oshape, [&](Var i, Var j) {
PrimExpr idx = j;
std::vector<PrimExpr> index;
#ifndef TOPI_NN_L2_NORMALIZE_H_
#define TOPI_NN_L2_NORMALIZE_H_
+#include <tvm/te/operation.h>
+#include <topi/tags.h>
+
#include <string>
#include <algorithm>
-#include "topi/tags.h"
-#include "tvm/top/operation.h"
+
namespace topi {
namespace nn {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief L2 normalization inference operator
Tensor sum_value = topi::sum(dot_value, axis, true);
Tensor expand_sum = topi::broadcast_to(sum_value, input_shape);
return topi::divide(data,
- topi::sqrt(tvm::top::compute(expand_sum->shape,
+ topi::sqrt(tvm::te::compute(expand_sum->shape,
[&](const Array<Var>& i){
return (max(expand_sum(i), eps));
}, name, tag)));
#ifndef TOPI_NN_LOCAL_RESPONSE_NORM_H_
#define TOPI_NN_LOCAL_RESPONSE_NORM_H_
-#include <string>
+#include <tvm/te/operation.h>
+#include <topi/tags.h>
-#include "topi/tags.h"
-#include "tvm/top/operation.h"
+#include <string>
namespace topi {
namespace nn {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Local response normalization inference operator
pad_before.Set(axis, static_cast<PrimExpr>(size/2));
pad_after.Set(axis, static_cast<PrimExpr>(size/2));
auto pad_data = pad(data, pad_before, pad_after, 0, "pad_data");
- auto rxs = tvm::top::reduce_axis(Range(0, size), "rxs");
+ auto rxs = tvm::te::reduce_axis(Range(0, size), "rxs");
Tensor sqr_sum;
if (axis == 1) {
- sqr_sum = tvm::top::compute(input_shape,
+ sqr_sum = tvm::te::compute(input_shape,
[&](Var i, Var l, Var j, Var k) {
return tvm::sum(pad_data(i, l + rxs, j, k) *
pad_data(i, l + rxs, j, k),
{rxs});
});
} else if (axis == 3) {
- sqr_sum = tvm::top::compute(input_shape,
+ sqr_sum = tvm::te::compute(input_shape,
[&](Var i, Var l, Var j, Var k) {
return tvm::sum(pad_data(i, l, j, k + rxs) *
pad_data(i, l, j, k + rxs),
{rxs});
});
}
- auto sqrt_sum_up = tvm::top::compute(
+ auto sqrt_sum_up = tvm::te::compute(
input_shape,
[&](Var i, Var j, Var k, Var l) {
return tvm::pow(bias +
#ifndef TOPI_NN_MAPPING_H_
#define TOPI_NN_MAPPING_H_
-#include <string>
+#include <tvm/te/operation.h>
+#include <topi/tags.h>
-#include "topi/tags.h"
-#include "tvm/top/operation.h"
+#include <string>
namespace topi {
namespace nn {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Scale and shift with NCHW order
const Tensor& shift,
std::string name = "ScaleShift",
std::string tag = kBroadcast) {
- return tvm::top::compute(
+ return tvm::te::compute(
x->shape,
[&](Var b, Var c, Var h, Var w) {
return x(b, c, h, w) * scale(c) + shift(w);
const Tensor& shift,
std::string name = "ScaleShift",
std::string tag = kBroadcast) {
- return tvm::top::compute(
+ return tvm::te::compute(
x->shape,
[&](Var b, Var h, Var w, Var c) {
return x(b, h, w, c) * scale(c) + shift(w);
#ifndef TOPI_NN_POOLING_H_
#define TOPI_NN_POOLING_H_
+#include <topi/detail/pad_utils.h>
+#include <topi/nn.h>
+#include <topi/reduction.h>
+#include <topi/tags.h>
+#include <tvm/tir/ir_pass.h>
+
#include <algorithm>
#include <string>
#include <vector>
-#include "topi/detail/pad_utils.h"
-#include "topi/nn.h"
-#include "topi/reduction.h"
-#include "topi/tags.h"
-#include "tvm/tir/ir_pass.h"
-
namespace topi {
namespace nn {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*! \brief Pooling type */
enum PoolType : int {
auto out_width = tvm::tir::Simplify(
indexdiv(width - kernel_width + pad_left + pad_right, stride_width) + 1);
- auto dheight = tvm::top::reduce_axis(Range(0, kernel_height));
- auto dwidth = tvm::top::reduce_axis(Range(0, kernel_width));
+ auto dheight = tvm::te::reduce_axis(Range(0, kernel_height));
+ auto dwidth = tvm::te::reduce_axis(Range(0, kernel_width));
Array<PrimExpr> out_shape = x->shape;
out_shape.Set(height_axis, out_height);
if (pool_type == kMaxPool) {
auto temp = do_pad ? pad(
x, pad_before, pad_after, tvm::min_value(x->dtype), "pad_temp") : x;
- return tvm::top::compute(out_shape, [&](const Array<Var>& output) {
+ return tvm::te::compute(out_shape, [&](const Array<Var>& output) {
Array<PrimExpr> indices;
for (const Var& var : output) indices.push_back(var);
indices.Set(height_axis, output[height_axis] * stride_height + dheight);
auto temp = do_pad ? pad(x, pad_before, pad_after, 0, "pad_temp") : x;
// TVM compute for summing the pooling window.
- auto pool_sum = tvm::top::compute(out_shape,
+ auto pool_sum = tvm::te::compute(out_shape,
[&](const Array<Var>& output) {
Array<PrimExpr> indices;
for (const Var& var : output) indices.push_back(var);
}, "tensor", "pool_sum");
// TVM compute for dividing the reduced window sum by kernel size.
- return tvm::top::compute(out_shape,
+ return tvm::te::compute(out_shape,
[&](const Array<Var>& output) {
Array<PrimExpr> indices;
for (const Var& var : output) indices.push_back(var);
auto out_width =
tvm::tir::Simplify((width - kernel_width + pad_left + pad_right) / stride_width + 1);
- auto dheight = tvm::top::reduce_axis(Range(0, kernel_height));
- auto dwidth = tvm::top::reduce_axis(Range(0, kernel_width));
+ auto dheight = tvm::te::reduce_axis(Range(0, kernel_height));
+ auto dwidth = tvm::te::reduce_axis(Range(0, kernel_width));
Array<PrimExpr> out_shape = x->shape;
out_shape.Set(height_axis, out_height);
ravel_shape.Set(height_axis, ravel_shape[height_axis] + pad_top + pad_bottom);
ravel_shape.Set(width_axis, ravel_shape[width_axis] + pad_left + pad_right);
- auto windowh = tvm::top::reduce_axis(
+ auto windowh = tvm::te::reduce_axis(
Range(0, (kernel_height + stride_height - 1) / stride_height));
- auto windoww = tvm::top::reduce_axis(
+ auto windoww = tvm::te::reduce_axis(
Range(0, (kernel_width + stride_width - 1) / stride_width));
auto argmax = MakeArgmaxReducer();
x, pad_before, pad_after, tvm::min_value(x->dtype), "pad_temp") : x;
auto mp_argmax =
- tvm::top::compute(
+ tvm::te::compute(
out_shape,
[&](const Array<Var>& inds) {
Array<PrimExpr> window_inds{inds.begin(), inds.end()};
auto mp_inds = mp_argmax[0];
- return tvm::top::compute(
+ return tvm::te::compute(
x->shape,
[&](const Array<Var>& inds) {
Array<PrimExpr> pad_inds {inds.begin(), inds.end()};
},
"T_pool_grad", "pool_grad_max");
} else if (pool_type == kAvgPool) {
- auto windowh = tvm::top::reduce_axis(
+ auto windowh = tvm::te::reduce_axis(
Range(0, (kernel_height + stride_height - 1) / stride_height));
- auto windoww = tvm::top::reduce_axis(
+ auto windoww = tvm::te::reduce_axis(
Range(0, (kernel_width + stride_width - 1) / stride_width));
- return tvm::top::compute(
+ return tvm::te::compute(
x->shape,
[&](const Array<Var>& inds) {
PrimExpr pad_h_idx = inds[height_axis] + pad_top;
out_shape.Set(width_axis, out_width);
if (pool_type == kMaxPool) {
- return tvm::top::compute(out_shape, [&](const Array<Var>& output) {
+ return tvm::te::compute(out_shape, [&](const Array<Var>& output) {
Array<PrimExpr> indices;
for (const Var& var : output) indices.push_back(var);
auto i_start_h = start_index(output[height_axis], out_height, height);
auto i_end_h = end_index(output[height_axis], out_height, height);
auto i_start_w = start_index(output[width_axis], out_width, width);
auto i_end_w = end_index(output[width_axis], out_width, width);
- auto dheight = tvm::top::reduce_axis(Range(0, i_end_h - i_start_h), "rv1");
- auto dwidth = tvm::top::reduce_axis(Range(0, i_end_w - i_start_w), "rv2");
+ auto dheight = tvm::te::reduce_axis(Range(0, i_end_h - i_start_h), "rv1");
+ auto dwidth = tvm::te::reduce_axis(Range(0, i_end_w - i_start_w), "rv2");
indices.Set(height_axis, i_start_h + dheight);
indices.Set(width_axis, i_start_w + dwidth);
return tvm::max(x(indices), { dheight, dwidth }); // NOLINT(*)
}, "tensor", "adaptive_pool_max");
} else if (pool_type == kAvgPool) {
- auto pool_sum = tvm::top::compute(out_shape, [&](const Array<Var>& output) {
+ auto pool_sum = tvm::te::compute(out_shape, [&](const Array<Var>& output) {
Array<PrimExpr> indices;
for (const Var& var : output) indices.push_back(var);
auto i_start_h = start_index(output[height_axis], out_height, height);
auto i_end_w = end_index(output[width_axis], out_width, width);
auto divide_factor = tvm::cast(x->dtype, (i_end_h - i_start_h)
* (i_end_w - i_start_w));
- auto dheight = tvm::top::reduce_axis(Range(0, i_end_h - i_start_h), "rv1");
- auto dwidth = tvm::top::reduce_axis(Range(0, i_end_w - i_start_w), "rv2");
+ auto dheight = tvm::te::reduce_axis(Range(0, i_end_h - i_start_h), "rv1");
+ auto dwidth = tvm::te::reduce_axis(Range(0, i_end_w - i_start_w), "rv2");
indices.Set(height_axis, i_start_h + dheight);
indices.Set(width_axis, i_start_w + dwidth);
return tvm::sum(x(indices), { dheight, dwidth });
}, "tensor", "adaptive_pool_sum");
- return tvm::top::compute(out_shape, [&](const Array<Var>& output) {
+ return tvm::te::compute(out_shape, [&](const Array<Var>& output) {
Array<PrimExpr> indices;
for (const Var& var : output) indices.push_back(var);
auto i_start_h = start_index(output[height_axis], out_height, height);
pad_tail[i] += stride[i] - 1;
}
- daxis.push_back(tvm::top::reduce_axis(Range(0, kernel[i])));
+ daxis.push_back(tvm::te::reduce_axis(Range(0, kernel[i])));
pad_before.Set(ii, pad_head[i]);
pad_after.Set(ii, pad_tail[i]);
if (pool_type == kMaxPool) {
auto temp = do_pad ? pad(
x, pad_before, pad_after, tvm::min_value(x->dtype), "pad_temp") : x;
- return tvm::top::compute(out_shape, [&](const Array<Var>& output) {
+ return tvm::te::compute(out_shape, [&](const Array<Var>& output) {
Array<PrimExpr> indices;
for (const Var& var : output) indices.push_back(var);
auto temp = do_pad ? pad(x, pad_before, pad_after, 0, "pad_temp") : x;
// TVM compute for summing the pooling window.
- auto pool_sum = tvm::top::compute(out_shape,
+ auto pool_sum = tvm::te::compute(out_shape,
[&](const Array<Var>& output) {
Array<PrimExpr> indices;
for (const Var& var : output) indices.push_back(var);
}, "tensor", "pool_sum");
// TVM compute for dividing the reduced window sum by kernel size.
- return tvm::top::compute(out_shape,
+ return tvm::te::compute(out_shape,
[&](const Array<Var>& output) {
Array<PrimExpr> indices;
for (const Var& var : output) indices.push_back(var);
#ifndef TOPI_NN_SOFTMAX_H_
#define TOPI_NN_SOFTMAX_H_
+#include <tvm/te/operation.h>
+#include <topi/reduction.h>
+#include <topi/tags.h>
+
#include <algorithm>
#include <string>
-#include "topi/reduction.h"
-#include "topi/tags.h"
-#include "tvm/top/operation.h"
-#include "tvm/tir/op.h"
-
namespace topi {
namespace nn {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Softmax activation
}
CHECK_LT(axis, ndim) << "axis parameter should be less than input dim";
- auto k1 = tvm::top::reduce_axis(Range(0, input_shape[axis]), "k1");
- auto k2 = tvm::top::reduce_axis(Range(0, input_shape[axis]), "k2");
+ auto k1 = tvm::te::reduce_axis(Range(0, input_shape[axis]), "k1");
+ auto k2 = tvm::te::reduce_axis(Range(0, input_shape[axis]), "k2");
auto reduced_shape = MakeReduceTargetShape({axis}, x, false, false);
tvm::Map<std::string, ObjectRef> attrs;
return exp(indices) / expsum(non_reduce_indices);
};
- auto max_elem = tvm::top::compute(reduced_shape, _compute_max);
- auto exp = tvm::top::compute(input_shape, [&](const Array<Var> &indices) {
+ auto max_elem = tvm::te::compute(reduced_shape, _compute_max);
+ auto exp = tvm::te::compute(input_shape, [&](const Array<Var> &indices) {
return _compute_exp(max_elem, indices);
});
- auto expsum = tvm::top::compute(reduced_shape, [&](const Array<Var> &indices) {
+ auto expsum = tvm::te::compute(reduced_shape, [&](const Array<Var> &indices) {
return _compute_expsum(exp, indices);
});
- return tvm::top::compute(input_shape, [&](const Array<Var> &indices) {
+ return tvm::te::compute(input_shape, [&](const Array<Var> &indices) {
return _normalize(exp, expsum, indices);
}, name, tag, attrs);
}
PrimExpr m = x->shape[0];
PrimExpr n = x->shape[1];
- auto k = tvm::top::reduce_axis(Range(0, n), "k");
- auto max_elem = tvm::top::compute(
+ auto k = tvm::te::reduce_axis(Range(0, n), "k");
+ auto max_elem = tvm::te::compute(
{ m }, [&](Var i) {
return tvm::max(x(i, k), Array<IterVar>{ k }); });
- k = tvm::top::reduce_axis(Range(0, n), "k");
+ k = tvm::te::reduce_axis(Range(0, n), "k");
- auto expsum = tvm::top::compute(
+ auto expsum = tvm::te::compute(
{ m }, [&](Var i) {
return tvm::sum(tvm::exp(x(i, k) - max_elem(i)), { k }); });
- return tvm::top::compute(
+ return tvm::te::compute(
x->shape, [&](Var i, Var j) {
return x(i, j) - max_elem(i) - tvm::log(expsum(i));
}, name, tag);
namespace topi {
namespace nn {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
using namespace topi::image;
/*!
#ifndef TOPI_REDUCTION_H_
#define TOPI_REDUCTION_H_
+#include <tvm/te/operation.h>
+#include <topi/broadcast.h>
+#include <topi/elemwise.h>
+#include <topi/tags.h>
+#include <topi/transform.h>
+#include <topi/detail/ravel_unravel.h>
+#include <topi/detail/constant_utils.h>
+
#include <algorithm>
#include <string>
#include <vector>
#include <iterator>
-#include "topi/broadcast.h"
-#include "topi/elemwise.h"
-#include "topi/tags.h"
-#include "topi/transform.h"
-#include "topi/detail/ravel_unravel.h"
-#include "topi/detail/constant_utils.h"
-#include "tvm/top/operation.h"
-#include "tvm/tir/op.h"
-
-
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*! \brief The operation to use for CommReduce */
using FReduce = std::function<PrimExpr(PrimExpr source, const Array<IterVar>& axis)>;
for (auto i : real_axis) {
std::string name = "k" + std::to_string(i);
reduce_axes.push_back(
- tvm::top::reduce_axis(Range(0, data->shape[i]), name));
+ tvm::te::reduce_axis(Range(0, data->shape[i]), name));
}
return reduce_axes;
}
return func(data(eval_range), r_axes);
};
- return tvm::top::compute(target_shape, compute, data->op->name + "_red", kCommReduce);
+ return tvm::te::compute(target_shape, compute, data->op->name + "_red", kCommReduce);
}
/*!
return func({ idx, data(eval_range) }, reduce_axes, nullptr);
};
- auto temp_idx_val = tvm::top::compute(target_shape, compute,
+ auto temp_idx_val = tvm::te::compute(target_shape, compute,
data->op->name + "_red_temp", kCommReduceIdx);
auto temp_idx = temp_idx_val[0];
auto temp_val = temp_idx_val[1];
- return tvm::top::compute(
+ return tvm::te::compute(
target_shape,
[&temp_idx](const Array<Var>& indices) { return temp_idx(indices); },
data->op->name + "_red",
#ifndef TOPI_ROCM_DENSE_H_
#define TOPI_ROCM_DENSE_H_
-#include "tvm/top/operation.h"
-#include "tvm/target/generic_func.h"
-#include "topi/tags.h"
+#include <tvm/te/operation.h>
+#include <tvm/target/generic_func.h>
+#include <topi/tags.h>
#include "topi/detail/array_utils.h"
#include "topi/nn/dense.h"
#include "topi/contrib/rocblas.h"
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace rocm {
/*!
*
* \return Tensor with shape [batch, out_dim]
*/
-inline tvm::top::Tensor dense_rocm(const Target& target,
- const tvm::top::Tensor& data,
- const tvm::top::Tensor& weight,
- const tvm::top::Tensor& bias,
+inline tvm::te::Tensor dense_rocm(const Target& target,
+ const tvm::te::Tensor& data,
+ const tvm::te::Tensor& weight,
+ const tvm::te::Tensor& bias,
const DataType& out_dtype) {
CHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data";
CHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight";
CHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported.";
auto mm = topi::contrib::rocblas_matmul(data, weight, false, true);
if (bias.defined()) {
- mm = tvm::top::compute({ batch, out_dim },
+ mm = tvm::te::compute({ batch, out_dim },
[&](Var i, Var j) {
return mm(i, j) + bias(j);
}, "tensor", kBroadcast);
#ifndef TOPI_ROCM_INJECTIVE_H_
#define TOPI_ROCM_INJECTIVE_H_
-#include "topi/tags.h"
-#include "topi/detail/fuse.h"
-#include "tvm/top/operation.h"
-#include "tvm/target/generic_func.h"
+#include <topi/tags.h>
+#include <topi/detail/fuse.h>
+#include <tvm/te/operation.h>
+#include <tvm/target/generic_func.h>
#include "topi/cuda/injective.h"
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace rocm {
#ifndef TOPI_ROCM_NORMALIZATION_H_
#define TOPI_ROCM_NORMALIZATION_H_
-#include "tvm/top/operation.h"
-#include "tvm/target/generic_func.h"
-#include "topi/tags.h"
+#include <tvm/te/operation.h>
+#include <tvm/target/generic_func.h>
+#include <topi/tags.h>
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace rocm {
/*!
* \brief Create a rocm schedule for LRN
#ifndef TOPI_ROCM_POOLING_H_
#define TOPI_ROCM_POOLING_H_
-#include "topi/tags.h"
-#include "topi/detail/fuse.h"
-#include "topi/detail/array_utils.h"
-#include "tvm/top/operation.h"
-#include "tvm/target/generic_func.h"
-
-#include "topi/cuda/pooling.h"
+#include <tvm/te/operation.h>
+#include <tvm/target/generic_func.h>
+#include <topi/tags.h>
+#include <topi/detail/fuse.h>
+#include <topi/detail/array_utils.h>
+#include <topi/cuda/pooling.h>
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace rocm {
#ifndef TOPI_ROCM_REDUCTION_H_
#define TOPI_ROCM_REDUCTION_H_
-#include "topi/tags.h"
-#include "topi/detail/fuse.h"
-#include "tvm/top/operation.h"
-#include "tvm/target/generic_func.h"
+#include <topi/tags.h>
+#include <topi/detail/fuse.h>
+#include <tvm/te/operation.h>
+#include <tvm/target/generic_func.h>
#include "topi/cuda/reduction.h"
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace rocm {
/*!
#ifndef TOPI_ROCM_SOFTMAX_H_
#define TOPI_ROCM_SOFTMAX_H_
-#include "topi/tags.h"
-#include "topi/detail/fuse.h"
-#include "tvm/top/operation.h"
-#include "tvm/target/generic_func.h"
+#include <topi/tags.h>
+#include <topi/detail/fuse.h>
+#include <tvm/te/operation.h>
+#include <tvm/target/generic_func.h>
#include "topi/cuda/softmax.h"
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace rocm {
#ifndef TOPI_TRANSFORM_H_
#define TOPI_TRANSFORM_H_
+#include <tvm/tir/data_layout.h>
+#include <tvm/te/operation.h>
+#include <topi/tags.h>
+#include <topi/detail/ravel_unravel.h>
+#include <topi/detail/constant_utils.h>
+#include <topi/detail/tensor_utils.h>
+
#include <string>
#include <vector>
#include <iterator>
#include <limits>
#include <unordered_set>
-#include "topi/tags.h"
-#include "topi/detail/ravel_unravel.h"
-#include "topi/detail/constant_utils.h"
-#include "topi/detail/tensor_utils.h"
-#include "tvm/top/operation.h"
-#include "tvm/tir/op.h"
-#include "tvm/tir/data_layout.h"
-
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
using namespace topi::detail;
/*!
*
* \return A Tensor whose op member is the matmul operation
*/
-inline tvm::top::Tensor matmul(const tvm::top::Tensor& A,
- const tvm::top::Tensor& B,
+inline tvm::te::Tensor matmul(const tvm::te::Tensor& A,
+ const tvm::te::Tensor& B,
bool trans_a = false,
bool trans_b = false,
std::string name = "T_matmul",
std::string tag = kMatMul) {
tvm::Array<tvm::PrimExpr> output_shape{A->shape[trans_a ? 1 : 0],
B->shape[trans_b ? 0 : 1]};
- auto k = tvm::top::reduce_axis(tvm::Range{0, A->shape[trans_a ? 0 : 1]}, "k");
+ auto k = tvm::te::reduce_axis(tvm::Range{0, A->shape[trans_a ? 0 : 1]}, "k");
auto l = [&](tvm::tir::Var i, tvm::tir::Var j) {
return tvm::sum((trans_a ? A[k][i] : A[i][k]) * (trans_b ? B[j][k] : B[k][j]),
{k});
};
- return tvm::top::compute(output_shape, l, name, tag);
+ return tvm::te::compute(output_shape, l, name, tag);
}
/*!
* \return A Tensor computing the result
*/
inline Tensor tensordot(const Tensor& A,
- const tvm::top::Tensor& B,
+ const tvm::te::Tensor& B,
int axes = 2,
std::string name = "T_tensordot",
std::string tag = kMatMul) {
* \return A Tensor computing the result
*/
inline Tensor tensordot(const Tensor& A,
- const tvm::top::Tensor& B,
+ const tvm::te::Tensor& B,
Array<PrimExpr> A_axes,
Array<PrimExpr> B_axes,
std::string name = "T_tensordot",
#ifndef TOPI_VISION_REORG_H_
#define TOPI_VISION_REORG_H_
+#include <tvm/te/operation.h>
+#include <topi/detail/constant_utils.h>
+#include <topi/reduction.h>
+#include <topi/tags.h>
+#include <topi/transform.h>
+
#include <algorithm>
#include <string>
-#include "topi/detail/constant_utils.h"
-#include "topi/reduction.h"
-#include "topi/tags.h"
-#include "topi/transform.h"
-#include "tvm/top/operation.h"
-#include "tvm/tir/op.h"
-
namespace topi {
namespace vision {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
/*!
* \brief Reorg operation
int w_in = GetConstInt(input_shape[3]);
int out_c = c_in / (stride * stride);
- auto out = tvm::top::compute(input_shape,
+ auto out = tvm::te::compute(input_shape,
[&](Var b, Var k, Var j, Var i) {
return data(b * stride * stride,
indexmod(k, out_c) * stride * stride,
#ifndef TOPI_X86_BNN_H_
#define TOPI_X86_BNN_H_
-#include "topi/tags.h"
-#include "topi/detail/fuse.h"
-#include "tvm/top/operation.h"
-#include "tvm/target/generic_func.h"
+#include <topi/tags.h>
+#include <topi/detail/fuse.h>
+#include <tvm/te/operation.h>
+#include <tvm/target/generic_func.h>
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace x86 {
/*!
#ifndef TOPI_X86_DEFAULT_H_
#define TOPI_X86_DEFAULT_H_
-#include "topi/tags.h"
-#include "topi/detail/fuse.h"
-#include "tvm/top/operation.h"
-#include "tvm/target/generic_func.h"
+#include <topi/tags.h>
+#include <topi/detail/fuse.h>
+#include <tvm/te/operation.h>
+#include <tvm/target/generic_func.h>
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace x86 {
/*!
auto axis = s[x]->op.as<ComputeOpNode>()->axis;
if (auto_inline) {
- tvm::top::AutoInlineInjective(s);
+ tvm::te::AutoInlineInjective(s);
if (axis.size() > 0) {
detail::Fuse(s[x], axis);
}
#ifndef TOPI_X86_INJECTIVE_H_
#define TOPI_X86_INJECTIVE_H_
-#include "topi/tags.h"
-#include "topi/detail/fuse.h"
-#include "tvm/top/operation.h"
-#include "tvm/target/generic_func.h"
+#include <topi/tags.h>
+#include <topi/detail/fuse.h>
+#include <tvm/te/operation.h>
+#include <tvm/target/generic_func.h>
namespace topi {
using namespace tvm;
-using namespace tvm::top;
+using namespace tvm::te;
namespace x86 {
out_ops.push_back(t->op);
}
auto s = create_schedule(out_ops);
- tvm::top::AutoInlineInjective(s);
+ tvm::te::AutoInlineInjective(s);
auto x = outs[0];
schedule_injective_from_existing(s, x);
inline bool IsTensorType(TVMArgValue arg) {
return (arg.type_code() == kTVMObjectHandle &&
static_cast<Object*>(
- arg.value().v_handle)->IsInstance<tvm::top::TensorNode>());
+ arg.value().v_handle)->IsInstance<tvm::te::TensorNode>());
}
bool lhs_is_tensor = IsTensorType(args[0]); \
bool rhs_is_tensor = IsTensorType(args[1]); \
if (lhs_is_tensor && rhs_is_tensor) { \
- *rv = Op(args[0].operator tvm::top::Tensor(), \
- args[1].operator tvm::top::Tensor()); \
+ *rv = Op(args[0].operator tvm::te::Tensor(), \
+ args[1].operator tvm::te::Tensor()); \
} else if (!lhs_is_tensor && rhs_is_tensor) { \
- *rv = Op(args[0].operator tvm::PrimExpr(), \
- args[1].operator tvm::top::Tensor()); \
+ *rv = Op(args[0].operator tvm::PrimExpr(), \
+ args[1].operator tvm::te::Tensor()); \
} else if (lhs_is_tensor && !rhs_is_tensor) { \
- *rv = Op(args[0].operator tvm::top::Tensor(), \
- args[1].operator tvm::PrimExpr()); \
+ *rv = Op(args[0].operator tvm::te::Tensor(), \
+ args[1].operator tvm::PrimExpr()); \
} else if (!lhs_is_tensor && !rhs_is_tensor) { \
- *rv = Op(args[0].operator tvm::PrimExpr(), \
- args[1].operator tvm::PrimExpr()); \
+ *rv = Op(args[0].operator tvm::PrimExpr(), \
+ args[1].operator tvm::PrimExpr()); \
} \
}); \
/*! \brief Builder function for instantiating schedules. */
using FTVMScheduleBuilder = std::function<
- tvm::top::Schedule(const tvm::Target& target, const tvm::Array<tvm::top::Tensor>& outs)>;
+ tvm::te::Schedule(const tvm::Target& target, const tvm::Array<tvm::te::Tensor>& outs)>;
/*!
* \brief Helper function for registering generic functions matching the
/*! \brief Builder function for instantiating schedules from existing schedules. */
using FTVMScheduleFromExistingBuilder = std::function<
- tvm::top::Schedule(tvm::top::Schedule sch, const tvm::top::Tensor& out)>;
+ tvm::te::Schedule(tvm::te::Schedule sch, const tvm::te::Tensor& out)>;
/*!
* \brief Helper function for registering generic functions matching the
topi::cuda::schedule_injective_from_existing));
/*! \brief Builder function for instantiating dense ops. */
-using FTVMDenseOpBuilder = std::function<tvm::top::Tensor(const Target& target,
- const tvm::top::Tensor& data,
- const tvm::top::Tensor& weight,
- const tvm::top::Tensor& bias,
+using FTVMDenseOpBuilder = std::function<tvm::te::Tensor(const Target& target,
+ const tvm::te::Tensor& data,
+ const tvm::te::Tensor& weight,
+ const tvm::te::Tensor& bias,
const DataType& out_dtype)>;
/*!
TVM_REGISTER_GENERIC_FUNC(dense)
.set_default(WrapDenseOp([](const Target& target,
- const tvm::top::Tensor& data,
- const tvm::top::Tensor& weight,
- const tvm::top::Tensor& bias,
+ const tvm::te::Tensor& data,
+ const tvm::te::Tensor& weight,
+ const tvm::te::Tensor& bias,
const DataType& out_dtype) {
return topi::nn::dense(data, weight, bias, out_dtype);
}))