#include <c10/core/Backend.h>
#include <c10/core/ScalarType.h>
+#include <ATen/core/LegacyDeviceTypeInit.h>
#include <ATen/LegacyTHDispatcher.h>
namespace at {
dispatcher_registry[static_cast<int>(b)][static_cast<int>(s)] = std::move(t);
}
+ // Returns the dispatcher for (backend, scalar type), lazily initializing
+ // the device type via getLegacyTHDispatcherOpt; fails with AT_ERROR when
+ // the dispatcher is unavailable. NB: the message intentionally concatenates
+ // to e.g. "CPUFloatTHDispatcher is not enabled." — no separators needed.
+ LegacyTHDispatcher & getLegacyTHDispatcher(Backend p, ScalarType s) {
+ auto* dispatcher = getLegacyTHDispatcherOpt(p, s);
+ if (!dispatcher) AT_ERROR(toString(p), toString(s), "THDispatcher is not enabled.");
+ return *dispatcher;
+ }
+private:
// Raw registry lookup with no initialization and no error checking; may
// return nullptr (e.g. CUDA backends before CUDA initialization — see the
// note on dispatcher_registry below).
LegacyTHDispatcher* getLegacyTHDispatcherRaw(Backend p, ScalarType s) {
return dispatcher_registry[static_cast<int>(p)][static_cast<int>(s)].get();
}
- LegacyTHDispatcher & getLegacyTHDispatcher(Backend p, ScalarType s) {
- auto* type = getLegacyTHDispatcherRaw(p, s);
- if (!type) AT_ERROR(toString(p), toString(s), "THDispatcher is not enabled.");
- return *type;
+ // Like getLegacyTHDispatcher but returns nullptr instead of throwing when
+ // the dispatcher is merely absent; still errors out when the caller asked
+ // for an Undefined backend/scalar type, which is never a valid request.
+ // Triggers one-time device initialization for defined backends so that
+ // registry slots populated at init time (e.g. CUDA) are visible.
+ LegacyTHDispatcher* getLegacyTHDispatcherOpt(Backend p, ScalarType s) {
+ if (p != Backend::Undefined) {
+ initForDeviceType(backendToDeviceType(p));
+ // NB: there is no Complex for TH, so no initialization to be done.
+ }
+ auto dispatcher = getLegacyTHDispatcherRaw(p, s);
+
+ if(!dispatcher) {
+ if (p == Backend::Undefined || s == ScalarType::Undefined) {
+ AT_ERROR("Requested Undefined THDispatcher which is invalid. Backend:",
+ toString(p), "ScalarType: ", toString(s));
+ }
+ }
+
+ return dispatcher;
}
-private:
+
+ // Lazily initializes the legacy TH machinery for the given device type,
+ // exactly once per device type for the lifetime of the process.
+ void initForDeviceType(DeviceType p) {
+ static std::once_flag cpu_once;
+ static std::once_flag cuda_once;
+ static std::once_flag hip_once;
+ if (p == DeviceType::CPU) {
+ std::call_once(cpu_once, [] {
+ getLegacyDeviceTypeInit().initCPU();
+ });
+ } else if (p == DeviceType::CUDA) {
+ std::call_once(cuda_once, [] {
+ getLegacyDeviceTypeInit().initCUDA();
+ });
+ } else if (p == DeviceType::HIP) {
+ // Fix: HIP needs its own once-flag. Sharing cuda_once meant HIP
+ // initialization was silently skipped if CUDA had already been
+ // initialized (and vice versa) in a build exposing both device types.
+ std::call_once(hip_once, [] {
+ getLegacyDeviceTypeInit().initHIP();
+ });
+ }
+ }
+
// NB: dispatcher_registry has nullptr for all CUDA backends until
// CUDA initialization has occurred
LegacyTHDispatcherUniquePtr dispatcher_registry
TYPE_DEFAULT_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.cpp")
LEGACY_TH_DISPATCHER_H = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcher.h")
+LEGACY_TH_DISPATCHER_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcher.cpp")
+LEGACY_TH_DISPATCHER_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcherDerived.cpp")
+LEGACY_TH_DISPATCHER_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcherDerived.h")
REGISTER_CPU_H = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCPU.h")
REGISTER_CPU_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCPU.cpp")
TENSOR_METHODS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorMethods.h")
FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Functions.h")
+LEGACY_TH_FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHFunctions.h")
NATIVE_FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/NativeFunctions.h")
return env
+def generate_legacy_th_dispatcher(backend, density, scalar_type, declarations):
+    # Emits the LegacyTH<Backend><Scalar>Dispatcher .cpp/.h pair for one
+    # dense backend/scalar combination and returns the template environment.
+    # `density` must never be 'Sparse' (no TH dispatchers exist for sparse);
+    # `declarations` is accepted for signature parity but currently unused.
+ assert density != 'Sparse'
+ scalar_name, c_type, accreal, th_scalar_type, is_floating_type = scalar_type
+ env = {}
+ env['Backend'] = backend
+ env['Dispatcher'] = "LegacyTH{}{}Dispatcher".format(backend, scalar_name)
+
+    # CUDA-derived files are written through the CUDA file manager so they
+    # land in the CUDA output tree.
+ fm = file_manager
+ if backend == 'CUDA':
+ fm = cuda_file_manager
+
+ fm.write(env['Dispatcher'] + ".cpp", LEGACY_TH_DISPATCHER_DERIVED_CPP, env)
+ fm.write(env['Dispatcher'] + ".h", LEGACY_TH_DISPATCHER_DERIVED_H, env)
+
+ return env
+
+
def iterate_types():
for backend in backends:
for density in densities:
for f in core_files:
core_file_manager.will_write(f)
files = ['Declarations.yaml', 'TypeExtendedInterface.h', 'TypeDefault.cpp', 'TypeDefault.h',
- 'LegacyTHDispatcher.h',
+ 'LegacyTHDispatcher.h', 'LegacyTHDispatcher.cpp', 'LegacyTHFunctions.h',
'Functions.h', 'NativeFunctions.h', 'RegisterCPU.cpp', 'RegisterCPU.h']
for f in files:
file_manager.will_write(f)
for backend, density, scalar_types in iterate_types():
scalar_name = scalar_types[0]
full_backend = "Sparse" + backend if density == "Sparse" else backend
+ fm = file_manager
+ if backend == 'CUDA':
+ fm = cuda_file_manager
for kind in ["Type"]:
if kind != 'Type' and density == "Sparse":
# No Storage or Tensor for sparse
continue
- fm = file_manager
- if backend == 'CUDA':
- fm = cuda_file_manager
fm.will_write("{}{}{}.h".format(full_backend, scalar_name, kind))
fm.will_write("{}{}{}.cpp".format(full_backend, scalar_name, kind))
+ # output LegacyTHDispatchers
+ if density != 'Sparse':
+ fm.will_write("{}{}{}{}.h".format('LegacyTH', full_backend, scalar_name, 'Dispatcher'))
+ fm.will_write("{}{}{}{}.cpp".format('LegacyTH', full_backend, scalar_name, 'Dispatcher'))
def filter_by_extension(files, *extensions):
all_types.append(generate_storage_type_and_tensor(
backend, density, scalar_type, declarations))
+ all_legacy_th_dispatchers = []
+ for backend, density, scalar_type in iterate_types():
+ if density != 'Sparse':
+ all_legacy_th_dispatchers.append(generate_legacy_th_dispatcher(
+ backend, density, scalar_type, []))
+
core_files = {
'Type.h': TYPE_H,
'Tensor.h': TENSOR_H,
file_manager.write('TypeDefault.cpp', TYPE_DEFAULT_CPP, top_env)
file_manager.write('LegacyTHDispatcher.h', LEGACY_TH_DISPATCHER_H, top_env)
+ file_manager.write('LegacyTHDispatcher.cpp', LEGACY_TH_DISPATCHER_CPP, top_env)
file_manager.write('RegisterCPU.h', REGISTER_CPU_H, top_env)
file_manager.write('RegisterCPU.cpp', REGISTER_CPU_CPP, top_env)
cuda_file_manager.write('RegisterCUDA.cpp', REGISTER_CUDA_CPP, top_env)
file_manager.write('Functions.h', FUNCTIONS_H, top_env)
+ file_manager.write('LegacyTHFunctions.h', LEGACY_TH_FUNCTIONS_H, top_env)
file_manager.write('NativeFunctions.h', NATIVE_FUNCTIONS_H, top_env)
#include <ATen/CPUApplyUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
+#include <ATen/LegacyTHFunctions.h>
#include <ATen/native/LinearAlgebraUtils.h>
// TODO: #7102: It's not necessary to have gesv (single) bindings for both
// TH and ATen. We should remove the TH gesv bindings, especially
// since the lapackGesv function is already in ATen.
- return at::_th_gesv_single(self, A);
+ return at::legacy::th::_th_gesv_single(self, A);
}
Tensor self_broadcasted, A_broadcasted;
AT_CHECK(self.dim() == 2 && A.dim() == 2,
"torch.gesv() with the `out` keyword does not support batching. "
"b.dim() (", self.dim(), ") and A.dim() (", A.dim(), ") must both be 2.");
- return at::_th_gesv_single_out(solution, lu, self, A);
+ return at::legacy::th::_th_gesv_single_out(solution, lu, self, A);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ inverse ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
return at::empty_like(self);
}
if (self.dim() == 2) {
- return at::_th_getri_single(self);
+ return at::legacy::th::_th_getri_single(self);
}
squareCheckInputs(self);
return at::_inverse_helper(self);
// Supports arbitrary batch dimensions for self and A
Tensor potrs(const Tensor& self, const Tensor& A, bool upper) {
if (self.dim() <= 2 && A.dim() <= 2) {
- return at::_th_potrs_single(self, A, upper);
+ return at::legacy::th::_th_potrs_single(self, A, upper);
}
Tensor self_broadcasted, A_broadcasted;
AT_CHECK(self.dim() == 2 && A.dim() == 2,
"torch.potrs() with the `out` keyword does not support batching. "
"b.dim() (", self.dim(), ") and A.dim() (", A.dim(), ") must both be 2.");
- return at::_th_potrs_single_out(result, self, A, upper);
+ return at::legacy::th::_th_potrs_single_out(result, self, A, upper);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ cholesky ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
return at::empty_like(self);
}
if (self.dim() == 2) {
- return at::_th_potrf_single(self, upper);
+ return at::legacy::th::_th_potrf_single(self, upper);
}
squareCheckInputs(self);
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
+#include <ATen/LegacyTHFunctions.h>
#include <ATen/ExpandUtils.h>
#include <ATen/native/TensorIterator.h>
"index_copy_(): Number of indices (", numIndices, ") should be equal to source.size(dim) (", source.size(dim), ")");
}
- return at::_th_index_copy_(self, dim, index, source);
+ return at::legacy::th::_th_index_copy_(self, dim, index, source);
}
}} // at::native
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
+#include <ATen/LegacyTHFunctions.h>
#include <ATen/core/SparseTensorRef.h>
#include <ATen/ExpandUtils.h>
if (_has_native(self)) {
return native_clone(self);
} else {
- return _th_clone(self);
+ return legacy::th::_th_clone(self);
}
}
if (_has_native(self)) {
return native_resize_as_(self, the_template);
} else {
- return _th_resize_as_(self, the_template);
+ return legacy::th::_th_resize_as_(self, the_template);
}
}
if (_has_native(self)) {
return native_pow_out(result, self, exponent);
} else {
- return _th_pow_out(result, self, exponent);
+ return legacy::th::_th_pow_out(result, self, exponent);
}
}
if (_has_native(self)) {
return native_pow(self, exponent);
} else {
- return _th_pow(self, exponent);
+ return legacy::th::_th_pow(self, exponent);
}
}
if (_has_native(self)) {
return native_zero_(self);
} else {
- return _th_zero_(self);
+ return legacy::th::_th_zero_(self);
}
}
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm_out");
return s_native_addmm_out(result, b_self, mat1, mat2, beta, alpha);
} else {
- return _th_addmm_out(result, self, mat1, mat2, beta, alpha);
+ return legacy::th::_th_addmm_out(result, self, mat1, mat2, beta, alpha);
}
}
std::tie(b_self) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
return s_native_addmm(b_self, mat1, mat2, beta, alpha);
} else {
- return _th_addmm(self, mat1, mat2, beta, alpha);
+ return legacy::th::_th_addmm(self, mat1, mat2, beta, alpha);
}
}
// inplace is not broadcasting
return s_native_addmm_(self, mat1, mat2, beta, alpha);
} else {
- return _th_addmm_(self, mat1, mat2, beta, alpha);
+ return legacy::th::_th_addmm_(self, mat1, mat2, beta, alpha);
}
}
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
+#include <ATen/LegacyTHFunctions.h>
namespace at { namespace native {
}
Tensor & set_(Tensor& self, Storage source) {
- return at::_th_set_(self, source);
+ return at::legacy::th::_th_set_(self, source);
}
Tensor & set_(Tensor& self, Storage source, int64_t storage_offset, IntList size, IntList stride) {
- return at::_th_set_(self, source, storage_offset, size, stride);
+ return at::legacy::th::_th_set_(self, source, storage_offset, size, stride);
}
Tensor & set_(Tensor& self, const Tensor & source) {
- return at::_th_set_(self, source);
+ return at::legacy::th::_th_set_(self, source);
}
Tensor & set_(Tensor& self) {
- return at::_th_set_(self);
+ return at::legacy::th::_th_set_(self);
}
bool is_set_to(const Tensor& self, const Tensor & tensor) {
- return at::_th_is_set_to(self, tensor);
+ return at::legacy::th::_th_is_set_to(self, tensor);
}
Tensor & masked_fill_(Tensor& self, const Tensor & mask, Scalar value) {
- return at::_th_masked_fill_(self, mask, value);
+ return at::legacy::th::_th_masked_fill_(self, mask, value);
}
Tensor & masked_fill_(Tensor& self, const Tensor & mask, const Tensor & value) {
- return at::_th_masked_fill_(self, mask, value);
+ return at::legacy::th::_th_masked_fill_(self, mask, value);
}
Tensor & masked_scatter_(Tensor& self, const Tensor & mask, const Tensor & source) {
- return at::_th_masked_scatter_(self, mask, source);
+ return at::legacy::th::_th_masked_scatter_(self, mask, source);
}
Tensor view(const Tensor& self, IntList size) {
- return at::_th_view(self, size);
+ return at::legacy::th::_th_view(self, size);
}
Tensor & put_(Tensor& self, const Tensor & index, const Tensor & source, bool accumulate) {
- return at::_th_put_(self, index, source, accumulate);
+ return at::legacy::th::_th_put_(self, index, source, accumulate);
}
Tensor & index_add_(Tensor& self, int64_t dim, const Tensor & index, const Tensor & source) {
- return at::_th_index_add_(self, dim, index, source);
+ return at::legacy::th::_th_index_add_(self, dim, index, source);
}
Tensor & index_fill_(Tensor& self, int64_t dim, const Tensor & index, Scalar value) {
- return at::_th_index_fill_(self, dim, index, value);
+ return at::legacy::th::_th_index_fill_(self, dim, index, value);
}
Tensor & index_fill_(Tensor& self, int64_t dim, const Tensor & index, const Tensor & value) {
- return at::_th_index_fill_(self, dim, index, value);
+ return at::legacy::th::_th_index_fill_(self, dim, index, value);
}
Tensor & scatter_(Tensor& self, int64_t dim, const Tensor & index, const Tensor & src) {
- return at::_th_scatter_(self, dim, index, src);
+ return at::legacy::th::_th_scatter_(self, dim, index, src);
}
Tensor & scatter_(Tensor& self, int64_t dim, const Tensor & index, Scalar value) {
- return at::_th_scatter_(self, dim, index, value);
+ return at::legacy::th::_th_scatter_(self, dim, index, value);
}
Tensor & scatter_add_(Tensor& self, int64_t dim, const Tensor & index, const Tensor & src) {
- return at::_th_scatter_add_(self, dim, index, src);
+ return at::legacy::th::_th_scatter_add_(self, dim, index, src);
}
Tensor & lt_(Tensor& self, Scalar other) {
- return at::_th_lt_(self, other);
+ return at::legacy::th::_th_lt_(self, other);
}
Tensor & lt_(Tensor& self, const Tensor & other) {
- return at::_th_lt_(self, other);
+ return at::legacy::th::_th_lt_(self, other);
}
Tensor & gt_(Tensor& self, Scalar other) {
- return at::_th_gt_(self, other);
+ return at::legacy::th::_th_gt_(self, other);
}
Tensor & gt_(Tensor& self, const Tensor & other) {
- return at::_th_gt_(self, other);
+ return at::legacy::th::_th_gt_(self, other);
}
Tensor & le_(Tensor& self, Scalar other) {
- return at::_th_le_(self, other);
+ return at::legacy::th::_th_le_(self, other);
}
Tensor & le_(Tensor& self, const Tensor & other) {
- return at::_th_le_(self, other);
+ return at::legacy::th::_th_le_(self, other);
}
Tensor & ge_(Tensor& self, Scalar other) {
- return at::_th_ge_(self, other);
+ return at::legacy::th::_th_ge_(self, other);
}
Tensor & ge_(Tensor& self, const Tensor & other) {
- return at::_th_ge_(self, other);
+ return at::legacy::th::_th_ge_(self, other);
}
Tensor & eq_(Tensor& self, Scalar other) {
- return at::_th_eq_(self, other);
+ return at::legacy::th::_th_eq_(self, other);
}
// In-place tensor equality comparison.
Tensor & eq_(Tensor& self, const Tensor & other) {
- return at::_th_ge_(self, other);
+ // Fix: eq_ must dispatch to _th_eq_, not _th_ge_ — the old line carried a
+ // copy-paste bug (note eq_(Tensor&, Scalar) above correctly calls _th_eq_,
+ // as do the out-of-place eq/eq_out wrappers).
+ return at::legacy::th::_th_eq_(self, other);
}
Tensor & ne_(Tensor& self, Scalar other) {
- return at::_th_ne_(self, other);
+ return at::legacy::th::_th_ne_(self, other);
}
Tensor & ne_(Tensor& self, const Tensor & other) {
- return at::_th_ne_(self, other);
+ return at::legacy::th::_th_ne_(self, other);
}
Tensor & lgamma_(Tensor& self) {
- return at::_th_lgamma_(self);
+ return at::legacy::th::_th_lgamma_(self);
}
Tensor & atan2_(Tensor& self, const Tensor & other) {
- return at::_th_atan2_(self, other);
+ return at::legacy::th::_th_atan2_(self, other);
}
Tensor & tril_(Tensor& self, int64_t diagonal) {
- return at::_th_tril_(self, diagonal);
+ return at::legacy::th::_th_tril_(self, diagonal);
}
Tensor & triu_(Tensor& self, int64_t diagonal) {
- return at::_th_triu_(self, diagonal);
+ return at::legacy::th::_th_triu_(self, diagonal);
}
Tensor & digamma_(Tensor& self) {
- return at::_th_digamma_(self);
+ return at::legacy::th::_th_digamma_(self);
}
Tensor & polygamma_(Tensor& self, int64_t n) {
- return at::_th_polygamma_(self, n);
+ return at::legacy::th::_th_polygamma_(self, n);
}
Tensor & erfinv_(Tensor& self) {
- return at::_th_erfinv_(self);
+ return at::legacy::th::_th_erfinv_(self);
}
Tensor & frac_(Tensor& self) {
- return at::_th_frac_(self);
+ return at::legacy::th::_th_frac_(self);
}
Tensor & renorm_(Tensor& self, Scalar p, int64_t dim, Scalar maxnorm) {
- return at::_th_renorm_(self, p, dim, maxnorm);
+ return at::legacy::th::_th_renorm_(self, p, dim, maxnorm);
}
Tensor & reciprocal_(Tensor& self) {
- return at::_th_reciprocal_(self);
+ return at::legacy::th::_th_reciprocal_(self);
}
Tensor & neg_(Tensor& self) {
- return at::_th_neg_(self);
+ return at::legacy::th::_th_neg_(self);
}
Tensor & pow_(Tensor& self, Scalar exponent) {
- return at::_th_pow_(self, exponent);
+ return at::legacy::th::_th_pow_(self, exponent);
}
Tensor & pow_(Tensor& self, const Tensor & exponent) {
- return at::_th_pow_(self, exponent);
+ return at::legacy::th::_th_pow_(self, exponent);
}
Tensor & lerp_(Tensor& self, const Tensor & end, Scalar weight) {
- return at::_th_lerp_(self, end, weight);
+ return at::legacy::th::_th_lerp_(self, end, weight);
}
Tensor & sign_(Tensor& self) {
- return at::_th_sign_(self);
+ return at::legacy::th::_th_sign_(self);
}
Tensor & fmod_(Tensor& self, Scalar other) {
- return at::_th_fmod_(self, other);
+ return at::legacy::th::_th_fmod_(self, other);
}
Tensor & fmod_(Tensor& self, const Tensor & other) {
- return at::_th_fmod_(self, other);
+ return at::legacy::th::_th_fmod_(self, other);
}
Tensor & remainder_(Tensor& self, Scalar other) {
- return at::_th_remainder_(self, other);
+ return at::legacy::th::_th_remainder_(self, other);
}
Tensor & remainder_(Tensor& self, const Tensor & other) {
- return at::_th_remainder_(self, other);
+ return at::legacy::th::_th_remainder_(self, other);
}
Tensor & addbmm_(Tensor& self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
- return at::_th_addbmm_(self, batch1, batch2, beta, alpha);
+ return at::legacy::th::_th_addbmm_(self, batch1, batch2, beta, alpha);
}
Tensor & addbmm_out(Tensor & result, const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
- return at::_th_addbmm_out(result, self, batch1, batch2, beta, alpha);
+ return at::legacy::th::_th_addbmm_out(result, self, batch1, batch2, beta, alpha);
}
Tensor addbmm(const Tensor & self, const Tensor & batch1, const Tensor & batch2, Scalar beta, Scalar alpha) {
- return at::_th_addbmm(self, batch1, batch2, beta, alpha);
+ return at::legacy::th::_th_addbmm(self, batch1, batch2, beta, alpha);
}
Tensor & addcmul_(Tensor& self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) {
- return at::_th_addcmul_(self, tensor1, tensor2, value);
+ return at::legacy::th::_th_addcmul_(self, tensor1, tensor2, value);
}
Tensor & addcdiv_(Tensor& self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) {
- return at::_th_addcdiv_(self, tensor1, tensor2, value);
+ return at::legacy::th::_th_addcdiv_(self, tensor1, tensor2, value);
}
Tensor & random_(Tensor& self, int64_t from, int64_t to, Generator * generator) {
- return at::_th_random_(self, from, to, generator);
+ return at::legacy::th::_th_random_(self, from, to, generator);
}
Tensor & random_(Tensor& self, int64_t to, Generator * generator) {
- return at::_th_random_(self, to, generator);
+ return at::legacy::th::_th_random_(self, to, generator);
}
Tensor & random_(Tensor& self, Generator * generator) {
- return at::_th_random_(self, generator);
+ return at::legacy::th::_th_random_(self, generator);
}
Tensor & uniform_(Tensor& self, double from, double to, Generator * generator) {
- return at::_th_uniform_(self, from, to, generator);
+ return at::legacy::th::_th_uniform_(self, from, to, generator);
}
Tensor & normal_(Tensor& self, double mean, double std, Generator * generator) {
- return at::_th_normal_(self, mean, std, generator);
+ return at::legacy::th::_th_normal_(self, mean, std, generator);
}
Tensor & cauchy_(Tensor& self, double median, double sigma, Generator * generator) {
- return at::_th_cauchy_(self, median, sigma, generator);
+ return at::legacy::th::_th_cauchy_(self, median, sigma, generator);
}
Tensor & log_normal_(Tensor& self, double mean, double std, Generator * generator) {
- return at::_th_log_normal_(self, mean, std, generator);
+ return at::legacy::th::_th_log_normal_(self, mean, std, generator);
}
Tensor & exponential_(Tensor& self, double lambd, Generator * generator) {
- return at::_th_exponential_(self, lambd, generator);
+ return at::legacy::th::_th_exponential_(self, lambd, generator);
}
Tensor & geometric_(Tensor& self, double p, Generator * generator) {
- return at::_th_geometric_(self, p, generator);
+ return at::legacy::th::_th_geometric_(self, p, generator);
}
// Functions
Tensor & diag_out(Tensor & result, const Tensor & self, int64_t diagonal) {
- return at::_th_diag_out(result, self, diagonal);
+ return at::legacy::th::_th_diag_out(result, self, diagonal);
}
Tensor diag(const Tensor & self, int64_t diagonal) {
- return at::_th_diag(self, diagonal);
+ return at::legacy::th::_th_diag(self, diagonal);
}
Tensor & cross_out(Tensor & result, const Tensor & self, const Tensor & other, int64_t dim) {
- return at::_th_cross_out(result, self, other, dim);
+ return at::legacy::th::_th_cross_out(result, self, other, dim);
}
Tensor cross(const Tensor & self, const Tensor & other, int64_t dim) {
- return at::_th_cross(self, other, dim);
+ return at::legacy::th::_th_cross(self, other, dim);
}
Tensor & triu_out(Tensor & result, const Tensor & self, int64_t diagonal) {
- return at::_th_triu_out(result, self, diagonal);
+ return at::legacy::th::_th_triu_out(result, self, diagonal);
}
Tensor triu(const Tensor & self, int64_t diagonal) {
- return at::_th_triu(self, diagonal);
+ return at::legacy::th::_th_triu(self, diagonal);
}
Tensor & tril_out(Tensor & result, const Tensor & self, int64_t diagonal) {
- return at::_th_tril_out(result, self, diagonal);
+ return at::legacy::th::_th_tril_out(result, self, diagonal);
}
Tensor tril(const Tensor & self, int64_t diagonal) {
- return at::_th_tril(self, diagonal);
+ return at::legacy::th::_th_tril(self, diagonal);
}
Tensor trace(const Tensor & self) {
- return at::_th_trace(self);
+ return at::legacy::th::_th_trace(self);
}
Tensor & ne_out(Tensor & result, const Tensor & self, Scalar other) {
- return at::_th_ne_out(result, self, other);
+ return at::legacy::th::_th_ne_out(result, self, other);
}
Tensor ne(const Tensor & self, Scalar other) {
- return at::_th_ne(self, other);
+ return at::legacy::th::_th_ne(self, other);
}
Tensor & ne_out(Tensor & result, const Tensor & self, const Tensor & other) {
- return at::_th_ne_out(result, self, other);
+ return at::legacy::th::_th_ne_out(result, self, other);
}
Tensor ne(const Tensor & self, const Tensor & other) {
- return at::_th_ne(self, other);
+ return at::legacy::th::_th_ne(self, other);
}
Tensor & eq_out(Tensor & result, const Tensor & self, Scalar other) {
- return at::_th_eq_out(result, self, other);
+ return at::legacy::th::_th_eq_out(result, self, other);
}
Tensor eq(const Tensor & self, Scalar other) {
- return at::_th_eq(self, other);
+ return at::legacy::th::_th_eq(self, other);
}
Tensor & eq_out(Tensor & result, const Tensor & self, const Tensor & other) {
- return at::_th_eq_out(result, self, other);
+ return at::legacy::th::_th_eq_out(result, self, other);
}
Tensor eq(const Tensor & self, const Tensor & other) {
- return at::_th_eq(self, other);
+ return at::legacy::th::_th_eq(self, other);
}
Tensor & ge_out(Tensor & result, const Tensor & self, Scalar other) {
- return at::_th_ge_out(result, self, other);
+ return at::legacy::th::_th_ge_out(result, self, other);
}
Tensor ge(const Tensor & self, Scalar other) {
- return at::_th_ge(self, other);
+ return at::legacy::th::_th_ge(self, other);
}
Tensor & ge_out(Tensor & result, const Tensor & self, const Tensor & other) {
- return at::_th_ge_out(result, self, other);
+ return at::legacy::th::_th_ge_out(result, self, other);
}
Tensor ge(const Tensor & self, const Tensor & other) {
- return at::_th_ge(self, other);
+ return at::legacy::th::_th_ge(self, other);
}
Tensor & le_out(Tensor & result, const Tensor & self, Scalar other) {
- return at::_th_le_out(result, self, other);
+ return at::legacy::th::_th_le_out(result, self, other);
}
Tensor le(const Tensor & self, Scalar other) {
- return at::_th_le(self, other);
+ return at::legacy::th::_th_le(self, other);
}
Tensor & le_out(Tensor & result, const Tensor & self, const Tensor & other) {
- return at::_th_le_out(result, self, other);
+ return at::legacy::th::_th_le_out(result, self, other);
}
Tensor le(const Tensor & self, const Tensor & other) {
- return at::_th_le(self, other);
+ return at::legacy::th::_th_le(self, other);
}
Tensor & gt_out(Tensor & result, const Tensor & self, Scalar other) {
- return at::_th_gt_out(result, self, other);
+ return at::legacy::th::_th_gt_out(result, self, other);
}
Tensor gt(const Tensor & self, Scalar other) {
- return at::_th_gt(self, other);
+ return at::legacy::th::_th_gt(self, other);
}
Tensor & gt_out(Tensor & result, const Tensor & self, const Tensor & other) {
- return at::_th_gt_out(result, self, other);
+ return at::legacy::th::_th_gt_out(result, self, other);
}
Tensor gt(const Tensor & self, const Tensor & other) {
- return at::_th_gt(self, other);
+ return at::legacy::th::_th_gt(self, other);
}
Tensor & lt_out(Tensor & result, const Tensor & self, Scalar other) {
- return at::_th_lt_out(result, self, other);
+ return at::legacy::th::_th_lt_out(result, self, other);
}
Tensor lt(const Tensor & self, Scalar other) {
- return at::_th_lt(self, other);
+ return at::legacy::th::_th_lt(self, other);
}
Tensor & lt_out(Tensor & result, const Tensor & self, const Tensor & other) {
- return at::_th_lt_out(result, self, other);
+ return at::legacy::th::_th_lt_out(result, self, other);
}
Tensor lt(const Tensor & self, const Tensor & other) {
- return at::_th_lt(self, other);
+ return at::legacy::th::_th_lt(self, other);
}
Tensor & take_out(Tensor & result, const Tensor & self, const Tensor & index) {
- return at::_th_take_out(result, self, index);
+ return at::legacy::th::_th_take_out(result, self, index);
}
Tensor take(const Tensor & self, const Tensor & index) {
- return at::_th_take(self, index);
+ return at::legacy::th::_th_take(self, index);
}
Tensor & index_select_out(Tensor & result, const Tensor & self, int64_t dim, const Tensor & index) {
- return at::_th_index_select_out(result, self, dim, index);
+ return at::legacy::th::_th_index_select_out(result, self, dim, index);
}
Tensor index_select(const Tensor & self, int64_t dim, const Tensor & index) {
- return at::_th_index_select(self, dim, index);
+ return at::legacy::th::_th_index_select(self, dim, index);
}
Tensor & masked_select_out(Tensor & result, const Tensor & self, const Tensor & mask) {
- return at::_th_masked_select_out(result, self, mask);
+ return at::legacy::th::_th_masked_select_out(result, self, mask);
}
Tensor masked_select(const Tensor & self, const Tensor & mask) {
- return at::_th_masked_select(self, mask);
+ return at::legacy::th::_th_masked_select(self, mask);
}
Tensor & nonzero_out(Tensor & result, const Tensor & self) {
- return at::_th_nonzero_out(result, self);
+ return at::legacy::th::_th_nonzero_out(result, self);
}
Tensor nonzero(const Tensor & self) {
- return at::_th_nonzero(self);
+ return at::legacy::th::_th_nonzero(self);
}
Tensor & gather_out(Tensor & result, const Tensor & self, int64_t dim, const Tensor & index) {
- return at::_th_gather_out(result, self, dim, index);
+ return at::legacy::th::_th_gather_out(result, self, dim, index);
}
Tensor gather(const Tensor & self, int64_t dim, const Tensor & index) {
- return at::_th_gather(self, dim, index);
+ return at::legacy::th::_th_gather(self, dim, index);
}
Tensor & addcmul_out(Tensor & result, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) {
- return at::_th_addcmul_out(result, self, tensor1, tensor2, value);
+ return at::legacy::th::_th_addcmul_out(result, self, tensor1, tensor2, value);
}
Tensor addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) {
- return at::_th_addcmul(self, tensor1, tensor2, value);
+ return at::legacy::th::_th_addcmul(self, tensor1, tensor2, value);
}
Tensor & addcdiv_out(Tensor & result, const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) {
- return at::_th_addcdiv_out(result, self, tensor1, tensor2, value);
+ return at::legacy::th::_th_addcdiv_out(result, self, tensor1, tensor2, value);
}
Tensor addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) {
- return at::_th_addcdiv(self, tensor1, tensor2, value);
+ return at::legacy::th::_th_addcdiv(self, tensor1, tensor2, value);
}
std::tuple<Tensor &,Tensor &> gels_out(Tensor & X, Tensor & qr, const Tensor & self, const Tensor & A) {
- return at::_th_gels_out(X, qr, self, A);
+ return at::legacy::th::_th_gels_out(X, qr, self, A);
}
std::tuple<Tensor,Tensor> gels(const Tensor & self, const Tensor & A) {
- return at::_th_gels(self, A);
+ return at::legacy::th::_th_gels(self, A);
}
std::tuple<Tensor &,Tensor &> trtrs_out(Tensor & X, Tensor & M, const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) {
- return at::_th_trtrs_out(X, M, self, A, upper, transpose, unitriangular);
+ return at::legacy::th::_th_trtrs_out(X, M, self, A, upper, transpose, unitriangular);
}
std::tuple<Tensor,Tensor> trtrs(const Tensor & self, const Tensor & A, bool upper, bool transpose, bool unitriangular) {
- return at::_th_trtrs(self, A, upper, transpose, unitriangular);
+ return at::legacy::th::_th_trtrs(self, A, upper, transpose, unitriangular);
}
std::tuple<Tensor &,Tensor &> symeig_out(Tensor & e, Tensor & V, const Tensor & self, bool eigenvectors, bool upper) {
- return at::_th_symeig_out(e, V, self, eigenvectors, upper);
+ return at::legacy::th::_th_symeig_out(e, V, self, eigenvectors, upper);
}
std::tuple<Tensor,Tensor> symeig(const Tensor & self, bool eigenvectors, bool upper) {
- return at::_th_symeig(self, eigenvectors, upper);
+ return at::legacy::th::_th_symeig(self, eigenvectors, upper);
}
std::tuple<Tensor &,Tensor &> eig_out(Tensor & e, Tensor & v, const Tensor & self, bool eigenvectors) {
- return at::_th_eig_out(e, v, self, eigenvectors);
+ return at::legacy::th::_th_eig_out(e, v, self, eigenvectors);
}
std::tuple<Tensor,Tensor> eig(const Tensor & self, bool eigenvectors) {
- return at::_th_eig(self, eigenvectors);
+ return at::legacy::th::_th_eig(self, eigenvectors);
}
std::tuple<Tensor &,Tensor &,Tensor &> svd_out(Tensor & U, Tensor & S, Tensor & V, const Tensor & self, bool some, bool compute_uv) {
- return at::_th_svd_out(U, S, V, self, some, compute_uv);
+ return at::legacy::th::_th_svd_out(U, S, V, self, some, compute_uv);
}
std::tuple<Tensor,Tensor,Tensor> svd(const Tensor & self, bool some, bool compute_uv) {
- return at::_th_svd(self, some, compute_uv);
+ return at::legacy::th::_th_svd(self, some, compute_uv);
}
Tensor & potri_out(Tensor & result, const Tensor & self, bool upper) {
- return at::_th_potri_out(result, self, upper);
+ return at::legacy::th::_th_potri_out(result, self, upper);
}
Tensor potri(const Tensor & self, bool upper) {
- return at::_th_potri(self, upper);
+ return at::legacy::th::_th_potri(self, upper);
}
std::tuple<Tensor &,Tensor &> pstrf_out(Tensor & u, Tensor & piv, const Tensor & self, bool upper, Scalar tol) {
- return at::_th_pstrf_out(u, piv, self, upper, tol);
+ return at::legacy::th::_th_pstrf_out(u, piv, self, upper, tol);
}
std::tuple<Tensor,Tensor> pstrf(const Tensor & self, bool upper, Scalar tol) {
- return at::_th_pstrf(self, upper, tol);
+ return at::legacy::th::_th_pstrf(self, upper, tol);
}
std::tuple<Tensor &,Tensor &> qr_out(Tensor & Q, Tensor & R, const Tensor & self) {
- return at::_th_qr_out(Q, R, self);
+ return at::legacy::th::_th_qr_out(Q, R, self);
}
std::tuple<Tensor,Tensor> qr(const Tensor & self) {
- return at::_th_qr(self);
+ return at::legacy::th::_th_qr(self);
}
std::tuple<Tensor &,Tensor &> geqrf_out(Tensor & result0, Tensor & result1, const Tensor & self) {
}
std::tuple<Tensor,Tensor> geqrf(const Tensor & self) {
- return at::_th_geqrf(self);
+ return at::legacy::th::_th_geqrf(self);
}
Tensor & orgqr_out(Tensor & result, const Tensor & self, const Tensor & input2) {
- return at::_th_orgqr_out(result, self, input2);
+ return at::legacy::th::_th_orgqr_out(result, self, input2);
}
Tensor orgqr(const Tensor & self, const Tensor & input2) {
- return at::_th_orgqr(self, input2);
+ return at::legacy::th::_th_orgqr(self, input2);
}
Tensor & ormqr_out(Tensor & result, const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose) {
- return at::_th_ormqr_out(result, self, input2, input3, left, transpose);
+ return at::legacy::th::_th_ormqr_out(result, self, input2, input3, left, transpose);
}
Tensor ormqr(const Tensor & self, const Tensor & input2, const Tensor & input3, bool left, bool transpose) {
- return at::_th_ormqr(self, input2, input3, left, transpose);
+ return at::legacy::th::_th_ormqr(self, input2, input3, left, transpose);
}
std::tuple<Tensor &,Tensor &> btrifact_out(Tensor & A_LU, Tensor & pivots, const Tensor & self, bool pivot) {
- return at::_th_btrifact_out(A_LU, pivots, self, pivot);
+ return at::legacy::th::_th_btrifact_out(A_LU, pivots, self, pivot);
}
std::tuple<Tensor,Tensor> btrifact(const Tensor & self, bool pivot) {
- return at::_th_btrifact(self, pivot);
+ return at::legacy::th::_th_btrifact(self, pivot);
}
std::tuple<Tensor &,Tensor &,Tensor &> btrifact_with_info_out(Tensor & A_LU, Tensor & pivots, Tensor & info, const Tensor & self, bool pivot) {
- return at::_th_btrifact_with_info_out(A_LU, pivots, info, self, pivot);
+ return at::legacy::th::_th_btrifact_with_info_out(A_LU, pivots, info, self, pivot);
}
std::tuple<Tensor,Tensor,Tensor> btrifact_with_info(const Tensor & self, bool pivot) {
- return at::_th_btrifact_with_info(self, pivot);
+ return at::legacy::th::_th_btrifact_with_info(self, pivot);
}
Tensor & btrisolve_out(Tensor & result, const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) {
- return at::_th_btrisolve_out(result, self, LU_data, LU_pivots);
+ return at::legacy::th::_th_btrisolve_out(result, self, LU_data, LU_pivots);
}
Tensor btrisolve(const Tensor & self, const Tensor & LU_data, const Tensor & LU_pivots) {
- return at::_th_btrisolve(self, LU_data, LU_pivots);
+ return at::legacy::th::_th_btrisolve(self, LU_data, LU_pivots);
}
Tensor & multinomial_out(Tensor & result, const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) {
- return at::_th_multinomial_out(result, self, num_samples, replacement, generator);
+ return at::legacy::th::_th_multinomial_out(result, self, num_samples, replacement, generator);
}
Tensor multinomial(const Tensor & self, int64_t num_samples, bool replacement, Generator * generator) {
- return at::_th_multinomial(self, num_samples, replacement, generator);
+ return at::legacy::th::_th_multinomial(self, num_samples, replacement, generator);
}
Tensor & lgamma_out(Tensor & result, const Tensor & self) {
- return at::_th_lgamma_out(result, self);
+ return at::legacy::th::_th_lgamma_out(result, self);
}
Tensor lgamma(const Tensor & self) {
- return at::_th_lgamma(self);
+ return at::legacy::th::_th_lgamma(self);
}
Tensor & digamma_out(Tensor & result, const Tensor & self) {
- return at::_th_digamma_out(result, self);
+ return at::legacy::th::_th_digamma_out(result, self);
}
Tensor digamma(const Tensor & self) {
- return at::_th_digamma(self);
+ return at::legacy::th::_th_digamma(self);
}
Tensor & polygamma_out(Tensor & result, int64_t n, const Tensor & self) {
- return at::_th_polygamma_out(result, n, self);
+ return at::legacy::th::_th_polygamma_out(result, n, self);
}
Tensor polygamma(int64_t n, const Tensor & self) {
- return at::_th_polygamma(n, self);
+ return at::legacy::th::_th_polygamma(n, self);
}
Tensor & erfinv_out(Tensor & result, const Tensor & self) {
- return at::_th_erfinv_out(result, self);
+ return at::legacy::th::_th_erfinv_out(result, self);
}
Tensor erfinv(const Tensor & self) {
- return at::_th_erfinv(self);
+ return at::legacy::th::_th_erfinv(self);
}
Tensor & frac_out(Tensor & result, const Tensor & self) {
- return at::_th_frac_out(result, self);
+ return at::legacy::th::_th_frac_out(result, self);
}
Tensor frac(const Tensor & self) {
- return at::_th_frac(self);
+ return at::legacy::th::_th_frac(self);
}
Tensor dist(const Tensor & self, const Tensor & other, Scalar p) {
- return at::_th_dist(self, other, p);
+ return at::legacy::th::_th_dist(self, other, p);
}
Tensor & reciprocal_out(Tensor & result, const Tensor & self) {
- return at::_th_reciprocal_out(result, self);
+ return at::legacy::th::_th_reciprocal_out(result, self);
}
Tensor reciprocal(const Tensor & self) {
- return at::_th_reciprocal(self);
+ return at::legacy::th::_th_reciprocal(self);
}
Tensor & neg_out(Tensor & result, const Tensor & self) {
- return at::_th_neg_out(result, self);
+ return at::legacy::th::_th_neg_out(result, self);
}
Tensor neg(const Tensor & self) {
- return at::_th_neg(self);
+ return at::legacy::th::_th_neg(self);
}
Tensor & atan2_out(Tensor & result, const Tensor & self, const Tensor & other) {
- return at::_th_atan2_out(result, self, other);
+ return at::legacy::th::_th_atan2_out(result, self, other);
}
Tensor atan2(const Tensor & self, const Tensor & other) {
- return at::_th_atan2(self, other);
+ return at::legacy::th::_th_atan2(self, other);
}
Tensor & lerp_out(Tensor & result, const Tensor & self, const Tensor & end, Scalar weight) {
- return at::_th_lerp_out(result, self, end, weight);
+ return at::legacy::th::_th_lerp_out(result, self, end, weight);
}
Tensor lerp(const Tensor & self, const Tensor & end, Scalar weight) {
- return at::_th_lerp(self, end, weight);
+ return at::legacy::th::_th_lerp(self, end, weight);
}
Tensor & histc_out(Tensor & result, const Tensor & self, int64_t bins, Scalar min, Scalar max) {
- return at::_th_histc_out(result, self, bins, min, max);
+ return at::legacy::th::_th_histc_out(result, self, bins, min, max);
}
Tensor histc(const Tensor & self, int64_t bins, Scalar min, Scalar max) {
- return at::_th_histc(self, bins, min, max);
+ return at::legacy::th::_th_histc(self, bins, min, max);
}
Tensor & sign_out(Tensor & result, const Tensor & self) {
- return at::_th_sign_out(result, self);
+ return at::legacy::th::_th_sign_out(result, self);
}
Tensor sign(const Tensor & self) {
- return at::_th_sign(self);
+ return at::legacy::th::_th_sign(self);
}
Tensor & fmod_out(Tensor & result, const Tensor & self, Scalar other) {
- return at::_th_fmod_out(result, self, other);
+ return at::legacy::th::_th_fmod_out(result, self, other);
}
Tensor fmod(const Tensor & self, Scalar other) {
- return at::_th_fmod(self, other);
+ return at::legacy::th::_th_fmod(self, other);
}
Tensor & fmod_out(Tensor & result, const Tensor & self, const Tensor & other) {
- return at::_th_fmod_out(result, self, other);
+ return at::legacy::th::_th_fmod_out(result, self, other);
}
Tensor fmod(const Tensor & self, const Tensor & other) {
- return at::_th_fmod(self, other);
+ return at::legacy::th::_th_fmod(self, other);
}
Tensor & remainder_out(Tensor & result, const Tensor & self, Scalar other) {
- return at::_th_remainder_out(result, self, other);
+ return at::legacy::th::_th_remainder_out(result, self, other);
}
Tensor remainder(const Tensor & self, Scalar other) {
- return at::_th_remainder(self, other);
+ return at::legacy::th::_th_remainder(self, other);
}
Tensor & remainder_out(Tensor & result, const Tensor & self, const Tensor & other) {
- return at::_th_remainder_out(result, self, other);
+ return at::legacy::th::_th_remainder_out(result, self, other);
}
Tensor remainder(const Tensor & self, const Tensor & other) {
- return at::_th_remainder(self, other);
+ return at::legacy::th::_th_remainder(self, other);
}
Tensor & min_out(Tensor & result, const Tensor & self, const Tensor & other) {
- return at::_th_min_out(result, self, other);
+ return at::legacy::th::_th_min_out(result, self, other);
}
Tensor min(const Tensor & self, const Tensor & other) {
- return at::_th_min(self, other);
+ return at::legacy::th::_th_min(self, other);
}
Tensor min(const Tensor & self) {
- return at::_th_min(self);
+ return at::legacy::th::_th_min(self);
}
Tensor & max_out(Tensor & result, const Tensor & self, const Tensor & other) {
- return at::_th_max_out(result, self, other);
+ return at::legacy::th::_th_max_out(result, self, other);
}
Tensor max(const Tensor & self, const Tensor & other) {
- return at::_th_max(self, other);
+ return at::legacy::th::_th_max(self, other);
}
Tensor max(const Tensor & self) {
- return at::_th_max(self);
+ return at::legacy::th::_th_max(self);
}
Tensor median(const Tensor & self) {
- return at::_th_median(self);
+ return at::legacy::th::_th_median(self);
}
std::tuple<Tensor &,Tensor &> sort_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool descending) {
- return at::_th_sort_out(values, indices, self, dim, descending);
+ return at::legacy::th::_th_sort_out(values, indices, self, dim, descending);
}
std::tuple<Tensor,Tensor> sort(const Tensor & self, int64_t dim, bool descending) {
- return at::_th_sort(self, dim, descending);
+ return at::legacy::th::_th_sort(self, dim, descending);
}
std::tuple<Tensor &,Tensor &> topk_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
- return at::_th_topk_out(values, indices, self, k, dim, largest, sorted);
+ return at::legacy::th::_th_topk_out(values, indices, self, k, dim, largest, sorted);
}
std::tuple<Tensor,Tensor> topk(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
- return at::_th_topk(self, k, dim, largest, sorted);
+ return at::legacy::th::_th_topk(self, k, dim, largest, sorted);
}
Tensor all(const Tensor & self) {
- return at::_th_all(self);
+ return at::legacy::th::_th_all(self);
}
Tensor any(const Tensor & self) {
- return at::_th_any(self);
+ return at::legacy::th::_th_any(self);
}
Tensor & renorm_out(Tensor & result, const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) {
- return at::_th_renorm_out(result, self, p, dim, maxnorm);
+ return at::legacy::th::_th_renorm_out(result, self, p, dim, maxnorm);
}
Tensor renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) {
- return at::_th_renorm(self, p, dim, maxnorm);
+ return at::legacy::th::_th_renorm(self, p, dim, maxnorm);
}
Tensor unfold(const Tensor & self, int64_t dimension, int64_t size, int64_t step) {
- return at::_th_unfold(self, dimension, size, step);
+ return at::legacy::th::_th_unfold(self, dimension, size, step);
}
bool equal(const Tensor & self, const Tensor & other) {
- return at::_th_equal(self, other);
+ return at::legacy::th::_th_equal(self, other);
}
Tensor & pow_out(Tensor & result, const Tensor & self, const Tensor & exponent) {
- return at::_th_pow_out(result, self, exponent);
+ return at::legacy::th::_th_pow_out(result, self, exponent);
}
Tensor pow(const Tensor & self, const Tensor & exponent) {
- return at::_th_pow(self, exponent);
+ return at::legacy::th::_th_pow(self, exponent);
}
Tensor & pow_out(Tensor & result, Scalar self, const Tensor & exponent) {
- return at::_th_pow_out(result, self, exponent);
+ return at::legacy::th::_th_pow_out(result, self, exponent);
}
Tensor pow(Scalar self, const Tensor & exponent) {
- return at::_th_pow(self, exponent);
+ return at::legacy::th::_th_pow(self, exponent);
}
Tensor & normal_out(Tensor & output, const Tensor & mean, double std, Generator * generator) {
- return at::_th_normal_out(output, mean, std, generator);
+ return at::legacy::th::_th_normal_out(output, mean, std, generator);
}
Tensor normal(const Tensor & mean, double std, Generator * generator) {
- return at::_th_normal(mean, std, generator);
+ return at::legacy::th::_th_normal(mean, std, generator);
}
Tensor & normal_out(Tensor & output, double mean, const Tensor & std, Generator * generator) {
- return at::_th_normal_out(output, mean, std, generator);
+ return at::legacy::th::_th_normal_out(output, mean, std, generator);
}
Tensor normal(double mean, const Tensor & std, Generator * generator) {
- return at::_th_normal(mean, std, generator);
+ return at::legacy::th::_th_normal(mean, std, generator);
}
Tensor & normal_out(Tensor & output, const Tensor & mean, const Tensor & std, Generator * generator) {
- return at::_th_normal_out(output, mean, std, generator);
+ return at::legacy::th::_th_normal_out(output, mean, std, generator);
}
Tensor normal(const Tensor & mean, const Tensor & std, Generator * generator) {
- return at::_th_normal(mean, std, generator);
+ return at::legacy::th::_th_normal(mean, std, generator);
}
Tensor alias(const Tensor & self) {
- return at::_th_alias(self);
+ return at::legacy::th::_th_alias(self);
}
Tensor & _dirichlet_grad_out(Tensor & output, const Tensor & x, const Tensor & alpha, const Tensor & total) {
- return at::_th_dirichlet_grad_out(output, x, alpha, total);
+ return at::legacy::th::_th_dirichlet_grad_out(output, x, alpha, total);
}
Tensor _dirichlet_grad(const Tensor & x, const Tensor & alpha, const Tensor & total) {
- return at::_th_dirichlet_grad(x, alpha, total);
+ return at::legacy::th::_th_dirichlet_grad(x, alpha, total);
}
Tensor __and__(const Tensor & self, Scalar other) {
- return at::_th_and(self, other);
+ return at::legacy::th::_th_and(self, other);
}
Tensor __and__(const Tensor & self, const Tensor & other) {
- return at::_th_and(self, other);
+ return at::legacy::th::_th_and(self, other);
}
Tensor __or__(const Tensor & self, Scalar other) {
- return at::_th_or(self, other);
+ return at::legacy::th::_th_or(self, other);
}
Tensor __or__(const Tensor & self, const Tensor & other) {
- return at::_th_or(self, other);
+ return at::legacy::th::_th_or(self, other);
}
Tensor __xor__(const Tensor & self, Scalar other) {
- return at::_th_xor(self, other);
+ return at::legacy::th::_th_xor(self, other);
}
Tensor __xor__(const Tensor & self, const Tensor & other) {
- return at::_th_xor(self, other);
+ return at::legacy::th::_th_xor(self, other);
}
Tensor __lshift__(const Tensor & self, Scalar other) {
- return at::_th_lshift(self, other);
+ return at::legacy::th::_th_lshift(self, other);
}
Tensor __lshift__(const Tensor & self, const Tensor & other) {
- return at::_th_lshift(self, other);
+ return at::legacy::th::_th_lshift(self, other);
}
Tensor __rshift__(const Tensor & self, Scalar other) {
- return at::_th_rshift(self, other);
+ return at::legacy::th::_th_rshift(self, other);
}
Tensor __rshift__(const Tensor & self, const Tensor & other) {
- return at::_th_rshift(self, other);
+ return at::legacy::th::_th_rshift(self, other);
}
Tensor & __iand__(Tensor & self, Scalar other) {
- return at::_th_iand_(self, other);
+ return at::legacy::th::_th_iand_(self, other);
}
Tensor & __iand__(Tensor & self, const Tensor & other) {
- return at::_th_iand_(self, other);
+ return at::legacy::th::_th_iand_(self, other);
}
Tensor & __ior__(Tensor & self, Scalar other) {
- return at::_th_ior_(self, other);
+ return at::legacy::th::_th_ior_(self, other);
}
Tensor & __ior__(Tensor & self, const Tensor & other) {
- return at::_th_ior_(self, other);
+ return at::legacy::th::_th_ior_(self, other);
}
Tensor & __ixor__(Tensor & self, Scalar other) {
- return at::_th_ixor_(self, other);
+ return at::legacy::th::_th_ixor_(self, other);
}
Tensor & __ixor__(Tensor & self, const Tensor & other) {
- return at::_th_ixor_(self, other);
+ return at::legacy::th::_th_ixor_(self, other);
}
Tensor & __ilshift__(Tensor & self, Scalar other) {
- return at::_th_ilshift_(self, other);
+ return at::legacy::th::_th_ilshift_(self, other);
}
Tensor & __ilshift__(Tensor & self, const Tensor & other) {
- return at::_th_ilshift_(self, other);
+ return at::legacy::th::_th_ilshift_(self, other);
}
Tensor & __irshift__(Tensor & self, Scalar other) {
- return at::_th_irshift_(self, other);
+ return at::legacy::th::_th_irshift_(self, other);
}
Tensor & __irshift__(Tensor & self, const Tensor & other) {
- return at::_th_irshift_(self, other);
+ return at::legacy::th::_th_irshift_(self, other);
}
}} // namespace at::native
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
+#include <ATen/LegacyTHFunctions.h>
namespace at { namespace native {
Tensor & binary_cross_entropy_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) {
- return at::_thnn_binary_cross_entropy_out(output, self, target, weight, reduction);
+ return at::legacy::th::_thnn_binary_cross_entropy_out(output, self, target, weight, reduction);
}
Tensor binary_cross_entropy(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) {
- return at::_thnn_binary_cross_entropy(self, target, weight, reduction);
+ return at::legacy::th::_thnn_binary_cross_entropy(self, target, weight, reduction);
}
Tensor & binary_cross_entropy_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) {
- return at::_thnn_binary_cross_entropy_backward_out(grad_input, grad_output, self, target, weight, reduction);
+ return at::legacy::th::_thnn_binary_cross_entropy_backward_out(grad_input, grad_output, self, target, weight, reduction);
}
Tensor binary_cross_entropy_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction) {
- return at::_thnn_binary_cross_entropy_backward(grad_output, self, target, weight, reduction);
+ return at::legacy::th::_thnn_binary_cross_entropy_backward(grad_output, self, target, weight, reduction);
}
Tensor & mse_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_mse_loss_out(output, self, target, reduction);
+ return at::legacy::th::_thnn_mse_loss_out(output, self, target, reduction);
}
Tensor mse_loss(const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_mse_loss(self, target, reduction);
+ return at::legacy::th::_thnn_mse_loss(self, target, reduction);
}
Tensor & mse_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_mse_loss_backward_out(grad_input, grad_output, self, target, reduction);
+ return at::legacy::th::_thnn_mse_loss_backward_out(grad_input, grad_output, self, target, reduction);
}
Tensor mse_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_mse_loss_backward(grad_output, self, target, reduction);
+ return at::legacy::th::_thnn_mse_loss_backward(grad_output, self, target, reduction);
}
Tensor & l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_l1_loss_out(output, self, target, reduction);
+ return at::legacy::th::_thnn_l1_loss_out(output, self, target, reduction);
}
Tensor l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_l1_loss(self, target, reduction);
+ return at::legacy::th::_thnn_l1_loss(self, target, reduction);
}
Tensor & l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_l1_loss_backward_out(grad_input, grad_output, self, target, reduction);
+ return at::legacy::th::_thnn_l1_loss_backward_out(grad_input, grad_output, self, target, reduction);
}
Tensor l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_l1_loss_backward(grad_output, self, target, reduction);
+ return at::legacy::th::_thnn_l1_loss_backward(grad_output, self, target, reduction);
}
Tensor & multi_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target,
Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) {
- return at::_thnn_multi_margin_loss_out(output, self, target, p, margin, weight, reduction);
+ return at::legacy::th::_thnn_multi_margin_loss_out(output, self, target, p, margin, weight, reduction);
}
Tensor multi_margin_loss(const Tensor & self, const Tensor & target,
Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) {
- return at::_thnn_multi_margin_loss(self, target, p, margin, weight, reduction);
+ return at::legacy::th::_thnn_multi_margin_loss(self, target, p, margin, weight, reduction);
}
Tensor & multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target,
Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) {
- return at::_thnn_multi_margin_loss_backward_out(grad_input, grad_output, self, target, p, margin, weight, reduction);
+ return at::legacy::th::_thnn_multi_margin_loss_backward_out(grad_input, grad_output, self, target, p, margin, weight, reduction);
}
Tensor multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target,
Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) {
- return at::_thnn_multi_margin_loss_backward(grad_output, self, target, p, margin, weight, reduction);
+ return at::legacy::th::_thnn_multi_margin_loss_backward(grad_output, self, target, p, margin, weight, reduction);
}
Tensor & multilabel_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) {
  Tensor is_target = at::empty({0}, self.options());
  return std::get<0>(at::multilabel_margin_loss_forward_out(output, is_target, self, target, reduction));
}
std::tuple<Tensor &,Tensor &> multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_multilabel_margin_loss_forward_out(output, is_target, self, target, reduction);
+ return at::legacy::th::_thnn_multilabel_margin_loss_forward_out(output, is_target, self, target, reduction);
}
std::tuple<Tensor,Tensor> multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_multilabel_margin_loss_forward(self, target, reduction);
+ return at::legacy::th::_thnn_multilabel_margin_loss_forward(self, target, reduction);
}
Tensor & multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) {
- return at::_thnn_multilabel_margin_loss_backward_out(grad_input, grad_output, self, target, reduction, is_target);
+ return at::legacy::th::_thnn_multilabel_margin_loss_backward_out(grad_input, grad_output, self, target, reduction, is_target);
}
Tensor multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) {
- return at::_thnn_multilabel_margin_loss_backward(grad_output, self, target, reduction, is_target);
+ return at::legacy::th::_thnn_multilabel_margin_loss_backward(grad_output, self, target, reduction, is_target);
}
Tensor & nll_loss_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) {
  Tensor total_weight = at::empty({0}, self.options());
  return std::get<0>(at::nll_loss_forward_out(output, total_weight, self, target, weight, reduction, ignore_index));
}
std::tuple<Tensor &,Tensor &> nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) {
- return at::_thnn_nll_loss_forward_out(output, total_weight, self, target, weight, reduction, ignore_index);
+ return at::legacy::th::_thnn_nll_loss_forward_out(output, total_weight, self, target, weight, reduction, ignore_index);
}
std::tuple<Tensor,Tensor> nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) {
- return at::_thnn_nll_loss_forward(self, target, weight, reduction, ignore_index);
+ return at::legacy::th::_thnn_nll_loss_forward(self, target, weight, reduction, ignore_index);
}
Tensor & nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) {
- return at::_thnn_nll_loss_backward_out(grad_input, grad_output, self, target, weight, reduction, ignore_index, total_weight);
+ return at::legacy::th::_thnn_nll_loss_backward_out(grad_input, grad_output, self, target, weight, reduction, ignore_index, total_weight);
}
Tensor nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) {
- return at::_thnn_nll_loss_backward(grad_output, self, target, weight, reduction, ignore_index, total_weight);
+ return at::legacy::th::_thnn_nll_loss_backward(grad_output, self, target, weight, reduction, ignore_index, total_weight);
}
Tensor & nll_loss2d_out(Tensor & output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) {
  Tensor total_weight = at::empty({0}, self.options());
  return std::get<0>(at::nll_loss2d_forward_out(output, total_weight, self, target, weight, reduction, ignore_index));
}
std::tuple<Tensor &,Tensor &> nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) {
- return at::_thnn_nll_loss2d_forward_out(output, total_weight, self, target, weight, reduction, ignore_index);
+ return at::legacy::th::_thnn_nll_loss2d_forward_out(output, total_weight, self, target, weight, reduction, ignore_index);
}
std::tuple<Tensor,Tensor> nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) {
- return at::_thnn_nll_loss2d_forward(self, target, weight, reduction, ignore_index);
+ return at::legacy::th::_thnn_nll_loss2d_forward(self, target, weight, reduction, ignore_index);
}
Tensor & nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) {
- return at::_thnn_nll_loss2d_backward_out(grad_input, grad_output, self, target, weight, reduction, ignore_index, total_weight);
+ return at::legacy::th::_thnn_nll_loss2d_backward_out(grad_input, grad_output, self, target, weight, reduction, ignore_index, total_weight);
}
Tensor nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) {
- return at::_thnn_nll_loss2d_backward(grad_output, self, target, weight, reduction, ignore_index, total_weight);
+ return at::legacy::th::_thnn_nll_loss2d_backward(grad_output, self, target, weight, reduction, ignore_index, total_weight);
}
Tensor & smooth_l1_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_smooth_l1_loss_out(output, self, target, reduction);
+ return at::legacy::th::_thnn_smooth_l1_loss_out(output, self, target, reduction);
}
Tensor smooth_l1_loss(const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_smooth_l1_loss(self, target, reduction);
+ return at::legacy::th::_thnn_smooth_l1_loss(self, target, reduction);
}
Tensor & smooth_l1_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self,
const Tensor & target, int64_t reduction) {
- return at::_thnn_smooth_l1_loss_backward_out(grad_input, grad_output, self, target, reduction);
+ return at::legacy::th::_thnn_smooth_l1_loss_backward_out(grad_input, grad_output, self, target, reduction);
}
Tensor smooth_l1_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_smooth_l1_loss_backward(grad_output, self, target, reduction);
+ return at::legacy::th::_thnn_smooth_l1_loss_backward(grad_output, self, target, reduction);
}
Tensor & soft_margin_loss_out(Tensor & output, const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_soft_margin_loss_out(output, self, target, reduction);
+ return at::legacy::th::_thnn_soft_margin_loss_out(output, self, target, reduction);
}
Tensor soft_margin_loss(const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_soft_margin_loss(self, target, reduction);
+ return at::legacy::th::_thnn_soft_margin_loss(self, target, reduction);
}
Tensor & soft_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self,
const Tensor & target, int64_t reduction) {
- return at::_thnn_soft_margin_loss_backward_out(grad_input, grad_output, self, target, reduction);
+ return at::legacy::th::_thnn_soft_margin_loss_backward_out(grad_input, grad_output, self, target, reduction);
}
Tensor soft_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction) {
- return at::_thnn_soft_margin_loss_backward(grad_output, self, target, reduction);
+ return at::legacy::th::_thnn_soft_margin_loss_backward(grad_output, self, target, reduction);
}
Tensor & elu_out(Tensor & output, const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) {
- return at::_thnn_elu_out(output, self, alpha, scale, input_scale);
+ return at::legacy::th::_thnn_elu_out(output, self, alpha, scale, input_scale);
}
Tensor elu(const Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) {
- return at::_thnn_elu(self, alpha, scale, input_scale);
+ return at::legacy::th::_thnn_elu(self, alpha, scale, input_scale);
}
Tensor & elu_backward_out(Tensor & grad_input, const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) {
- return at::_thnn_elu_backward_out(grad_input, grad_output, alpha, scale, input_scale, output);
+ return at::legacy::th::_thnn_elu_backward_out(grad_input, grad_output, alpha, scale, input_scale, output);
}
Tensor elu_backward(const Tensor & grad_output, Scalar alpha, Scalar scale, Scalar input_scale, const Tensor & output) {
- return at::_thnn_elu_backward(grad_output, alpha, scale, input_scale, output);
+ return at::legacy::th::_thnn_elu_backward(grad_output, alpha, scale, input_scale, output);
}
Tensor & elu_(Tensor & self, Scalar alpha, Scalar scale, Scalar input_scale) {
- return at::_thnn_elu_(self, alpha, scale, input_scale);
+ return at::legacy::th::_thnn_elu_(self, alpha, scale, input_scale);
}
Tensor & glu_out(Tensor & output, const Tensor & self, int64_t dim) {
- return at::_thnn_glu_out(output, self, dim);
+ return at::legacy::th::_thnn_glu_out(output, self, dim);
}
Tensor glu(const Tensor & self, int64_t dim) {
- return at::_thnn_glu(self, dim);
+ return at::legacy::th::_thnn_glu(self, dim);
}
Tensor & glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim) {
- return at::_thnn_glu_backward_out(grad_input, grad_output, self, dim);
+ return at::legacy::th::_thnn_glu_backward_out(grad_input, grad_output, self, dim);
}
Tensor glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) {
- return at::_thnn_glu_backward(grad_output, self, dim);
+ return at::legacy::th::_thnn_glu_backward(grad_output, self, dim);
}
Tensor & hardtanh_out(Tensor & output, const Tensor & self, Scalar min_val, Scalar max_val) {
- return at::_thnn_hardtanh_out(output, self, min_val, max_val);
+ return at::legacy::th::_thnn_hardtanh_out(output, self, min_val, max_val);
}
Tensor hardtanh(const Tensor & self, Scalar min_val, Scalar max_val) {
- return at::_thnn_hardtanh(self, min_val, max_val);
+ return at::legacy::th::_thnn_hardtanh(self, min_val, max_val);
}
Tensor & hardtanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) {
- return at::_thnn_hardtanh_backward_out(grad_input, grad_output, self, min_val, max_val);
+ return at::legacy::th::_thnn_hardtanh_backward_out(grad_input, grad_output, self, min_val, max_val);
}
Tensor hardtanh_backward(const Tensor & grad_output, const Tensor & self, Scalar min_val, Scalar max_val) {
- return at::_thnn_hardtanh_backward(grad_output, self, min_val, max_val);
+ return at::legacy::th::_thnn_hardtanh_backward(grad_output, self, min_val, max_val);
}
Tensor & hardtanh_(Tensor & self, Scalar min_val, Scalar max_val) {
- return at::_thnn_hardtanh_(self, min_val, max_val);
+ return at::legacy::th::_thnn_hardtanh_(self, min_val, max_val);
}
Tensor & leaky_relu_out(Tensor & output, const Tensor & self, Scalar negative_slope) {
- return at::_thnn_leaky_relu_out(output, self, negative_slope);
+ return at::legacy::th::_thnn_leaky_relu_out(output, self, negative_slope);
}
Tensor leaky_relu(const Tensor & self, Scalar negative_slope) {
- return at::_thnn_leaky_relu(self, negative_slope);
+ return at::legacy::th::_thnn_leaky_relu(self, negative_slope);
}
Tensor & leaky_relu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar negative_slope) {
- return at::_thnn_leaky_relu_backward_out(grad_input, grad_output, self, negative_slope);
+ return at::legacy::th::_thnn_leaky_relu_backward_out(grad_input, grad_output, self, negative_slope);
}
Tensor leaky_relu_backward(const Tensor & grad_output, const Tensor & self, Scalar negative_slope) {
- return at::_thnn_leaky_relu_backward(grad_output, self, negative_slope);
+ return at::legacy::th::_thnn_leaky_relu_backward(grad_output, self, negative_slope);
}
Tensor & leaky_relu_(Tensor & self, Scalar negative_slope) {
- return at::_thnn_leaky_relu_(self, negative_slope);
+ return at::legacy::th::_thnn_leaky_relu_(self, negative_slope);
}
Tensor & log_sigmoid_out(Tensor & output, const Tensor & self) {
Tensor buffer = at::empty({0}, self.options());
- return std::get<0>(at::_thnn_log_sigmoid_forward_out(output, buffer, self));
+ return std::get<0>(at::legacy::th::_thnn_log_sigmoid_forward_out(output, buffer, self));
}
Tensor log_sigmoid(const Tensor & self) {
Tensor buffer = at::empty({0}, self.options());
- return std::get<0>(at::_thnn_log_sigmoid_forward(self, buffer));
+ return std::get<0>(at::legacy::th::_thnn_log_sigmoid_forward(self, buffer));
}
std::tuple<Tensor &,Tensor &> log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self) {
- return at::_thnn_log_sigmoid_forward_out(output, buffer, self);
+ return at::legacy::th::_thnn_log_sigmoid_forward_out(output, buffer, self);
}
std::tuple<Tensor,Tensor> log_sigmoid_forward(const Tensor & self) {
- return at::_thnn_log_sigmoid_forward(self);
+ return at::legacy::th::_thnn_log_sigmoid_forward(self);
}
Tensor & log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer) {
- return at::_thnn_log_sigmoid_backward_out(grad_input, grad_output, self, buffer);
+ return at::legacy::th::_thnn_log_sigmoid_backward_out(grad_input, grad_output, self, buffer);
}
Tensor log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) {
- return at::_thnn_log_sigmoid_backward(grad_output, self, buffer);
+ return at::legacy::th::_thnn_log_sigmoid_backward(grad_output, self, buffer);
}
Tensor & rrelu_with_noise_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) {
- return at::_thnn_rrelu_with_noise_out(output, self, noise, lower, upper, training, generator);
+ return at::legacy::th::_thnn_rrelu_with_noise_out(output, self, noise, lower, upper, training, generator);
}
Tensor rrelu_with_noise(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) {
- return at::_thnn_rrelu_with_noise(self, noise, lower, upper, training, generator);
+ return at::legacy::th::_thnn_rrelu_with_noise(self, noise, lower, upper, training, generator);
}
Tensor & rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) {
- return at::_thnn_rrelu_with_noise_backward_out(grad_input, grad_output, self, noise, lower, upper, training);
+ return at::legacy::th::_thnn_rrelu_with_noise_backward_out(grad_input, grad_output, self, noise, lower, upper, training);
}
Tensor rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) {
- return at::_thnn_rrelu_with_noise_backward(grad_output, self, noise, lower, upper, training);
+ return at::legacy::th::_thnn_rrelu_with_noise_backward(grad_output, self, noise, lower, upper, training);
}
Tensor & rrelu_with_noise_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, Generator * generator) {
- return at::_thnn_rrelu_with_noise_(self, noise, lower, upper, training, generator);
+ return at::legacy::th::_thnn_rrelu_with_noise_(self, noise, lower, upper, training, generator);
}
Tensor & softplus_out(Tensor & output, const Tensor & self, Scalar beta, Scalar threshold) {
- return at::_thnn_softplus_out(output, self, beta, threshold);
+ return at::legacy::th::_thnn_softplus_out(output, self, beta, threshold);
}
Tensor softplus(const Tensor & self, Scalar beta, Scalar threshold) {
- return at::_thnn_softplus(self, beta, threshold);
+ return at::legacy::th::_thnn_softplus(self, beta, threshold);
}
Tensor & softplus_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) {
- return at::_thnn_softplus_backward_out(grad_input, grad_output, self, beta, threshold, output);
+ return at::legacy::th::_thnn_softplus_backward_out(grad_input, grad_output, self, beta, threshold, output);
}
Tensor softplus_backward(const Tensor & grad_output, const Tensor & self, Scalar beta, Scalar threshold, const Tensor & output) {
- return at::_thnn_softplus_backward(grad_output, self, beta, threshold, output);
+ return at::legacy::th::_thnn_softplus_backward(grad_output, self, beta, threshold, output);
}
Tensor & softshrink_out(Tensor & output, const Tensor & self, Scalar lambd) {
- return at::_thnn_softshrink_out(output, self, lambd);
+ return at::legacy::th::_thnn_softshrink_out(output, self, lambd);
}
Tensor softshrink(const Tensor & self, Scalar lambd) {
- return at::_thnn_softshrink(self, lambd);
+ return at::legacy::th::_thnn_softshrink(self, lambd);
}
Tensor & softshrink_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, Scalar lambd) {
- return at::_thnn_softshrink_backward_out(grad_input, grad_output, self, lambd);
+ return at::legacy::th::_thnn_softshrink_backward_out(grad_input, grad_output, self, lambd);
}
Tensor softshrink_backward(const Tensor & grad_output, const Tensor & self, Scalar lambd) {
- return at::_thnn_softshrink_backward(grad_output, self, lambd);
+ return at::legacy::th::_thnn_softshrink_backward(grad_output, self, lambd);
}
Tensor & adaptive_avg_pool2d_out(Tensor & output, const Tensor & self, IntList output_size) {
- return at::_thnn_adaptive_avg_pool2d_out(output, self, output_size);
+ return at::legacy::th::_thnn_adaptive_avg_pool2d_out(output, self, output_size);
}
Tensor adaptive_avg_pool2d(const Tensor & self, IntList output_size) {
- return at::_thnn_adaptive_avg_pool2d(self, output_size);
+ return at::legacy::th::_thnn_adaptive_avg_pool2d(self, output_size);
}
Tensor & adaptive_avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self) {
- return at::_thnn_adaptive_avg_pool2d_backward_out(grad_input, grad_output, self);
+ return at::legacy::th::_thnn_adaptive_avg_pool2d_backward_out(grad_input, grad_output, self);
}
Tensor adaptive_avg_pool2d_backward(const Tensor & grad_output, const Tensor & self) {
- return at::_thnn_adaptive_avg_pool2d_backward(grad_output, self);
+ return at::legacy::th::_thnn_adaptive_avg_pool2d_backward(grad_output, self);
}
Tensor & adaptive_avg_pool3d_out(Tensor & output, const Tensor & self, IntList output_size) {
- return at::_thnn_adaptive_avg_pool3d_out(output, self, output_size);
+ return at::legacy::th::_thnn_adaptive_avg_pool3d_out(output, self, output_size);
}
Tensor adaptive_avg_pool3d(const Tensor & self, IntList output_size) {
- return at::_thnn_adaptive_avg_pool3d(self, output_size);
+ return at::legacy::th::_thnn_adaptive_avg_pool3d(self, output_size);
}
Tensor & adaptive_avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self) {
- return at::_thnn_adaptive_avg_pool3d_backward_out(grad_input, grad_output, self);
+ return at::legacy::th::_thnn_adaptive_avg_pool3d_backward_out(grad_input, grad_output, self);
}
Tensor adaptive_avg_pool3d_backward(const Tensor & grad_output, const Tensor & self) {
- return at::_thnn_adaptive_avg_pool3d_backward(grad_output, self);
+ return at::legacy::th::_thnn_adaptive_avg_pool3d_backward(grad_output, self);
}
std::tuple<Tensor &,Tensor &> adaptive_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntList output_size) {
- return at::_thnn_adaptive_max_pool2d_out(output, indices, self, output_size);
+ return at::legacy::th::_thnn_adaptive_max_pool2d_out(output, indices, self, output_size);
}
std::tuple<Tensor,Tensor> adaptive_max_pool2d(const Tensor & self, IntList output_size) {
- return at::_thnn_adaptive_max_pool2d(self, output_size);
+ return at::legacy::th::_thnn_adaptive_max_pool2d(self, output_size);
}
Tensor & adaptive_max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices) {
- return at::_thnn_adaptive_max_pool2d_backward_out(grad_input, grad_output, self, indices);
+ return at::legacy::th::_thnn_adaptive_max_pool2d_backward_out(grad_input, grad_output, self, indices);
}
Tensor adaptive_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) {
- return at::_thnn_adaptive_max_pool2d_backward(grad_output, self, indices);
+ return at::legacy::th::_thnn_adaptive_max_pool2d_backward(grad_output, self, indices);
}
std::tuple<Tensor &,Tensor &> adaptive_max_pool3d_out(Tensor & output, Tensor & indices, const Tensor & self, IntList output_size) {
- return at::_thnn_adaptive_max_pool3d_out(output, indices, self, output_size);
+ return at::legacy::th::_thnn_adaptive_max_pool3d_out(output, indices, self, output_size);
}
std::tuple<Tensor,Tensor> adaptive_max_pool3d(const Tensor & self, IntList output_size) {
- return at::_thnn_adaptive_max_pool3d(self, output_size);
+ return at::legacy::th::_thnn_adaptive_max_pool3d(self, output_size);
}
Tensor & adaptive_max_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices) {
- return at::_thnn_adaptive_max_pool3d_backward_out(grad_input, grad_output, self, indices);
+ return at::legacy::th::_thnn_adaptive_max_pool3d_backward_out(grad_input, grad_output, self, indices);
}
Tensor adaptive_max_pool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices) {
- return at::_thnn_adaptive_max_pool3d_backward(grad_output, self, indices);
+ return at::legacy::th::_thnn_adaptive_max_pool3d_backward(grad_output, self, indices);
}
Tensor & avg_pool2d_out(Tensor & output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
- return at::_thnn_avg_pool2d_out(output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+ return at::legacy::th::_thnn_avg_pool2d_out(output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
Tensor avg_pool2d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
- return at::_thnn_avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+ return at::legacy::th::_thnn_avg_pool2d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
Tensor & avg_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
- return at::_thnn_avg_pool2d_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+ return at::legacy::th::_thnn_avg_pool2d_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
Tensor avg_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
- return at::_thnn_avg_pool2d_backward(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+ return at::legacy::th::_thnn_avg_pool2d_backward(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
Tensor & avg_pool3d_out(Tensor & output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
- return at::_thnn_avg_pool3d_out(output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+ return at::legacy::th::_thnn_avg_pool3d_out(output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
Tensor avg_pool3d(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
- return at::_thnn_avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+ return at::legacy::th::_thnn_avg_pool3d(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
Tensor & avg_pool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
- return at::_thnn_avg_pool3d_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+ return at::legacy::th::_thnn_avg_pool3d_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
Tensor avg_pool3d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, bool ceil_mode, bool count_include_pad) {
- return at::_thnn_avg_pool3d_backward(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
+ return at::legacy::th::_thnn_avg_pool3d_backward(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad);
}
std::tuple<Tensor &,Tensor &> fractional_max_pool2d_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & random_samples) {
- return at::_thnn_fractional_max_pool2d_out(output, indices, self, kernel_size, output_size, random_samples);
+ return at::legacy::th::_thnn_fractional_max_pool2d_out(output, indices, self, kernel_size, output_size, random_samples);
}
std::tuple<Tensor,Tensor> fractional_max_pool2d(const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & random_samples) {
- return at::_thnn_fractional_max_pool2d(self, kernel_size, output_size, random_samples);
+ return at::legacy::th::_thnn_fractional_max_pool2d(self, kernel_size, output_size, random_samples);
}
Tensor & fractional_max_pool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & indices) {
- return at::_thnn_fractional_max_pool2d_backward_out(grad_input, grad_output, self, kernel_size, output_size, indices);
+ return at::legacy::th::_thnn_fractional_max_pool2d_backward_out(grad_input, grad_output, self, kernel_size, output_size, indices);
}
Tensor fractional_max_pool2d_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList output_size, const Tensor & indices) {
- return at::_thnn_fractional_max_pool2d_backward(grad_output, self, kernel_size, output_size, indices);
+ return at::legacy::th::_thnn_fractional_max_pool2d_backward(grad_output, self, kernel_size, output_size, indices);
}
std::tuple<Tensor &,Tensor &> max_pool2d_with_indices_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
- return at::_thnn_max_pool2d_with_indices_out(output, indices, self, kernel_size, stride, padding, dilation, ceil_mode);
+ return at::legacy::th::_thnn_max_pool2d_with_indices_out(output, indices, self, kernel_size, stride, padding, dilation, ceil_mode);
}
std::tuple<Tensor,Tensor> max_pool2d_with_indices(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
- return at::_thnn_max_pool2d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
+ return at::legacy::th::_thnn_max_pool2d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
}
Tensor & max_pool2d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
- return at::_thnn_max_pool2d_with_indices_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
+ return at::legacy::th::_thnn_max_pool2d_with_indices_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}
Tensor max_pool2d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
- return at::_thnn_max_pool2d_with_indices_backward(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
+ return at::legacy::th::_thnn_max_pool2d_with_indices_backward(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}
std::tuple<Tensor &,Tensor &> max_pool3d_with_indices_out(Tensor & output, Tensor & indices, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
- return at::_thnn_max_pool3d_with_indices_out(output, indices, self, kernel_size, stride, padding, dilation, ceil_mode);
+ return at::legacy::th::_thnn_max_pool3d_with_indices_out(output, indices, self, kernel_size, stride, padding, dilation, ceil_mode);
}
std::tuple<Tensor,Tensor> max_pool3d_with_indices(const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode) {
- return at::_thnn_max_pool3d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
+ return at::legacy::th::_thnn_max_pool3d_with_indices(self, kernel_size, stride, padding, dilation, ceil_mode);
}
Tensor & max_pool3d_with_indices_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
- return at::_thnn_max_pool3d_with_indices_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
+ return at::legacy::th::_thnn_max_pool3d_with_indices_backward_out(grad_input, grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}
Tensor max_pool3d_with_indices_backward(const Tensor & grad_output, const Tensor & self, IntList kernel_size, IntList stride, IntList padding, IntList dilation, bool ceil_mode, const Tensor & indices) {
- return at::_thnn_max_pool3d_with_indices_backward(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
+ return at::legacy::th::_thnn_max_pool3d_with_indices_backward(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
}
Tensor & max_unpool2d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntList output_size) {
- return at::_thnn_max_unpool2d_out(output, self, indices, output_size);
+ return at::legacy::th::_thnn_max_unpool2d_out(output, self, indices, output_size);
}
Tensor max_unpool2d(const Tensor & self, const Tensor & indices, IntList output_size) {
- return at::_thnn_max_unpool2d(self, indices, output_size);
+ return at::legacy::th::_thnn_max_unpool2d(self, indices, output_size);
}
Tensor & max_unpool2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size) {
- return at::_thnn_max_unpool2d_backward_out(grad_input, grad_output, self, indices, output_size);
+ return at::legacy::th::_thnn_max_unpool2d_backward_out(grad_input, grad_output, self, indices, output_size);
}
Tensor max_unpool2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size) {
- return at::_thnn_max_unpool2d_backward(grad_output, self, indices, output_size);
+ return at::legacy::th::_thnn_max_unpool2d_backward(grad_output, self, indices, output_size);
}
Tensor & max_unpool3d_out(Tensor & output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
- return at::_thnn_max_unpool3d_out(output, self, indices, output_size, stride, padding);
+ return at::legacy::th::_thnn_max_unpool3d_out(output, self, indices, output_size, stride, padding);
}
Tensor max_unpool3d(const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
- return at::_thnn_max_unpool3d(self, indices, output_size, stride, padding);
+ return at::legacy::th::_thnn_max_unpool3d(self, indices, output_size, stride, padding);
}
Tensor & max_unpool3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
- return at::_thnn_max_unpool3d_backward_out(grad_input, grad_output, self, indices, output_size, stride, padding);
+ return at::legacy::th::_thnn_max_unpool3d_backward_out(grad_input, grad_output, self, indices, output_size, stride, padding);
}
Tensor max_unpool3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & indices, IntList output_size, IntList stride, IntList padding) {
- return at::_thnn_max_unpool3d_backward(grad_output, self, indices, output_size, stride, padding);
+ return at::legacy::th::_thnn_max_unpool3d_backward(grad_output, self, indices, output_size, stride, padding);
}
Tensor & reflection_pad1d_out(Tensor & output, const Tensor & self, IntList padding) {
- return at::_thnn_reflection_pad1d_out(output, self, padding);
+ return at::legacy::th::_thnn_reflection_pad1d_out(output, self, padding);
}
Tensor reflection_pad1d(const Tensor & self, IntList padding) {
- return at::_thnn_reflection_pad1d(self, padding);
+ return at::legacy::th::_thnn_reflection_pad1d(self, padding);
}
Tensor & reflection_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList padding) {
- return at::_thnn_reflection_pad1d_backward_out(grad_input, grad_output, self, padding);
+ return at::legacy::th::_thnn_reflection_pad1d_backward_out(grad_input, grad_output, self, padding);
}
Tensor reflection_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) {
- return at::_thnn_reflection_pad1d_backward(grad_output, self, padding);
+ return at::legacy::th::_thnn_reflection_pad1d_backward(grad_output, self, padding);
}
Tensor & reflection_pad2d_out(Tensor & output, const Tensor & self, IntList padding) {
- return at::_thnn_reflection_pad2d_out(output, self, padding);
+ return at::legacy::th::_thnn_reflection_pad2d_out(output, self, padding);
}
Tensor reflection_pad2d(const Tensor & self, IntList padding) {
- return at::_thnn_reflection_pad2d(self, padding);
+ return at::legacy::th::_thnn_reflection_pad2d(self, padding);
}
Tensor & reflection_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList padding) {
- return at::_thnn_reflection_pad2d_backward_out(grad_input, grad_output, self, padding);
+ return at::legacy::th::_thnn_reflection_pad2d_backward_out(grad_input, grad_output, self, padding);
}
Tensor reflection_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) {
- return at::_thnn_reflection_pad2d_backward(grad_output, self, padding);
+ return at::legacy::th::_thnn_reflection_pad2d_backward(grad_output, self, padding);
}
Tensor & replication_pad1d_out(Tensor & output, const Tensor & self, IntList padding) {
- return at::_thnn_replication_pad1d_out(output, self, padding);
+ return at::legacy::th::_thnn_replication_pad1d_out(output, self, padding);
}
Tensor replication_pad1d(const Tensor & self, IntList padding) {
- return at::_thnn_replication_pad1d(self, padding);
+ return at::legacy::th::_thnn_replication_pad1d(self, padding);
}
Tensor & replication_pad1d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList padding) {
- return at::_thnn_replication_pad1d_backward_out(grad_input, grad_output, self, padding);
+ return at::legacy::th::_thnn_replication_pad1d_backward_out(grad_input, grad_output, self, padding);
}
Tensor replication_pad1d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) {
- return at::_thnn_replication_pad1d_backward(grad_output, self, padding);
+ return at::legacy::th::_thnn_replication_pad1d_backward(grad_output, self, padding);
}
Tensor & replication_pad2d_out(Tensor & output, const Tensor & self, IntList padding) {
- return at::_thnn_replication_pad2d_out(output, self, padding);
+ return at::legacy::th::_thnn_replication_pad2d_out(output, self, padding);
}
Tensor replication_pad2d(const Tensor & self, IntList padding) {
- return at::_thnn_replication_pad2d(self, padding);
+ return at::legacy::th::_thnn_replication_pad2d(self, padding);
}
Tensor & replication_pad2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList padding) {
- return at::_thnn_replication_pad2d_backward_out(grad_input, grad_output, self, padding);
+ return at::legacy::th::_thnn_replication_pad2d_backward_out(grad_input, grad_output, self, padding);
}
Tensor replication_pad2d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) {
- return at::_thnn_replication_pad2d_backward(grad_output, self, padding);
+ return at::legacy::th::_thnn_replication_pad2d_backward(grad_output, self, padding);
}
Tensor & replication_pad3d_out(Tensor & output, const Tensor & self, IntList padding) {
- return at::_thnn_replication_pad3d_out(output, self, padding);
+ return at::legacy::th::_thnn_replication_pad3d_out(output, self, padding);
}
Tensor replication_pad3d(const Tensor & self, IntList padding) {
- return at::_thnn_replication_pad3d(self, padding);
+ return at::legacy::th::_thnn_replication_pad3d(self, padding);
}
Tensor & replication_pad3d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, IntList padding) {
- return at::_thnn_replication_pad3d_backward_out(grad_input, grad_output, self, padding);
+ return at::legacy::th::_thnn_replication_pad3d_backward_out(grad_input, grad_output, self, padding);
}
Tensor replication_pad3d_backward(const Tensor & grad_output, const Tensor & self, IntList padding) {
- return at::_thnn_replication_pad3d_backward(grad_output, self, padding);
+ return at::legacy::th::_thnn_replication_pad3d_backward(grad_output, self, padding);
}
Tensor & upsample_linear1d_out(Tensor & output, const Tensor & self, IntList output_size, bool align_corners) {
- return at::_thnn_upsample_linear1d_out(output, self, output_size, align_corners);
+ return at::legacy::th::_thnn_upsample_linear1d_out(output, self, output_size, align_corners);
}
Tensor upsample_linear1d(const Tensor & self, IntList output_size, bool align_corners) {
- return at::_thnn_upsample_linear1d(self, output_size, align_corners);
+ return at::legacy::th::_thnn_upsample_linear1d(self, output_size, align_corners);
}
Tensor & upsample_linear1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
- return at::_thnn_upsample_linear1d_backward_out(grad_input, grad_output, output_size, input_size, align_corners);
+ return at::legacy::th::_thnn_upsample_linear1d_backward_out(grad_input, grad_output, output_size, input_size, align_corners);
}
Tensor upsample_linear1d_backward(const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
- return at::_thnn_upsample_linear1d_backward(grad_output, output_size, input_size, align_corners);
+ return at::legacy::th::_thnn_upsample_linear1d_backward(grad_output, output_size, input_size, align_corners);
}
Tensor & upsample_bilinear2d_out(Tensor & output, const Tensor & self, IntList output_size, bool align_corners) {
- return at::_thnn_upsample_bilinear2d_out(output, self, output_size, align_corners);
+ return at::legacy::th::_thnn_upsample_bilinear2d_out(output, self, output_size, align_corners);
}
Tensor upsample_bilinear2d(const Tensor & self, IntList output_size, bool align_corners) {
- return at::_thnn_upsample_bilinear2d(self, output_size, align_corners);
+ return at::legacy::th::_thnn_upsample_bilinear2d(self, output_size, align_corners);
}
Tensor & upsample_bilinear2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
- return at::_thnn_upsample_bilinear2d_backward_out(grad_input, grad_output, output_size, input_size, align_corners);
+ return at::legacy::th::_thnn_upsample_bilinear2d_backward_out(grad_input, grad_output, output_size, input_size, align_corners);
}
Tensor upsample_bilinear2d_backward(const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
- return at::_thnn_upsample_bilinear2d_backward(grad_output, output_size, input_size, align_corners);
+ return at::legacy::th::_thnn_upsample_bilinear2d_backward(grad_output, output_size, input_size, align_corners);
}
Tensor & upsample_trilinear3d_out(Tensor & output, const Tensor & self, IntList output_size, bool align_corners) {
- return at::_thnn_upsample_trilinear3d_out(output, self, output_size, align_corners);
+ return at::legacy::th::_thnn_upsample_trilinear3d_out(output, self, output_size, align_corners);
}
Tensor upsample_trilinear3d(const Tensor & self, IntList output_size, bool align_corners) {
- return at::_thnn_upsample_trilinear3d(self, output_size, align_corners);
+ return at::legacy::th::_thnn_upsample_trilinear3d(self, output_size, align_corners);
}
Tensor & upsample_trilinear3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
- return at::_thnn_upsample_trilinear3d_backward_out(grad_input, grad_output, output_size, input_size, align_corners);
+ return at::legacy::th::_thnn_upsample_trilinear3d_backward_out(grad_input, grad_output, output_size, input_size, align_corners);
}
Tensor upsample_trilinear3d_backward(const Tensor & grad_output, IntList output_size, IntList input_size, bool align_corners) {
- return at::_thnn_upsample_trilinear3d_backward(grad_output, output_size, input_size, align_corners);
+ return at::legacy::th::_thnn_upsample_trilinear3d_backward(grad_output, output_size, input_size, align_corners);
}
Tensor & upsample_nearest1d_out(Tensor & output, const Tensor & self, IntList output_size) {
- return at::_thnn_upsample_nearest1d_out(output, self, output_size);
+ return at::legacy::th::_thnn_upsample_nearest1d_out(output, self, output_size);
}
Tensor upsample_nearest1d(const Tensor & self, IntList output_size) {
- return at::_thnn_upsample_nearest1d(self, output_size);
+ return at::legacy::th::_thnn_upsample_nearest1d(self, output_size);
}
Tensor & upsample_nearest1d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size) {
- return at::_thnn_upsample_nearest1d_backward_out(grad_input, grad_output, output_size, input_size);
+ return at::legacy::th::_thnn_upsample_nearest1d_backward_out(grad_input, grad_output, output_size, input_size);
}
Tensor upsample_nearest1d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) {
- return at::_thnn_upsample_nearest1d_backward(grad_output, output_size, input_size);
+ return at::legacy::th::_thnn_upsample_nearest1d_backward(grad_output, output_size, input_size);
}
Tensor & upsample_nearest2d_out(Tensor & output, const Tensor & self, IntList output_size) {
- return at::_thnn_upsample_nearest2d_out(output, self, output_size);
+ return at::legacy::th::_thnn_upsample_nearest2d_out(output, self, output_size);
}
Tensor upsample_nearest2d(const Tensor & self, IntList output_size) {
- return at::_thnn_upsample_nearest2d(self, output_size);
+ return at::legacy::th::_thnn_upsample_nearest2d(self, output_size);
}
Tensor & upsample_nearest2d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size) {
- return at::_thnn_upsample_nearest2d_backward_out(grad_input, grad_output, output_size, input_size);
+ return at::legacy::th::_thnn_upsample_nearest2d_backward_out(grad_input, grad_output, output_size, input_size);
}
Tensor upsample_nearest2d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) {
- return at::_thnn_upsample_nearest2d_backward(grad_output, output_size, input_size);
+ return at::legacy::th::_thnn_upsample_nearest2d_backward(grad_output, output_size, input_size);
}
Tensor & upsample_nearest3d_out(Tensor & output, const Tensor & self, IntList output_size) {
- return at::_thnn_upsample_nearest3d_out(output, self, output_size);
+ return at::legacy::th::_thnn_upsample_nearest3d_out(output, self, output_size);
}
Tensor upsample_nearest3d(const Tensor & self, IntList output_size) {
- return at::_thnn_upsample_nearest3d(self, output_size);
+ return at::legacy::th::_thnn_upsample_nearest3d(self, output_size);
}
Tensor & upsample_nearest3d_backward_out(Tensor & grad_input, const Tensor & grad_output, IntList output_size, IntList input_size) {
- return at::_thnn_upsample_nearest3d_backward_out(grad_input, grad_output, output_size, input_size);
+ return at::legacy::th::_thnn_upsample_nearest3d_backward_out(grad_input, grad_output, output_size, input_size);
}
Tensor upsample_nearest3d_backward(const Tensor & grad_output, IntList output_size, IntList input_size) {
- return at::_thnn_upsample_nearest3d_backward(grad_output, output_size, input_size);
+ return at::legacy::th::_thnn_upsample_nearest3d_backward(grad_output, output_size, input_size);
}
Tensor & sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) {
- return at::_thnn_sigmoid_backward_out(grad_input, grad_output, output);
+ return at::legacy::th::_thnn_sigmoid_backward_out(grad_input, grad_output, output);
}
Tensor sigmoid_backward(const Tensor & grad_output, const Tensor & output) {
- return at::_thnn_sigmoid_backward(grad_output, output);
+ return at::legacy::th::_thnn_sigmoid_backward(grad_output, output);
}
Tensor & tanh_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & output) {
- return at::_thnn_tanh_backward_out(grad_input, grad_output, output);
+ return at::legacy::th::_thnn_tanh_backward_out(grad_input, grad_output, output);
}
Tensor tanh_backward(const Tensor & grad_output, const Tensor & output) {
- return at::_thnn_tanh_backward(grad_output, output);
+ return at::legacy::th::_thnn_tanh_backward(grad_output, output);
}
Tensor & thnn_conv_transpose2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
}
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
- return at::_thnn_conv_transpose2d_forward_out(output, columns, ones, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
+ return at::legacy::th::_thnn_conv_transpose2d_forward_out(output, columns, ones, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
- return at::_thnn_conv_transpose2d_forward(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
+ return at::legacy::th::_thnn_conv_transpose2d_forward(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & columns, const Tensor & ones) {
- return at::_thnn_conv_transpose2d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, columns, ones);
+ return at::legacy::th::_thnn_conv_transpose2d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, columns, ones);
}
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) {
- return at::_thnn_conv_transpose2d_backward(grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, columns, ones, output_mask);
+ return at::legacy::th::_thnn_conv_transpose2d_backward(grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, columns, ones, output_mask);
}
Tensor & thnn_conv_transpose3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
}
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
- return at::_thnn_conv_transpose3d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
+ return at::legacy::th::_thnn_conv_transpose3d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList output_padding, IntList dilation) {
- return at::_thnn_conv_transpose3d_forward(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
+ return at::legacy::th::_thnn_conv_transpose3d_forward(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
}
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_transpose3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & finput, const Tensor & fgrad_input) {
- return at::_thnn_conv_transpose3d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, finput, fgrad_input);
+ return at::legacy::th::_thnn_conv_transpose3d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, finput, fgrad_input);
}
std::tuple<Tensor,Tensor,Tensor> thnn_conv_transpose3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList output_padding, IntList dilation, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) {
- return at::_thnn_conv_transpose3d_backward(grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, finput, fgrad_input, output_mask);
+ return at::legacy::th::_thnn_conv_transpose3d_backward(grad_output, self, weight, kernel_size, stride, padding, output_padding, dilation, finput, fgrad_input, output_mask);
}
Tensor & thnn_conv2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
}
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
- return at::_thnn_conv2d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding);
+ return at::legacy::th::_thnn_conv2d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding);
}
std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
- return at::_thnn_conv2d_forward(self, weight, kernel_size, bias, stride, padding);
+ return at::legacy::th::_thnn_conv2d_forward(self, weight, kernel_size, bias, stride, padding);
}
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input) {
- return at::_thnn_conv2d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input);
+ return at::legacy::th::_thnn_conv2d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input);
}
std::tuple<Tensor,Tensor,Tensor> thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) {
- return at::_thnn_conv2d_backward(grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask);
+ return at::legacy::th::_thnn_conv2d_backward(grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask);
}
Tensor & thnn_conv3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
}
Tensor & thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
- return at::_thnn_conv_depthwise2d_forward_out(output, self, weight, kernel_size, bias, stride, padding, dilation);
+ return at::legacy::th::_thnn_conv_depthwise2d_forward_out(output, self, weight, kernel_size, bias, stride, padding, dilation);
}
Tensor thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
- return at::_thnn_conv_depthwise2d_forward(self, weight, kernel_size, bias, stride, padding, dilation);
+ return at::legacy::th::_thnn_conv_depthwise2d_forward(self, weight, kernel_size, bias, stride, padding, dilation);
}
std::tuple<Tensor &,Tensor &> thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation) {
- return at::_thnn_conv_depthwise2d_backward_out(grad_input, grad_weight, grad_output, self, weight, kernel_size, stride, padding, dilation);
+ return at::legacy::th::_thnn_conv_depthwise2d_backward_out(grad_input, grad_weight, grad_output, self, weight, kernel_size, stride, padding, dilation);
}
std::tuple<Tensor,Tensor> thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, std::array<bool,2> output_mask) {
- return at::_thnn_conv_depthwise2d_backward(grad_output, self, weight, kernel_size, stride, padding, dilation, output_mask);
+ return at::legacy::th::_thnn_conv_depthwise2d_backward(grad_output, self, weight, kernel_size, stride, padding, dilation, output_mask);
}
Tensor thnn_conv3d(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
}
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_forward_out(Tensor & output, Tensor & finput, Tensor & fgrad_input, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
- return at::_thnn_conv3d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding);
+ return at::legacy::th::_thnn_conv3d_forward_out(output, finput, fgrad_input, self, weight, kernel_size, bias, stride, padding);
}
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding) {
- return at::_thnn_conv3d_forward(self, weight, kernel_size, bias, stride, padding);
+ return at::legacy::th::_thnn_conv3d_forward(self, weight, kernel_size, bias, stride, padding);
}
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input) {
- return at::_thnn_conv3d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input);
+ return at::legacy::th::_thnn_conv3d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input);
}
std::tuple<Tensor,Tensor,Tensor> thnn_conv3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, const Tensor & finput, const Tensor & fgrad_input, std::array<bool,3> output_mask) {
- return at::_thnn_conv3d_backward(grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask);
+ return at::legacy::th::_thnn_conv3d_backward(grad_output, self, weight, kernel_size, stride, padding, finput, fgrad_input, output_mask);
}
Tensor & thnn_conv_dilated2d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
}
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
- return at::_thnn_conv_dilated2d_forward_out(output, columns, ones, self, weight, kernel_size, bias, stride, padding, dilation);
+ return at::legacy::th::_thnn_conv_dilated2d_forward_out(output, columns, ones, self, weight, kernel_size, bias, stride, padding, dilation);
}
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
- return at::_thnn_conv_dilated2d_forward(self, weight, kernel_size, bias, stride, padding, dilation);
+ return at::legacy::th::_thnn_conv_dilated2d_forward(self, weight, kernel_size, bias, stride, padding, dilation);
}
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones) {
- return at::_thnn_conv_dilated2d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones);
+ return at::legacy::th::_thnn_conv_dilated2d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones);
}
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) {
- return at::_thnn_conv_dilated2d_backward(grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones, output_mask);
+ return at::legacy::th::_thnn_conv_dilated2d_backward(grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones, output_mask);
}
Tensor & thnn_conv_dilated3d_out(Tensor & output, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
}
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated3d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
- return at::_thnn_conv_dilated3d_forward_out(output, columns, ones, self, weight, kernel_size, bias, stride, padding, dilation);
+ return at::legacy::th::_thnn_conv_dilated3d_forward_out(output, columns, ones, self, weight, kernel_size, bias, stride, padding, dilation);
}
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated3d_forward(const Tensor & self, const Tensor & weight, IntList kernel_size, const Tensor & bias, IntList stride, IntList padding, IntList dilation) {
- return at::_thnn_conv_dilated3d_forward(self, weight, kernel_size, bias, stride, padding, dilation);
+ return at::legacy::th::_thnn_conv_dilated3d_forward(self, weight, kernel_size, bias, stride, padding, dilation);
}
std::tuple<Tensor &,Tensor &,Tensor &> thnn_conv_dilated3d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones) {
- return at::_thnn_conv_dilated3d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones);
+ return at::legacy::th::_thnn_conv_dilated3d_backward_out(grad_input, grad_weight, grad_bias, grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones);
}
std::tuple<Tensor,Tensor,Tensor> thnn_conv_dilated3d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntList kernel_size, IntList stride, IntList padding, IntList dilation, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) {
- return at::_thnn_conv_dilated3d_backward(grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones, output_mask);
+ return at::legacy::th::_thnn_conv_dilated3d_backward(grad_output, self, weight, kernel_size, stride, padding, dilation, columns, ones, output_mask);
}
}} // namespace at::native
#include <ATen/ExpandUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
+#include <ATen/LegacyTHFunctions.h>
#include <ATen/native/LinearAlgebraUtils.h>
#include <ATen/TensorUtils.h>
#include <ATen/Parallel.h>
Tensor ger(const Tensor& self, const Tensor& vec2) {
check_1d(self, "self", "ger");
check_1d(vec2, "vec2", "ger");
- return at::_th_ger(self, vec2);
+ return at::legacy::th::_th_ger(self, vec2);
}
Tensor& ger_out(Tensor& result, const Tensor& self, const Tensor& vec2) {
check_1d(self, "self", "ger");
check_1d(vec2, "vec2", "ger");
- return at::_th_ger_out(result, self, vec2);
+ return at::legacy::th::_th_ger_out(result, self, vec2);
}
Tensor mm(const Tensor& self, const Tensor& mat2) {
if (self.is_sparse()) {
return mat2.type().addmm(at::zeros({}, mat2.type()), self, mat2, 0, 1);
}
- return at::_th_mm(self, mat2);
+ return at::legacy::th::_th_mm(self, mat2);
}
Tensor& mm_out(Tensor& result, const Tensor& self, const Tensor& mat2) {
if (self.is_sparse()) {
return at::addmm_out(result, at::zeros({}, mat2.options()), self, mat2, 0, 1);
}
- return at::_th_mm_out(result, self, mat2);
+ return at::legacy::th::_th_mm_out(result, self, mat2);
}
Tensor mv(const Tensor& self, const Tensor& vec) {
check_1d(vec, "vec", "mv");
- return at::_th_mv(self, vec);
+ return at::legacy::th::_th_mv(self, vec);
}
Tensor& mv_out(Tensor& result, const Tensor& self, const Tensor& vec) {
check_1d(vec, "vec", "mv");
- return at::_th_mv_out(result, self, vec);
+ return at::legacy::th::_th_mv_out(result, self, vec);
}
Tensor addmv(const Tensor& self, const Tensor& mat, const Tensor& vec, Scalar beta, Scalar alpha) {
check_1d(vec, "vec", "addmv");
- return at::_th_addmv(self, mat, vec, beta, alpha);
+ return at::legacy::th::_th_addmv(self, mat, vec, beta, alpha);
}
Tensor& addmv_(Tensor& self, const Tensor& mat, const Tensor& vec, Scalar beta, Scalar alpha) {
check_1d(vec, "vec", "addmv");
- return at::_th_addmv_(self, mat, vec, beta, alpha);
+ return at::legacy::th::_th_addmv_(self, mat, vec, beta, alpha);
}
Tensor& addmv_out(Tensor &result, const Tensor& self, const Tensor& mat, const Tensor& vec, Scalar beta, Scalar alpha) {
check_1d(vec, "vec", "addmv");
- return at::_th_addmv_out(result, self, mat, vec, beta, alpha);
+ return at::legacy::th::_th_addmv_out(result, self, mat, vec, beta, alpha);
}
Tensor addr(const Tensor& self, const Tensor& vec1, const Tensor& vec2, Scalar beta, Scalar alpha) {
check_1d(vec1, "vec1", "addr");
check_1d(vec2, "vec2", "addr");
- return at::_th_addr(self, vec1, vec2, beta, alpha);
+ return at::legacy::th::_th_addr(self, vec1, vec2, beta, alpha);
}
Tensor& addr_(Tensor& self, const Tensor& vec1, const Tensor& vec2, Scalar beta, Scalar alpha) {
check_1d(vec1, "vec1", "addr");
check_1d(vec2, "vec2", "addr");
- return at::_th_addr_(self, vec1, vec2, beta, alpha);
+ return at::legacy::th::_th_addr_(self, vec1, vec2, beta, alpha);
}
Tensor& addr_out(Tensor &result, const Tensor& self, const Tensor& vec1, const Tensor& vec2, Scalar beta, Scalar alpha) {
check_1d(vec1, "vec1", "addr");
check_1d(vec2, "vec2", "addr");
- return at::_th_addr_out(result, self, vec1, vec2, beta, alpha);
+ return at::legacy::th::_th_addr_out(result, self, vec1, vec2, beta, alpha);
}
template <typename scalar_t, bool is_bmm>
Tensor dot(const Tensor& self, const Tensor& tensor) {
check_1d(self, "self", "dot");
check_1d(tensor, "tensor", "dot");
- return at::_th_dot(self, tensor);
+ return at::legacy::th::_th_dot(self, tensor);
}
Tensor& dot_out(Tensor& result, const Tensor& self, const Tensor& tensor) {
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
+#include <ATen/LegacyTHFunctions.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/WrapDimUtilsMulti.h>
#include <ATen/native/ReduceOpsUtils.h>
}
static inline Tensor cumsum(const Tensor& self, int64_t dim, optional<ScalarType> dtype) {
- return at::_th_cumsum(integer_upcast(self, dtype), dim);
+ return at::legacy::th::_th_cumsum(integer_upcast(self, dtype), dim);
}
Tensor cumsum(const Tensor& self, int64_t dim, ScalarType dtype) {
" and ",
toString(dtype.value()),
".");
- return at::_th_cumsum_out(result, self.toType(result.type().scalarType()), dim);
+ return at::legacy::th::_th_cumsum_out(result, self.toType(result.type().scalarType()), dim);
}
Tensor& cumsum_out(Tensor& result, const Tensor& self, int64_t dim, ScalarType dtype) {
}
static inline Tensor cumprod(const Tensor& self, int64_t dim, optional<ScalarType> dtype) {
- return at::_th_cumprod(integer_upcast(self, dtype), dim);
+ return at::legacy::th::_th_cumprod(integer_upcast(self, dtype), dim);
}
Tensor cumprod(const Tensor& self, int64_t dim, ScalarType dtype) {
" and ",
toString(dtype.value()),
".");
- return at::_th_cumprod_out(result, self.toType(result.type().scalarType()), dim);
+ return at::legacy::th::_th_cumprod_out(result, self.toType(result.type().scalarType()), dim);
}
Tensor& cumprod_out(Tensor& result, const Tensor& self, int64_t dim, ScalarType dtype) {
}
return result;
} else {
- return at::_th_norm_out(result, self, p, dim, keepdim);
+ return at::legacy::th::_th_norm_out(result, self, p, dim, keepdim);
}
}
return result;
} else {
if (self.is_cuda()) {
- return at::_th_norm_out(result, self, p, dim, keepdim);
+ return at::legacy::th::_th_norm_out(result, self, p, dim, keepdim);
} else {
return _norm_out_cpu(result, self, p, dim, keepdim);
}
"norm only supports CPU AND CUDA backend, got: ", toString(self.type().backend()));
AT_CHECK(at::isFloatingType(self.type().scalarType()), "norm only supports floating-point dtypes");
if (self.is_cuda()) {
- return at::_th_norm(self, p);
+ return at::legacy::th::_th_norm(self, p);
} else {
if (self.is_contiguous()) {
Tensor result = CPU(kFloat).scalarTensor(0).toType(self.type());
norm_kernel(kCPU, result, self, p, c10::nullopt);
return result;
} else {
- return at::_th_norm(self, p);
+ return at::legacy::th::_th_norm(self, p);
}
}
}
if (_dimreduce_return_trivial(result, self, 1, dim, keepdim)) {
return result;
} else {
- return at::_th_all_out(result, self, dim, keepdim);
+ return at::legacy::th::_th_all_out(result, self, dim, keepdim);
}
}
if (_dimreduce_return_trivial(result, self, 0, dim, keepdim)) {
return result;
} else {
- return at::_th_any_out(result, self, dim, keepdim);
+ return at::legacy::th::_th_any_out(result, self, dim, keepdim);
}
}
"var only supports CPU AND CUDA backend, got: ", toString(self.type().backend()));
AT_CHECK(at::isFloatingType(self.type().scalarType()), "var only supports floating-point dtypes");
auto trivial_return = _allreduce_return_trivial(self, std::numeric_limits<double>::quiet_NaN());
- return trivial_return.has_value() ? trivial_return.value() : at::_th_var(self, unbiased);
+ return trivial_return.has_value() ? trivial_return.value() : at::legacy::th::_th_var(self, unbiased);
}
Tensor var(const Tensor& self, int64_t dim, bool unbiased, bool keepdim) {
if (_dimreduce_return_trivial(result, self, std::numeric_limits<double>::quiet_NaN(), dim, keepdim)) {
return result;
} else {
- return at::_th_var_out(result, self, dim, unbiased, keepdim);
+ return at::legacy::th::_th_var_out(result, self, dim, unbiased, keepdim);
}
}
"std only supports CPU AND CUDA backend, got: ", toString(self.type().backend()));
AT_CHECK(at::isFloatingType(self.type().scalarType()), "std only supports floating-point dtypes");
auto trivial_return = _allreduce_return_trivial(self, std::numeric_limits<double>::quiet_NaN());
- return trivial_return.has_value() ? trivial_return.value() : at::_th_std(self, unbiased);
+ return trivial_return.has_value() ? trivial_return.value() : at::legacy::th::_th_std(self, unbiased);
}
Tensor std(const Tensor& self, IntList dim, bool unbiased, bool keepdim) {
if (_dimreduce_return_trivial(result, self, std::numeric_limits<double>::quiet_NaN(), one_dim, keepdim)) {
return result;
} else {
- return at::_th_std_out(result, self, one_dim, unbiased, keepdim);
+ return at::legacy::th::_th_std_out(result, self, one_dim, unbiased, keepdim);
}
}
ScalarType dtype = get_dtype(result, self, {}, true);
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
+#include <ATen/LegacyTHFunctions.h>
#include <ATen/native/ReduceOpsUtils.h>
#include <c10/util/Exception.h>
#include <ATen/native/cpu/TensorCompareKernel.h>
indices.resize_({}).fill_(0);
return std::forward_as_tuple(values, indices);
} else {
- return at::_th_kthvalue_out(values, indices, self, k, dim, keepdim);
+ return at::legacy::th::_th_kthvalue_out(values, indices, self, k, dim, keepdim);
}
}
indices.resize_({}).fill_(0);
return std::forward_as_tuple(values, indices);
} else {
- return at::_th_median_out(values, indices, self, dim, keepdim);
+ return at::legacy::th::_th_median_out(values, indices, self, dim, keepdim);
}
}
indices.resize_({}).fill_(0);
return std::forward_as_tuple(values, indices);
} else {
- return at::_th_mode_out(values, indices, self, dim, keepdim);
+ return at::legacy::th::_th_mode_out(values, indices, self, dim, keepdim);
}
}
}
return std::tuple<Tensor &,Tensor &>{max, max_indices};
}
- return at::_th_max_out(max, max_indices, self, dim, keepdim);
+ return at::legacy::th::_th_max_out(max, max_indices, self, dim, keepdim);
}
std::tuple<Tensor, Tensor> max(const Tensor& self, int64_t dim, bool keepdim) {
return std::forward_as_tuple(max, max_indices);
} else {
if (self.is_cuda()) {
- return at::_th_max_out(max, max_indices, self, dim, keepdim);
+ return at::legacy::th::_th_max_out(max, max_indices, self, dim, keepdim);
} else {
return _max_out_cpu(max, max_indices, self, dim, keepdim);
}
}
return std::tuple<Tensor &,Tensor &>{min, min_indices};
}
- return at::_th_min_out(min, min_indices, self, dim, keepdim);
+ return at::legacy::th::_th_min_out(min, min_indices, self, dim, keepdim);
}
std::tuple<Tensor, Tensor> min(const Tensor& self, int64_t dim, bool keepdim) {
return std::forward_as_tuple(min, min_indices);
} else {
if (self.is_cuda()) {
- return at::_th_min_out(min, min_indices, self, dim, keepdim);
+ return at::legacy::th::_th_min_out(min, min_indices, self, dim, keepdim);
} else {
return _min_out_cpu(min, min_indices, self, dim, keepdim);
}
#include <ATen/CheckGenerator.h>
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
+#include <ATen/LegacyTHFunctions.h>
+#include <ATen/LegacyTHDispatcher.h>
#include <c10/core/ScalarType.h>
#include <ATen/core/Deprecated.h>
#include <c10/core/TensorOptions.h>
window_length);
}
+// FIXME: point to LegacyTHDispatcher.
// Resolves the Type used by tensor factory functions for the given options.
// Still routed through at::getType (see the FIXME above this function in the
// file): the intent is to eventually route factory lookups through the
// LegacyTHDispatcher instead.
const TypeExtendedInterface& getFactoryType(const TensorOptions& options) {
return at::getType(options);
}
}
// Out-variant arange(start, end, step): delegates to the legacy TH kernel.
// Review note: the diff only moves the call from at::_th_arange_out to the
// new at::legacy::th:: namespace; arguments and behavior are unchanged.
Tensor& arange_out(Tensor& result, Scalar start, Scalar end, Scalar step) {
- return at::_th_arange_out(result, start, end, step);
+ return at::legacy::th::_th_arange_out(result, start, end, step);
}
Tensor arange(Scalar end, const TensorOptions& options) {
}
// Out-variant arange(end) overload: same namespace migration as the
// three-argument form; delegates to the legacy TH implementation.
Tensor& arange_out(Tensor& result, Scalar end) {
- return at::_th_arange_out(result, end);
+ return at::legacy::th::_th_arange_out(result, end);
}
// Builds a kLong tensor from _th_arange(like.size(dim)) — presumably the
// indices [0, like.size(dim)); name-based, confirm against the TH kernel.
// Review note: the diff swaps the direct at::getType() call for
// getFactoryType(), centralizing factory Type lookup per the FIXME so it can
// later be pointed at the LegacyTHDispatcher in one place.
Tensor _dim_arange(const Tensor& like, int64_t dim) {
- return at::getType(like.options().dtype(at::kLong))._th_arange(like.size(dim));
+ return getFactoryType(like.options().dtype(at::kLong))._th_arange(like.size(dim));
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ empty ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
}
// Out-variant linspace: delegates to the legacy TH kernel under the new
// at::legacy::th:: namespace (mechanical rename, no behavior change).
Tensor& linspace_out(Tensor& result, Scalar start, Scalar end, int64_t steps) {
- return at::_th_linspace_out(result, start, end, steps);
+ return at::legacy::th::_th_linspace_out(result, start, end, steps);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ logspace ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
}
// Out-variant logspace: same mechanical move into at::legacy::th::.
Tensor& logspace_out(Tensor& result, Scalar start, Scalar end, int64_t steps) {
- return at::_th_logspace_out(result, start, end, steps);
+ return at::legacy::th::_th_logspace_out(result, start, end, steps);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ones ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
}
// Out-variant range: same mechanical move into at::legacy::th::.
Tensor& range_out(Tensor& result, Scalar start, Scalar end, Scalar step) {
- return at::_th_range_out(result, start, end, step);
+ return at::legacy::th::_th_range_out(result, start, end, step);
}
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ zeros ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#include <ATen/ExpandUtils.h>
#include <ATen/InferSize.h>
#include <ATen/NativeFunctions.h>
+#include <ATen/LegacyTHFunctions.h>
#include <ATen/WrapDimUtils.h>
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
// Concatenates `tensors` along `dim` into `result` via the legacy TH kernel.
// check_cat_no_zero_dim presumably rejects 0-dim inputs, and `dim` is
// normalized with the legacy cat wrapping rules before dispatch — both
// helpers defined elsewhere in this file; confirm semantics there.
Tensor & cat_out(Tensor & result, TensorList tensors, int64_t dim) {
check_cat_no_zero_dim(tensors);
dim = legacy_cat_wrap_dim(dim, tensors);
- return at::_th_cat_out(result, tensors, dim);
+ return at::legacy::th::_th_cat_out(result, tensors, dim);
}
static bool sizes_match_except(IntList s1, IntList s2, int64_t dim_except /* should already be wrapped */) {
}
check_cat_no_zero_dim(tensors);
dim = legacy_cat_wrap_dim(dim, tensors);
- return at::_th_cat(tensors, dim);
+ return at::legacy::th::_th_cat(tensors, dim);
}
std::vector<Tensor> chunk(const Tensor& self, int64_t chunks, int64_t dim) {
#include <ATen/Dispatch.h>
#include <ATen/ExpandUtils.h>
#include <ATen/NativeFunctions.h>
+#include <ATen/LegacyTHFunctions.h>
#include <ATen/WrapDimUtils.h>
#include <ATen/CPUApplyUtils.h>
optional<Scalar> min,
optional<Scalar> max) {
if (min && max) {
- _th_clamp_out(result, self, *min, *max);
+ legacy::th::_th_clamp_out(result, self, *min, *max);
} else if (max) {
- _th_clamp_max_out(result, self, *max);
+ legacy::th::_th_clamp_max_out(result, self, *max);
} else if (min) {
- _th_clamp_min_out(result, self, *min);
+ legacy::th::_th_clamp_min_out(result, self, *min);
} else {
AT_ERROR("At least one of 'min' or 'max' must not be None");
}
}
// In-place CPU clamp-to-max: passes `self` as both output and input to the
// legacy TH out-variant (namespace migration only).
Tensor& _clamp_max__cpu(Tensor& self, Scalar max) {
- return _th_clamp_max_out(self, self, max);
+ return legacy::th::_th_clamp_max_out(self, self, max);
}
// Out-of-place CPU clamp-to-max writing into `result` (namespace migration
// only; behavior unchanged).
Tensor& _clamp_max_out_cpu(Tensor& result, const Tensor& self, Scalar max) {
- return _th_clamp_max_out(result, self, max);
+ return legacy::th::_th_clamp_max_out(result, self, max);
}
// In-place CPU clamp-to-min: `self` doubles as output and input, mirroring
// _clamp_max__cpu above.
Tensor& _clamp_min__cpu(Tensor& self, Scalar min) {
- return _th_clamp_min_out(self, self, min);
+ return legacy::th::_th_clamp_min_out(self, self, min);
}
// Out-of-place CPU clamp-to-min writing into `result` (namespace migration
// only; behavior unchanged).
Tensor& _clamp_min_out_cpu(Tensor& result, const Tensor& self, Scalar min) {
- return _th_clamp_min_out(result, self, min);
+ return legacy::th::_th_clamp_min_out(result, self, min);
}
// In-place fill with a Scalar value, delegating to the legacy TH kernel
// under its new at::legacy::th:: home.
Tensor& fill_(Tensor& self, Scalar value) {
- return at::_th_fill_(self, value);
+ return at::legacy::th::_th_fill_(self, value);
}
// In-place fill overload taking the value as a (presumably 0-dim — confirm
// against the TH kernel) Tensor; same namespace migration as above.
Tensor& fill_(Tensor& self, const Tensor& value) {
- return at::_th_fill_(self, value);
+ return at::legacy::th::_th_fill_(self, value);
}
Tensor mvlgamma(const Tensor& self, int64_t p) {
} \
Tensor& _##op##_out_cpu(Tensor& result, const Tensor& self) { \
result.resize_(self.sizes()); \
- return at::_th_##op##_out(result, self); \
+ return at::legacy::th::_th_##op##_out(result, self); \
}
// NB: Temp. defaulting to TH implementation of abs due to issues with Apple
--- /dev/null
+#include "ATen/LegacyTHDispatcher.h"
+
+// ${generated_comment}
+
+namespace at {
+
+// template: legacy_type_method_definitions
+
+}
#pragma once
+// ${generated_comment}
+
#include <c10/core/TensorTypeIdRegistration.h>
namespace at {
struct CAFFE2_API LegacyTHDispatcher {
// Review note: drops the `is_undefined` parameter — the member initializer
// below only ever stored type_id_, so the flag was dead weight at every
// call site.
- explicit LegacyTHDispatcher(TensorTypeId type_id, bool is_undefined)
+ explicit LegacyTHDispatcher(TensorTypeId type_id)
: type_id_(type_id) {}
virtual ~LegacyTHDispatcher() {}
--- /dev/null
+#include "ATen/${Dispatcher}.h"
+
+// ${generated_comment}
+
+namespace at {
+
+// Constructs the generated per-backend dispatcher, tagging the base
+// LegacyTHDispatcher with this backend's TensorTypeId. ${Dispatcher} and
+// ${Backend} are substituted by the ATen code generator.
+${Dispatcher}::${Dispatcher}()
+ : LegacyTHDispatcher(${Backend}TensorId()) {}
+
+}
--- /dev/null
+#pragma once
+
+// ${generated_comment}
+
+#include "ATen/LegacyTHDispatcher.h"
+
+namespace at {
+
+// Generated per-backend dispatcher type; `final` since nothing derives from
+// a concrete generated dispatcher. The constructor is defined in the
+// matching generated .cpp.
+struct ${Dispatcher} final : public LegacyTHDispatcher {
+ explicit ${Dispatcher}();
+
+};
+
+} // namespace at
--- /dev/null
+#pragma once
+
+// ${generated_comment}
+
+namespace at {
+namespace legacy {
+namespace th {
+
+namespace detail {
+
+// Resolves the LegacyTHDispatcher for a single tensor argument. Undefined
+// tensors are rejected up front via AT_CHECK, since they carry no
+// backend/scalar-type to dispatch on.
+static inline LegacyTHDispatcher & infer_dispatcher(const Tensor & t) {
+ AT_CHECK(t.defined(), "undefined Tensor");
+ return getLegacyTHDispatcher(t);
+}
+// TensorList overload: dispatch is keyed off the first element only (the
+// remaining tensors are assumed homogeneous — not verified here), so an
+// empty list is rejected.
+static inline LegacyTHDispatcher & infer_dispatcher(const TensorList & tl) {
+ AT_CHECK(tl.size() > 0, "expected a non-empty list of Tensors");
+ return getLegacyTHDispatcher(tl[0]);
+}
+
+} // namespace detail
+
+// function definitions are all static inline because
+// they are one-line statically dispatched functions that
+// invoke the actual dynamic dispatch on the correct argument
+
+}
+}
+}
+
+// FIXME: this is temporary until we start generating into at::legacy::th
+
+#include <ATen/Functions.h>
+
+namespace at {
+namespace legacy {
+namespace th {
+ using namespace at;
+}
+}
+}