void checkBackend(CheckedFrom c, const Tensor& t, Backend backend) {
AT_CHECK(
- t.type().backend() == backend,
+ !t.defined() || t.type().backend() == backend,
"Expected tensor to have ", toString(backend),
" Backend, but got tensor with ", toString(t.type().backend()), " Backend ",
"(while checking arguments for ", c, ")");
std::tuple<Tensor, Tensor, Tensor> batch_norm_cpu(const Tensor& self, const Tensor& weight, const Tensor& bias,
const Tensor& running_mean, const Tensor& running_var,
bool train, double momentum, double eps) {
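+ // Fail fast with a readable error if any defined argument tensor is not a
+ // CPU tensor; undefined optional arguments pass the relaxed check above.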
+ checkBackend("batch_norm_cpu", {self, weight, bias, running_mean, running_var}, Backend::CPU);
+
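// AT_DISPATCH_FLOATING_TYPES switches on self's scalar type and invokes the
// lambda with scalar_t bound to float or double.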
return AT_DISPATCH_FLOATING_TYPES(self.type(), "batch_norm", [&] {
if (!train) {
return batch_norm_cpu_transform_input_template<scalar_t>(self, weight, bias, {}, {}, running_mean, running_var, train, eps);
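
// CUDA path: the analogous check is added at the top of the CUDA batch_norm
// implementation. This hunk starts partway through that function's parameter
// list; the opening line of the signature, which declares input_, weight_,
// and bias_, falls outside the excerpt.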
const Tensor& running_mean_, const Tensor& running_var_,
bool train, double momentum, double epsilon) {
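+ // Wrap each argument in a TensorArg (tensor plus its name and argument
+ // position, used for readable error messages), then verify that all of them
+ // live on the same GPU before doing any work.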
+ TensorArg input_arg{ input_, "input", 1 },
+           weight_arg{ weight_, "weight", 2 },
+           bias_arg{ bias_, "bias", 3 },
+           run_mean_arg{ running_mean_, "running_mean", 4 },
+           run_var_arg{ running_var_, "running_var", 5 };
+ CheckedFrom c = "batch_norm_cuda";
+ checkAllSameGPU(c, {input_arg, weight_arg, bias_arg, run_mean_arg, run_var_arg});
+
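// acc_type<scalar_t, /*is_cuda=*/true> selects a wider accumulation type than
// scalar_t where needed (e.g. float for Half), so the batch statistics are
// accumulated at higher precision.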
using accscalar_t = at::acc_type<scalar_t, true>;
int64_t n_input = input_.size(1);
Tensor save_mean_;