Add backend checks for batch norm (#15955)
author	vishwakftw <cs15btech11043@iith.ac.in>
Fri, 11 Jan 2019 19:19:56 +0000 (11:19 -0800)
committer	Facebook Github Bot <facebook-github-bot@users.noreply.github.com>
Fri, 11 Jan 2019 19:28:45 +0000 (11:28 -0800)
Summary:
Fixes #15826

Changelog:
- Add backend checks in `batch_norm_cpu` (via `checkBackend`) and `batch_norm_cuda` (via `TensorArg`/`checkAllSameGPU`); see the repro sketch below
- Modify the check in `checkBackend` to pass for undefined tensors, so that optional arguments such as `weight` and `bias` do not trip it
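
For context, #15826 reported that `batch_norm` could fail ungracefully when its tensor arguments lived on different backends. A minimal repro sketch using ATen's C++ API (sizes and the exact failing combination are illustrative, not copied from the issue):

  #include <ATen/ATen.h>

  int main() {
    at::Tensor input = at::randn({2, 3, 4, 4}, at::kCUDA); // CUDA input...
    at::Tensor mean  = at::zeros({3});                     // ...CPU running stats
    at::Tensor var   = at::ones({3});
    // Before this patch the mismatch went unchecked and could blow up inside
    // the kernel; with it, batch_norm_cuda raises a readable device error.
    at::batch_norm(input, /*weight=*/{}, /*bias=*/{}, mean, var,
                   /*training=*/false, /*momentum=*/0.1, /*eps=*/1e-5,
                   /*cudnn_enabled=*/false);
    return 0;
  }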

Differential Revision: D13636410

Pulled By: soumith

fbshipit-source-id: 3b1cfe5ca8b7c0346569077163503065e75c2659

aten/src/ATen/TensorUtils.cpp
aten/src/ATen/native/Normalization.cpp
aten/src/ATen/native/cuda/Normalization.cuh

diff --git a/aten/src/ATen/TensorUtils.cpp b/aten/src/ATen/TensorUtils.cpp
index c1ee209..d8bbd26 100644
@@ -196,7 +196,7 @@ void checkAllDefined(CheckedFrom c, ArrayRef<TensorArg> ts) {
 
 void checkBackend(CheckedFrom c, const Tensor& t, Backend backend) {
   AT_CHECK(
-    t.type().backend() == backend,
+    !t.defined() || t.type().backend() == backend,
     "Expected tensor to have ", toString(backend),
     " Backend, but got tensor with ", toString(t.type().backend()), " Backend ",
     "(while checking arguments for ", c, ")");
diff --git a/aten/src/ATen/native/Normalization.cpp b/aten/src/ATen/native/Normalization.cpp
index 85f517f..f5f4cfa 100644
@@ -461,6 +461,8 @@ std::tuple<Tensor, Tensor> batch_norm_update_stats_cpu(
 std::tuple<Tensor, Tensor, Tensor> batch_norm_cpu(const Tensor& self, const Tensor& weight, const Tensor& bias,
                                                   const Tensor& running_mean, const Tensor& running_var,
                                                   bool train, double momentum, double eps) {
+  checkBackend("batch_norm_cpu", {self, weight, bias, running_mean, running_var}, Backend::CPU);
+
   return AT_DISPATCH_FLOATING_TYPES(self.type(), "batch_norm", [&] {
       if (!train) {
         return batch_norm_cpu_transform_input_template<scalar_t>(self, weight, bias, {}, {}, running_mean, running_var, train, eps);
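
This call uses the `ArrayRef<Tensor>` overload of `checkBackend`, which applies the single-tensor check above to each entry; `weight` and `bias` may be undefined here, which is exactly why the relaxation in `TensorUtils.cpp` is needed. A hedged sketch of the error a mismatched argument now produces, with the message instantiated from the `AT_CHECK` format string in the first hunk:

  #include <ATen/ATen.h>
  #include <ATen/TensorUtils.h>

  void demo() {
    at::Tensor self = at::randn({2, 3});          // CPU
    at::Tensor mean = at::zeros({3}, at::kCUDA);  // wrong backend for this kernel
    // Throws: "Expected tensor to have CPU Backend, but got tensor with CUDA
    // Backend (while checking arguments for batch_norm_cpu)"
    at::checkBackend("batch_norm_cpu",
                     {self, at::Tensor{}, at::Tensor{}, mean, at::Tensor{}},
                     at::Backend::CPU);
  }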
diff --git a/aten/src/ATen/native/cuda/Normalization.cuh b/aten/src/ATen/native/cuda/Normalization.cuh
index bf3e9a5..0a08129 100644
@@ -395,6 +395,14 @@ std::tuple<Tensor, Tensor, Tensor> batch_norm_cuda_template(const Tensor& input_
                                                             const Tensor& running_mean_, const Tensor& running_var_,
                                                             bool train, double momentum, double epsilon) {
 
+  TensorArg input_arg{ input_, "input", 1 },
+            weight_arg{ weight_, "weight", 2 },
+            bias_arg{ bias_, "bias", 3 },
+            run_mean_arg{ running_mean_, "running_mean", 4 },
+            run_var_arg{ running_var_, "running_var", 5 };
+  CheckedFrom c = "batch_norm_cuda";
+  checkAllSameGPU(c, {input_arg, weight_arg, bias_arg, run_mean_arg, run_var_arg});
+
   using accscalar_t = at::acc_type<scalar_t, true>;
   int64_t n_input = input_.size(1);
   Tensor save_mean_;
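
For reference, `TensorArg` bundles a tensor with its argument name and 1-based position so that `checkAllSameGPU` can name the offending argument in its error rather than just printing a device index. A hedged sketch of the same pattern at an arbitrary call site (the op name and wrapper function are illustrative):

  #include <ATen/ATen.h>
  #include <ATen/TensorUtils.h>

  // Verifies that the tensor arguments live on the same GPU; on a mismatch
  // the error can point at, e.g., argument "weight" at position 2.
  void check_same_gpu(const at::Tensor& input, const at::Tensor& weight) {
    at::TensorArg input_arg{input, "input", 1};
    at::TensorArg weight_arg{weight, "weight", 2};
    at::checkAllSameGPU("my_cuda_op", {input_arg, weight_arg});
  }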