Hook up non_differentiability in derivatives.yaml when no autograd function is generated.
author     Gregory Chanan <gchanan@fb.com>
Sun, 21 Apr 2019 20:43:02 +0000 (13:43 -0700)
committer  Facebook Github Bot <facebook-github-bot@users.noreply.github.com>
Sun, 21 Apr 2019 20:48:55 +0000 (13:48 -0700)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/19520
ghimport-source-id: a1272aa0b23692fb189974c4daba7b2e4e0dad50

Differential Revision: D15021380

Pulled By: gchanan

fbshipit-source-id: ec83efd4bb6d17714c060f13a0527a33a10452db

aten/src/ATen/native/native_functions.yaml
tools/autograd/derivatives.yaml
tools/autograd/gen_variable_type.py
tools/autograd/load_derivatives.py

diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml
index 428982c..eefd661 100644
@@ -41,8 +41,7 @@
   dispatch:
     CUDA: _cudnn_rnn
 
-- func: _cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, BoolTensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
-  matches_jit_signature: False
+- func: _cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[])
   dispatch:
     CUDA: _cudnn_rnn_backward
 
diff --git a/tools/autograd/derivatives.yaml b/tools/autograd/derivatives.yaml
index 159a75d..301d933 100644
   output_differentiability: [True, True, True, False, False]
   input, hx, cx, weight: "_cudnn_rnn_backward(input, weight, weight_stride0, result4, hx, cx, result0, grads[0], grads[1], grads[2], mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, retain_variables ? result3.clone() : result3, grad_input_mask)"
 
+- name: _cudnn_rnn_backward(Tensor input, TensorList weight, int64_t weight_stride0, Tensor weight_buf, Tensor hx, Tensor cx, Tensor output, Tensor grad_output, Tensor grad_hy, Tensor grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, IntArrayRef batch_sizes, Tensor dropout_state, Tensor reserve, std::array<bool,4> output_mask)
+  dropout_state: non_differentiable
+
 # miopen
 
 - name: miopen_convolution_transpose(Tensor self, Tensor weight, Tensor bias, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef stride, IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic)
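
The entry added above carries no derivative formulas, only the argument-level non_differentiable marker, so no autograd function is generated for _cudnn_rnn_backward; the marker still has to reach the codegen. Below is a minimal, hypothetical sketch (not the actual load_derivatives.py code, and with the signature abbreviated) of how such an entry can be split into derivative formulas versus non-differentiable argument names:

    # Hypothetical sketch only; the helper and abbreviated signature are
    # illustrative, not the real load_derivatives.py implementation.
    import yaml

    ENTRY = """
    - name: _cudnn_rnn_backward(Tensor input, TensorList weight, Tensor dropout_state, Tensor reserve)
      dropout_state: non_differentiable
    """

    def non_differentiable_args(entry):
        # Keys other than 'name' and 'output_differentiability' name arguments;
        # each value is either a derivative formula or the literal marker
        # 'non_differentiable'.
        skip = {'name', 'output_differentiability'}
        return [arg for arg, value in entry.items()
                if arg not in skip and value == 'non_differentiable']

    for entry in yaml.safe_load(ENTRY):
        print(non_differentiable_args(entry))   # ['dropout_state']
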
diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py
index 5e5b412..45e4ffc 100644
@@ -491,6 +491,8 @@ def emit_body(declaration):
             # assert name.startswith('_th_'), \
             # "IndexTensor and BoolTensor are restricted to legacy _th_ functions only.
             return False
+        if arg['name'] in declaration.get('non_differentiable_arg_names', []):
+            return False
         return True
 
     def find_args_with_derivatives(differentiable_inputs):
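
The check added above vetoes arguments that the codegen would otherwise treat as differentiable. A small self-contained sketch of the same idea follows; the simplified is_differentiable predicate and the sample declaration are invented for illustration, while the real check sits inside emit_body in gen_variable_type.py:

    # Sketch only: a simplified differentiability predicate, assuming each
    # declaration carries the 'non_differentiable_arg_names' list populated
    # by load_derivatives.py.
    def is_differentiable(arg, declaration):
        if arg['type'] not in ('Tensor', 'TensorList'):
            return False
        if arg['name'] in declaration.get('non_differentiable_arg_names', []):
            return False
        return True

    declaration = {'non_differentiable_arg_names': ['dropout_state']}
    args = [{'name': 'input', 'type': 'Tensor'},
            {'name': 'dropout_state', 'type': 'Tensor'}]

    print([a['name'] for a in args if is_differentiable(a, declaration)])
    # ['input']; dropout_state is excluded even though it is a Tensor
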
diff --git a/tools/autograd/load_derivatives.py b/tools/autograd/load_derivatives.py
index 70bbb3f..63a77ff 100644
@@ -383,5 +383,5 @@ def match_declarations_with_differentiability_info(declarations, differentiabili
     for declaration in declarations:
         info = find_info(declaration)
         declaration['derivative'] = info['autograd_fn'] if info else None
-        declaration['non_differentiable_arg_names'] = info['non_differentiable_arg_names'] if info else None
+        declaration['non_differentiable_arg_names'] = info['non_differentiable_arg_names'] if info else []
         declaration['output_differentiability'] = info['output_differentiability'] if info else None
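
The default flips from None to an empty list because the membership test added in gen_variable_type.py now runs for every declaration, including ones with no matching differentiability info. A short illustration (the dictionary contents are made up for the example):

    # With None, the later membership test raises; with [], it simply finds nothing.
    info = None   # no differentiability info matched this declaration

    declaration = {}
    declaration['non_differentiable_arg_names'] = (
        info['non_differentiable_arg_names'] if info else [])

    print('dropout_state' in declaration.get('non_differentiable_arg_names', []))
    # False; if the stored value were None this would raise
    # TypeError: argument of type 'NoneType' is not iterable
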