- func: pow(Scalar self, Tensor exponent) -> Tensor
matches_jit_signature: True
-- func: normal(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) output) -> Tensor(a!)
+- func: normal(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
- func: normal(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor
matches_jit_signature: True
-- func: normal(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) output) -> Tensor(a!)
+- func: normal(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
- func: normal(float mean, Tensor std, *, Generator? generator=None) -> Tensor
matches_jit_signature: True
-- func: normal(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) output) -> Tensor(a!)
+- func: normal(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
- func: normal(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor
  matches_jit_signature: True
variants: method, function
-- func: _dirichlet_grad(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) output) -> Tensor(a!)
+- func: _dirichlet_grad(Tensor x, Tensor alpha, Tensor total, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
- func: _dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor
matches_jit_signature: True
## NN wrappers
-- func: binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) output) -> Tensor(a!)
+- func: binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor
python_module: nn
- func: binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor weight, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor weight, int reduction) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: mse_loss(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) output) -> Tensor(a!)
+- func: mse_loss(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
python_module: nn
- func: mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: l1_loss(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) output) -> Tensor(a!)
+- func: l1_loss(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
python_module: nn
- func: l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) output) -> Tensor(a!)
+- func: multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean) -> Tensor
python_module: nn
- func: multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor weight, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor weight, int reduction) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) output) -> Tensor(a!)
+- func: multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
python_module: nn
- func: multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) output) -> Tensor(a!)
+- func: nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: nll_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor
python_module: nn
- func: nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) output) -> Tensor(a!)
+- func: nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: nll_loss2d(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor
python_module: nn
- func: nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) output) -> Tensor(a!)
+- func: smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
python_module: nn
- func: smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: soft_margin_loss(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) output) -> Tensor(a!)
+- func: soft_margin_loss(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor
python_module: nn
- func: soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) output) -> Tensor(a!)
+- func: elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor
python_module: nn
- func: elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor output) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: glu(Tensor self, int dim=-1, *, Tensor(a!) output) -> Tensor(a!)
+- func: glu(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: glu(Tensor self, int dim=-1) -> Tensor
python_module: nn
- func: glu_backward(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) output) -> Tensor(a!)
+- func: hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor
python_module: nn
- func: hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: leaky_relu(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) output) -> Tensor(a!)
+- func: leaky_relu(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor
python_module: nn
- func: leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: log_sigmoid(Tensor self, *, Tensor(a!) output) -> Tensor(a!)
+- func: log_sigmoid(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: log_sigmoid(Tensor self) -> Tensor
python_module: nn
- func: log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) output) -> Tensor(a!)
+- func: rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)
matches_jit_signature: False # TODO: The default value of upper and some Caffe2 builds will trigger the assert.
  python_module: nn
- func: rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training) -> Tensor
matches_jit_signature: False # TODO: The default value of upper and some Caffe2 builds will trigger the assert.
python_module: nn
-- func: softplus(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) output) -> Tensor(a!)
+- func: softplus(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor
python_module: nn
- func: softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: softshrink(Tensor self, Scalar lambd=0.5, *, Tensor(a!) output) -> Tensor(a!)
+- func: softshrink(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: softshrink(Tensor self, Scalar lambd=0.5) -> Tensor
python_module: nn
- func: softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: adaptive_avg_pool2d(Tensor self, int[2] output_size, *, Tensor(a!) output) -> Tensor(a!)
+- func: adaptive_avg_pool2d(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
dispatch:
CPU: adaptive_avg_pool2d_out_cpu
CPU: adaptive_avg_pool2d_backward_cpu
CUDA: adaptive_avg_pool2d_backward_cuda
-- func: adaptive_avg_pool3d(Tensor self, int[3] output_size, *, Tensor(a!) output) -> Tensor(a!)
+- func: adaptive_avg_pool3d(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor
python_module: nn
- func: adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor
python_module: nn
- func: adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
python_module: nn
- func: adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, *, Tensor(a!) output) -> Tensor(a!)
+- func: avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor
python_module: nn
- func: avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, *, Tensor(a!) output) -> Tensor(a!)
+- func: avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor
python_module: nn
- func: avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad) -> Tensor
CUDA: fractional_max_pool2d_cuda
- func: fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
dispatch:
CPU: fractional_max_pool2d_backward_out_cpu
CUDA: fractional_max_pool3d_cuda
- func: fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
dispatch:
CPU: fractional_max_pool3d_backward_out_cpu
python_module: nn
- func: max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor
python_module: nn
- func: max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: max_unpool2d(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) output) -> Tensor(a!)
+- func: max_unpool2d(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor
python_module: nn
- func: max_unpool2d_backward(Tensor grad_output, Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: max_unpool2d_backward(Tensor grad_output, Tensor self, Tensor indices, int[2] output_size) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) output) -> Tensor(a!)
+- func: max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor
python_module: nn
- func: max_unpool3d_backward(Tensor grad_output, Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: max_unpool3d_backward(Tensor grad_output, Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: reflection_pad1d(Tensor self, int[2] padding, *, Tensor(a!) output) -> Tensor(a!)
+- func: reflection_pad1d(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
dispatch:
CPU: reflection_pad1d_out_cpu
CUDA: reflection_pad1d_cuda
- func: reflection_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
dispatch:
CPU: reflection_pad1d_backward_out_cpu
CPU: reflection_pad1d_backward_cpu
CUDA: reflection_pad1d_backward_cuda
-- func: reflection_pad2d(Tensor self, int[4] padding, *, Tensor(a!) output) -> Tensor(a!)
+- func: reflection_pad2d(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
dispatch:
CPU: reflection_pad2d_out_cpu
CUDA: reflection_pad2d_cuda
- func: reflection_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
dispatch:
CPU: reflection_pad2d_backward_out_cpu
CPU: reflection_pad2d_backward_cpu
CUDA: reflection_pad2d_backward_cuda
-- func: replication_pad1d(Tensor self, int[2] padding, *, Tensor(a!) output) -> Tensor(a!)
+- func: replication_pad1d(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
dispatch:
CPU: replication_pad1d_out_cpu
CUDA: replication_pad1d_cuda
- func: replication_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
dispatch:
CPU: replication_pad1d_backward_out_cpu
CPU: replication_pad1d_backward_cpu
CUDA: replication_pad1d_backward_cuda
-- func: replication_pad2d(Tensor self, int[4] padding, *, Tensor(a!) output) -> Tensor(a!)
+- func: replication_pad2d(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
dispatch:
CPU: replication_pad2d_out_cpu
CUDA: replication_pad2d_cuda
- func: replication_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
dispatch:
CPU: replication_pad2d_backward_out_cpu
CPU: replication_pad2d_backward_cpu
CUDA: replication_pad2d_backward_cuda
-- func: replication_pad3d(Tensor self, int[6] padding, *, Tensor(a!) output) -> Tensor(a!)
+- func: replication_pad3d(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
dispatch:
CPU: replication_pad3d_out_cpu
CUDA: replication_pad3d_cuda
- func: replication_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
dispatch:
CPU: replication_pad3d_backward_out_cpu
CPU: replication_pad3d_backward_cpu
CUDA: replication_pad3d_backward_cuda
-- func: upsample_linear1d(Tensor self, int[1] output_size, bool align_corners, *, Tensor(a!) output) -> Tensor(a!)
+- func: upsample_linear1d(Tensor self, int[1] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: upsample_linear1d(Tensor self, int[1] output_size, bool align_corners) -> Tensor
python_module: nn
- func: upsample_linear1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: upsample_linear1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners, *, Tensor(a!) output) -> Tensor(a!)
+- func: upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners) -> Tensor
python_module: nn
- func: upsample_bilinear2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: upsample_bilinear2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: upsample_bicubic2d(Tensor self, int[2] output_size, bool align_corners, *, Tensor(a!) output) -> Tensor(a!)
+- func: upsample_bicubic2d(Tensor self, int[2] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: upsample_bicubic2d(Tensor self, int[2] output_size, bool align_corners) -> Tensor
python_module: nn
- func: upsample_bicubic2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: upsample_bicubic2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners, *, Tensor(a!) output) -> Tensor(a!)
+- func: upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners) -> Tensor
python_module: nn
- func: upsample_trilinear3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: upsample_trilinear3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: upsample_nearest1d(Tensor self, int[1] output_size, *, Tensor(a!) output) -> Tensor(a!)
+- func: upsample_nearest1d(Tensor self, int[1] output_size, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: upsample_nearest1d(Tensor self, int[1] output_size) -> Tensor
python_module: nn
- func: upsample_nearest1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: upsample_nearest1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: upsample_nearest2d(Tensor self, int[2] output_size, *, Tensor(a!) output) -> Tensor(a!)
+- func: upsample_nearest2d(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: upsample_nearest2d(Tensor self, int[2] output_size) -> Tensor
python_module: nn
- func: upsample_nearest2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: upsample_nearest2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: upsample_nearest3d(Tensor self, int[3] output_size, *, Tensor(a!) output) -> Tensor(a!)
+- func: upsample_nearest3d(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: upsample_nearest3d(Tensor self, int[3] output_size) -> Tensor
python_module: nn
- func: upsample_nearest3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: upsample_nearest3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size) -> Tensor
python_module: nn
- func: sigmoid_backward(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
python_module: nn
- func: tanh_backward(Tensor grad_output, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: tanh_backward(Tensor grad_output, Tensor output) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: thnn_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) output) -> Tensor(a!)
+- func: thnn_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: thnn_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: thnn_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) output) -> Tensor(a!)
+- func: thnn_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: thnn_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) output) -> Tensor(a!)
+- func: thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: thnn_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, *, Tensor(a!) output) -> Tensor(a!)
+- func: thnn_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: thnn_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: thnn_conv_depthwise2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) output) -> Tensor(a!)
+- func: thnn_conv_depthwise2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: thnn_conv_depthwise2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: thnn_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) output) -> Tensor(a!)
+- func: thnn_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: thnn_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: thnn_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, *, Tensor(a!) output) -> Tensor(a!)
+- func: thnn_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: thnn_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor
matches_jit_signature: True
python_module: nn
-- func: thnn_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, *, Tensor(a!) output) -> Tensor(a!)
+- func: thnn_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, *, Tensor(a!) out) -> Tensor(a!)
+ matches_jit_signature: True
python_module: nn
- func: thnn_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1) -> Tensor