void host_softmax(Tensor output, const Tensor& input, const int64_t dim) {
  int64_t outer_size = 1;
  int64_t dim_size = input.size(dim);
- if (input.numel() == 0) {
-   return;
- }
  int64_t inner_size = 1;
  for (int64_t i = 0; i < dim; ++i)
    outer_size *= input.size(i);

  auto input = input_.contiguous();
  Tensor output = at::native::empty_like(input);
  int64_t dim = maybe_wrap_dim(dim_, input.dim());
- if (input.dim() == 0)
+
+ if (input.numel() == 0) {
+   return output;
+ }
+ if (input.dim() == 0)
    input = input.view(1);
  AT_CHECK(
      dim >= 0 && dim < input.dim(),

  auto input = input_.contiguous();
  Tensor output = at::native::empty_like(input);
  int64_t dim = maybe_wrap_dim(dim_, input.dim());
+
+ if (input.numel() == 0) {
+   return output;
+ }
  if (input.dim() == 0)
    input = input.view(1);
  AT_CHECK(

  auto output = output_.contiguous();
  Tensor grad_input = at::native::empty_like(grad);
+ if (output.numel() == 0) {
+   return grad_input;
+ }
  if (grad.dim() == 0)
    grad = grad.view(1);
  if (output.dim() == 0)

  auto output = output_.contiguous();
  Tensor grad_input = at::native::empty_like(grad);
+ if (output.numel() == 0) {
+   return grad_input;
+ }
  if (grad.dim() == 0)
    grad = grad.view(1);
  if (output.dim() == 0)

# softmax, logsoftmax
self.assertEqual(x, torch.nn.functional.softmax(x, 0))
self.assertEqual(x, torch.nn.functional.softmax(x, 2))
+ self.assertEqual(x, torch.nn.functional.softmax(x, 3))
self.assertEqual(x, torch.nn.functional.log_softmax(x, 0))
self.assertEqual(x, torch.nn.functional.log_softmax(x, 2))
+ self.assertEqual(x, torch.nn.functional.log_softmax(x, 3))
# cumsum, cumprod
self.assertEqual(shape, torch.cumsum(x, 0).shape)
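
Taken together, the C++ hunks move the zero-element early return out of host_softmax and into the CPU entry points, which now return the freshly allocated (and therefore empty) output or grad_input, while the test hunk adds coverage for one more dim. A minimal sketch of the behavior the new assertions exercise, assuming x is any tensor with a zero-sized dimension (the exact shape used in the test is not shown in this excerpt):

import torch
import torch.nn.functional as F

x = torch.randn(0, 1, 2, 0)    # assumed shape; any tensor with a zero-sized dim behaves the same
out = F.softmax(x, dim=3)      # dim 3 mirrors the new assertEqual line above
assert out.shape == x.shape    # the result is an empty tensor of the same shape
assert torch.equal(out, x)     # both tensors are empty, so they compare equal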