NO_ARGS, [skipIfNoLapack]),
('matrix_power', lambda: random_fullrank_matrix_distinct_singular_value(S, S), [-2], "n=-2",
NO_ARGS, [skipIfNoLapack]),
+ ('mvlgamma', torch.empty(S,).uniform_(0.5, 1), [1], "p=1"),
+ ('mvlgamma', torch.empty(S,).uniform_(1, 2), [2], "p=2"),
+ ('mvlgamma', torch.empty(S, S).uniform_(1.5, 3), [3], "p=3"),
+ ('mvlgamma', torch.empty(S, S).uniform_(2.5, 5), [5], "p=5"),
('addcmul', (S, S), ((S, S), (S, S))),
('addcmul', (S, S), ((S, 1), (1, S)), 'broadcast_rhs'),
('addcmul', (1,), ((S, S, 1), (1, S)), 'broadcast_all'),
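The four mvlgamma entries above sample strictly inside the function's domain: mvlgamma(x, p) requires every element to satisfy x > (p - 1)/2, so the ranges (0.5, 1), (1, 2), (1.5, 3) and (2.5, 5) keep a margin above the respective bounds 0, 0.5, 1 and 2. A minimal sketch (not part of the patch; the helper name mvlgamma_reference is made up here) checking the forward against the standard log-gamma identity:

```python
import math
import torch

S = 5  # small test size, mirroring the S used by the entries above

def mvlgamma_reference(x, p):
    # log Gamma_p(x) = p(p-1)/4 * log(pi) + sum_{j=1}^{p} lgamma(x + (1 - j)/2)
    terms = [torch.lgamma(x + (1 - j) / 2.0) for j in range(1, p + 1)]
    return sum(terms) + p * (p - 1) / 4.0 * math.log(math.pi)

for p, lo, hi in [(1, 0.5, 1), (2, 1, 2), (3, 1.5, 3), (5, 2.5, 5)]:
    x = torch.empty(S, dtype=torch.double).uniform_(lo, hi)
    assert torch.allclose(torch.mvlgamma(x, p), mvlgamma_reference(x, p))
```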
args_variable, kwargs_variable = create_input(args, requires_grad=not is_inplace, call_kwargs=kwargs)
self_tensor = deepcopy(self_variable.data)
args_tensor = deepcopy(unpack_variables(args_variable))
- output_variable = getattr(self_variable, name)(*args_variable, **kwargs_variable)
def fn(*inputs, **kwargs):
output = getattr(inputs[0], name)(*inputs[1:], **kwargs)
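The fn closure above replays the method call on raw tensor inputs so that numerical gradient checks can invoke the operation repeatedly. A standalone sketch (assumed usage, not the harness itself) of how the new mvlgamma entries end up being exercised:

```python
import torch
from torch.autograd import gradcheck

# gradcheck compares the analytic backward against finite differences,
# so double-precision inputs are required.
x = torch.empty(4, dtype=torch.double).uniform_(1.5, 3).requires_grad_()
assert gradcheck(lambda t: torch.mvlgamma(t, 3), (x,))
```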
}
Tensor mvlgamma_backward(Tensor grad, const Tensor & self, int64_t p) {
- Tensor args = at::arange(-p + 1, 1, -1, self.options()).div_(2.);
+ Tensor args = at::arange(-p / 2. + 0.5, 0.5, 0.5, self.options());
args = args.add(self.unsqueeze(-1));
- return grad * args.digamma_().sum(-1).add_(p * (p - 1) * std::log(M_PI) / 4.);
+ return grad * args.digamma_().sum(-1);
}
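The backward fix follows directly from the forward identity: since log Gamma_p(x) = p(p-1)/4 * log(pi) + sum_{j=1}^{p} log Gamma(x + (1 - j)/2), its derivative is just sum_{j=1}^{p} digamma(x + (1 - j)/2). The constant pi term differentiates to zero, so the removed .add_(p * (p - 1) * std::log(M_PI) / 4.) was shifting every gradient by a constant, and the new at::arange(-p / 2. + 0.5, 0.5, 0.5, ...) builds the offsets (1 - j)/2 directly instead of dividing an integer range by two. A minimal numerical check of the corrected formula (a sketch, not part of the patch):

```python
import torch

p = 3
x = torch.empty(4, dtype=torch.double).uniform_(1.5, 3).requires_grad_()
grad, = torch.autograd.grad(torch.mvlgamma(x, p).sum(), x)

# Same offsets the C++ arange builds: (1 - j)/2 for j = p..1.
offsets = torch.arange(-p / 2.0 + 0.5, 0.5, 0.5, dtype=torch.double)
analytic = torch.digamma(x.detach().unsqueeze(-1) + offsets).sum(-1)
assert torch.allclose(grad, analytic)
```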
Tensor permute_backwards(const Tensor & grad, IntList fwd_dims) {