- real weight
]]
[[
- name: _th_logspace
- cname: logspace
- types:
- - Float
- - Double
- backends:
- - CPU
- - CUDA
- variants:
- - function
- return: argument 0
- arguments:
- - arg: THTensor* result
- output: True
- - real start
- - real end
- - long steps
-]]
-[[
name: _th_histc
cname: histc
types:
return result;
}
+// Fills `result` with `steps` values spaced evenly on a log10 scale from
+// 10^start to 10^end (CPU port of the legacy THTensor_(logspace)).
+// Resizes `result` to {steps} when its element count differs.
+Tensor& logspace_cpu_out(Tensor& result, Scalar start, Scalar end, int64_t steps) {
+  AT_CHECK(steps >= 0, "number of steps must be non-negative");
+
+  if (result.numel() != steps) {
+    result.resize_({steps});
+  }
+  // Compute into a contiguous buffer so the raw data-pointer walk is valid.
+  Tensor r = result.is_contiguous() ? result : result.contiguous();
+
+  if (steps == 0) {
+    // skip
+  } else if (steps == 1) {
+    // Single point: only 10^start; `end` is unused (matches TH behavior).
+    r.fill_(std::pow(10.0, start.to<double>()));
+  } else {
+    AT_DISPATCH_FLOATING_TYPES(r.type(), "logspace", [&]() {
+      scalar_t base10 = 10;
+      scalar_t scalar_start = start.to<scalar_t>();
+      scalar_t scalar_end = end.to<scalar_t>();
+      scalar_t *data_ptr = r.data<scalar_t>();
+      // Exponent increment between consecutive points.
+      scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
+      at::parallel_for(0, steps, internal::GRAIN_SIZE, [&](int64_t p_begin, int64_t p_end) {
+        // `is` mirrors the loop index in scalar_t so the int64->float
+        // conversion happens once per chunk, not once per element.
+        scalar_t is = static_cast<scalar_t>(p_begin);
+        for (int64_t i = p_begin; i < p_end; ++i, ++is) {
+          data_ptr[i]= std::pow(base10, scalar_start + step*is);
+        }
+      });
+    });
+  }
+
+  if (!result.is_contiguous()) {
+    // `r` was a temporary contiguous copy; write the values back.
+    result.copy_(r);
+  }
+  return result;
+}
+
}} // namespace at::native
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ logspace ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Tensor logspace(Scalar start, Scalar end, const TensorOptions& options) {
- return native::logspace(start, end, /*steps=*/100, options);
-}
-
Tensor logspace(
    Scalar start,
    Scalar end,
    int64_t steps,
    const TensorOptions& options) {
-  // Note [Native bindings for legacy TH factory functions]
-  return getFactoryType(options)._th_logspace(start, end, steps);
-}
-
-Tensor& logspace_out(Tensor& result, Scalar start, Scalar end) {
-  return native::logspace_out(result, start, end, /*steps=*/100);
-}
-
-Tensor& logspace_out(Tensor& result, Scalar start, Scalar end, int64_t steps) {
-  return at::legacy::th::_th_logspace_out(result, start, end, steps);
+  // Allocate the output, then delegate to the dispatched out-variant
+  // (CPU/CUDA kernels registered for at::logspace_out).
+  Tensor result = at::empty({steps}, options);
+  return at::logspace_out(result, start, end, steps);
 }
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ones ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
const accT start_, step_;
};
+// Functor mapping a flat element index to 10^(start + index*step); consumed
+// by thrust::tabulate in logspace_cuda_out. accT lets the exponent be
+// accumulated in a (possibly wider) type than the stored element type T.
+template<typename T, typename accT = T>
+struct LogspaceOp {
+  __host__ __device__ LogspaceOp(accT start, accT step):
+    start_(start), step_(step) { }
+  __device__ __forceinline__ T operator()(ptrdiff_t index) {
+    accT increment = step_ * static_cast<accT>(index);
+    accT base10 = 10;
+    accT value = std::pow(base10, start_ + increment);
+    return static_cast<T>(value);
+  }
+
+  const accT start_, step_;
+};
+
Tensor& linspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps) {
AT_CHECK(steps >= 0, "number of steps must be non-negative");
return result;
}
+// CUDA counterpart of logspace_cpu_out: fills `result` with `steps` values
+// spaced evenly on a log10 scale from 10^start to 10^end, computed on the
+// device via thrust::tabulate with the LogspaceOp functor above.
+Tensor& logspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps) {
+  AT_CHECK(steps >= 0, "number of steps must be non-negative");
+
+  if (result.numel() != steps) {
+    result.resize_({steps});
+  }
+  // thrust::tabulate requires a contiguous device buffer.
+  Tensor r = result.is_contiguous() ? result : result.contiguous();
+
+  if (steps == 0) {
+    // skip
+  } else if (steps == 1) {
+    // Single point: only 10^start; `end` is unused (matches TH behavior).
+    r.fill_(std::pow(10.0, start.to<double>()));
+  } else {
+    AT_DISPATCH_FLOATING_TYPES(r.type(), "logspace", [&]() {
+      scalar_t scalar_start = start.to<scalar_t>();
+      scalar_t scalar_end = end.to<scalar_t>();
+      // Exponent increment between consecutive points.
+      scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
+      LogspaceOp<scalar_t> logspace_method(scalar_start, step);
+      thrust::device_ptr<scalar_t> data_(r.data<scalar_t>());
+      thrust::tabulate(data_, data_ + steps, logspace_method);
+    });
+  }
+
+  if (!result.is_contiguous()) {
+    // `r` was a temporary contiguous copy; write the values back.
+    result.copy_(r);
+  }
+  AT_CUDA_CHECK(cudaGetLastError());
+  return result;
+}
+
}} // namespace at::native
- func: logdet(Tensor self) -> Tensor
variants: function, method
-- func: logspace(Scalar start, Scalar end, TensorOptions options={}) -> Tensor
+- func: logspace(Scalar start, Scalar end, int64_t steps=100, TensorOptions options={}) -> Tensor
-- func: logspace(Scalar start, Scalar end, int64_t steps, TensorOptions options={}) -> Tensor
-
-- func: logspace_out(Tensor result, Scalar start, Scalar end) -> Tensor
-
-- func: logspace_out(Tensor result, Scalar start, Scalar end, int64_t steps) -> Tensor
+- func: logspace_out(Tensor result, Scalar start, Scalar end, int64_t steps=100) -> Tensor
+ dispatch:
+ CPU: logspace_cpu_out
+ CUDA: logspace_cuda_out
# FIXME: These could be combined as optional<ScalarType> but for https://github.com/pytorch/pytorch/issues/6593.
- func: log_softmax(Tensor self, int64_t dim, ScalarType dtype) -> Tensor
TH_API accreal THTensor_(stdall)(THTensor *self, int biased);
TH_API accreal THTensor_(normall)(THTensor *t, scalar_t value);
-TH_API void THTensor_(logspace)(THTensor *r_, scalar_t a, scalar_t b, int64_t n);
-
TH_API void THTensor_(dirichlet_grad)(THTensor *self, THTensor *x, THTensor *alpha, THTensor *total);
#endif
return sqrt(THTensor_(varall)(tensor, biased));
}
-void THTensor_(logspace)(THTensor *r_, scalar_t a, scalar_t b, int64_t n)
-{
- scalar_t i = 0;
-
- THArgCheck((n >= 0), 3, "number of points must be non-negative");
-
- if (THTensor_(nElement)(r_) != n) {
- THTensor_(resize1d)(r_, n);
- }
-
- if (n == 0) {
- } else if (n == 1) {
- THTensor_(set1d)(r_, 0, TH_MATH_NAME(pow)(10.0, a));
- } else {
- TH_TENSOR_APPLY(scalar_t, r_,
- *r__data = TH_MATH_NAME(pow)(10.0, a + i*(b-a)/((scalar_t)(n-1)));
- i++;
- );
- }
-}
-
void THTensor_(histc)(THTensor *hist, THTensor *tensor, int64_t nbins, scalar_t minvalue, scalar_t maxvalue)
{
scalar_t minval;
const accT start_, step_;
};
-template<typename T, typename accT = T>
-struct LogspaceOp {
- __host__ __device__ LogspaceOp(accT start, accT step):
- start_(start), step_(step) { }
- __device__ __forceinline__ T operator()(ptrdiff_t index) {
- accT increment = THCNumerics<accT>::mul(step_, ScalarConvert<ptrdiff_t,accT>::to(index));
- accT value = THCNumerics<accT>::exp10(THCNumerics<accT>::add(start_, increment));
- return ScalarConvert<accT,T>::to(value);
- }
-
- const accT start_, step_;
-};
-
-
#include <THC/generic/THCTensorMath.cu>
#include <THC/THCGenerateAllTypes.h>
return trace;
}
-#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
-
-void THCTensor_(logspace)(THCState *state, THCTensor *r_, scalar_t a, scalar_t b, int64_t n) {
- THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
- THArgCheck((n >= 0), 3, "number of points must be non-negative");
- if (THCTensor_(nElement)(state, r_) != n) THCTensor_(resize1d)(state, r_, n);
- if (n == 0) {
- // skip
- } else if (n == 1) THCTensor_(fill)(state, r_, THCNumerics<scalar_t>::exp10(a));
- else {
- THCTensor *r = THCTensor_(isContiguous)(state, r_)
- ? r_
- : THCTensor_(newContiguous)(state, r_);
- scalar_t step = THCNumerics<scalar_t>::div(THCNumerics<scalar_t>::sub(b, a),
- ScalarConvert<int64_t,scalar_t>::to(n - 1));
- LogspaceOp<scalar_t> logspace_method(a, step);
- thrust::device_ptr<scalar_t> data_(THCTensor_(data)(state, r));
- thrust::tabulate(data_, data_ + n, logspace_method);
- if (!THCTensor_(isContiguous)(state, r_)) {
- THCTensor_(freeCopyTo)(state, r, r_);
- }
- }
- THCudaCheck(cudaGetLastError());
-}
-
-#endif
-
void THCTensor_(range)(THCState *state, THCTensor *r_, accreal xmin, accreal xmax, accreal step) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, r_));
THArgCheck(step > 0 || step < 0, 3, "step must be nonzero");
THC_API accreal THCTensor_(trace)(THCState *state, THCTensor *self);
-#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
-
-THC_API void THCTensor_(logspace)(THCState *state, THCTensor *r_, scalar_t a, scalar_t b, int64_t n);
-
-#endif
-
THC_API void THCTensor_(range)(THCState *state, THCTensor *r_, accreal xmin, accreal xmax, accreal step);
THC_API void THCTensor_(arange)(THCState *state, THCTensor *r_, accreal xmin, accreal xmax, accreal step);