TH_API void THTensor_(cmaxValue)(THTensor *r, THTensor *t, scalar_t value);
TH_API void THTensor_(cminValue)(THTensor *r, THTensor *t, scalar_t value);
-TH_API void THTensor_(zerosLike)(THTensor *r_, THTensor *input);
-TH_API void THTensor_(onesLike)(THTensor *r_, THTensor *input);
TH_API void THTensor_(diag)(THTensor *r_, THTensor *t, int k);
-TH_API void THTensor_(eye)(THTensor *r_, int64_t n, int64_t m);
-TH_API void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, int64_t n);
TH_API void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder);
TH_API void THTensor_(topk)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int64_t k, int dim, int dir, int sorted);
TH_API void THTensor_(trunc)(THTensor *r_, THTensor *t);
TH_API void THTensor_(frac)(THTensor *r_, THTensor *t);
-TH_API void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension, int keepdim);
TH_API void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim);
TH_API void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim);
TH_API void THTensor_(norm)(THTensor *r_, THTensor *t, scalar_t value, int dimension, int keepdim);
*r_data = *t_data > value ? value : *t_data;); // this order propagates NaN
}
-/* zerosLike: resize r_ to input's shape, then fill it with zeros. */
-void THTensor_(zerosLike)(THTensor *r_, THTensor *input)
-{
- THTensor_(resizeAs)(r_, input);
- THTensor_(zero)(r_);
-}
-
-/* onesLike: resize r_ to input's shape, then fill it with ones. */
-void THTensor_(onesLike)(THTensor *r_, THTensor *input)
-{
- THTensor_(resizeAs)(r_, input);
- THTensor_(fill)(r_, 1);
-}
-
void THTensor_(diag)(THTensor *r_, THTensor *t, int k)
{
THArgCheck(THTensor_(nDimensionLegacyNoScalars)(t) == 1 || THTensor_(nDimensionLegacyNoScalars)(t) == 2, 1, "matrix or a vector expected");
}
}
-/* eye: write an n-by-m matrix into r_ with ones on the main diagonal
-   and zeros elsewhere. m <= 0 is treated as m = n (square matrix).
-   Requires n > 0. */
-void THTensor_(eye)(THTensor *r_, int64_t n, int64_t m)
-{
- scalar_t *r__data;
- int64_t i, sz;
-
- THArgCheck(n > 0, 1, "invalid argument");
-
- /* non-positive column count means "same as rows" */
- if(m <= 0)
- m = n;
-
- THTensor_(resize2d)(r_, n, m);
- THTensor_(zero)(r_);
-
- i = 0;
- r__data = r_->data<scalar_t>();
- sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1));
- /* stride(0)+stride(1) moves one row down and one column right per
-    step, i.e. along the main diagonal, for any memory layout. */
- for(i = 0; i < sz; i++)
- r__data[i*(r_->stride(0)+r_->stride(1))] = 1;
-}
-
-/* randperm: fill r_ (resized to length n) with a random permutation of
-   {0, ..., n-1} via a Fisher-Yates shuffle driven by _generator.
-   Requires n > 0. */
-void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, int64_t n)
-{
- /* serialize access to the shared generator's internal state */
- std::lock_guard<std::mutex> lock(_generator->mutex);
- scalar_t *r__data;
- int64_t r__stride_0;
- int64_t i;
-
- THArgCheck(n > 0, 1, "must be strictly positive");
-
- THTensor_(resize1d)(r_, n);
- r__data = r_->data<scalar_t>();
- r__stride_0 = THTensor_(stride)(r_,0);
-
- /* start from the identity permutation 0..n-1 */
- for(i = 0; i < n; i++)
- r__data[i*r__stride_0] = (scalar_t)(i);
-
- /* Fisher-Yates: swap slot i with a slot chosen in [i, n).
-    NOTE(review): the modulo on THRandom_random introduces a slight
-    bias when (n-i) does not divide the generator's range evenly. */
- for(i = 0; i < n-1; i++)
- {
- int64_t z = THRandom_random(_generator) % (n-i);
- scalar_t sav = r__data[i*r__stride_0];
- r__data[i*r__stride_0] = r__data[(z+i)*r__stride_0];
- r__data[(z+i)*r__stride_0] = sav;
- }
-}
/* I cut and pasted (slightly adapted) the quicksort code from
Sedgewick's 1978 "Implementing Quicksort Programs" article
}
}
-/* mean: reduce t along `dimension` into r_, computed as
-   sum over that dimension divided by the dimension's size.
-   keepdim is forwarded to the underlying sum. */
-void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension, int keepdim)
-{
- THArgCheck(dimension >= 0 && dimension < THTensor_(nDimensionLegacyAll)(t), 2, "invalid dimension %d",
- dimension);
-
- THTensor_(sum)(r_, t, dimension, keepdim);
- THTensor_(div)(r_, r_, THTensor_sizeLegacyNoScalars(t, dimension));
-}
-
void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim)
{
THArgCheck(dimension >= 0 && dimension < THTensor_(nDimensionLegacyAll)(t), 3, "invalid dimension %d",
THC_API void THCTensor_(fill)(THCState *state, THCTensor *self, scalar_t value);
THC_API void THCTensor_(zero)(THCState *state, THCTensor *self);
-THC_API void THCTensor_(zerosLike)(THCState *state, THCTensor *r_, THCTensor* input);
-THC_API void THCTensor_(onesLike)(THCState *state, THCTensor *r_, THCTensor* input);
THC_API ptrdiff_t THCTensor_(numel)(THCState *state, THCTensor *t);
THC_API void THCTensor_(cat)(THCState *state, THCTensor *result, THCTensor *ta, THCTensor *tb, int dimension);
THC_API void THCTensor_(catArray)(THCState *state, THCTensor *result, THCTensor **inputs, int numInputs, int dimension);
THC_API void THCTensor_(triu)(THCState *state, THCTensor *self, THCTensor *src, int64_t k);
THC_API void THCTensor_(diag)(THCState *state, THCTensor *self, THCTensor *src, int64_t k);
-THC_API void THCTensor_(eye)(THCState *state, THCTensor *self, int64_t n, int64_t k);
THC_API accreal THCTensor_(trace)(THCState *state, THCTensor *self);
#define THC_GENERIC_FILE "THC/generic/THCTensorMathReduce.cu"
#else
-/* GPU sum over `dimension`: THC_reduceDim with identity input/output
-   transforms, ReduceAdd as the combiner, and accumulator seeded at 0.
-   Fails with CUTORCH_DIM_WARNING when the reduction cannot be set up. */
-void THCTensor_(sum)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
- THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
- if (!THC_reduceDim<scalar_t>(state, self, src,
- thrust::identity<accreal>{},
- ReduceAdd<accreal>{},
- thrust::identity<accreal>{},
- scalar_cast<accreal>(0),
- dimension,
- keepdim)) {
- THArgCheck(false, 2, CUTORCH_DIM_WARNING);
- }
-
- THCudaCheck(cudaGetLastError());
-}
-
void THCTensor_(prod)(THCState* state, THCTensor *self, THCTensor *src, int dimension, int keepdim) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
if (!THC_reduceDim<scalar_t>(state, self, src,
THCudaCheck(cudaGetLastError());
}
-/* GPU mean over `dim`: same add-reduction as sum, but with a
-   ReduceDivide finalizer that divides the accumulated total by the
-   size of the reduced dimension. */
-void THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim)
-{
- THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src));
- const accreal size = scalar_cast<accreal>(THCTensor_(size)(state, src, dim));
- if (!THC_reduceDim<scalar_t>(state, self, src,
- thrust::identity<accreal>{},
- ReduceAdd<accreal>{},
- ReduceDivide<accreal>{size},
- scalar_cast<accreal>(0),
- dim,
- keepdim)) {
- THArgCheck(false, 2, CUTORCH_DIM_WARNING);
- }
-
- THCudaCheck(cudaGetLastError());
-}
-
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF)
void THCTensor_(renorm)(THCState *state, THCTensor* self, THCTensor* src, scalar_t value, int dimension, scalar_t maxnorm)
return val;
}
-/* GPU product of all elements: full-tensor reduction with
-   ReduceMultiply and accumulator seeded at 1; returns the scalar
-   result as accreal. */
-accreal THCTensor_(prodall)(THCState *state, THCTensor *self) {
- THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
- accreal val;
- if (!THC_reduceAll<scalar_t>(state, self,
- thrust::identity<accreal>{},
- ReduceMultiply<accreal>{},
- scalar_cast<accreal>(1),
- &val, 0)) {
- THArgCheck(false, 1, CUTORCH_DIM_WARNING);
- }
-
- THCudaCheck(cudaGetLastError());
- return val;
-}
-
accreal THCTensor_(meanall)(THCState *state, THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self));
#endif
-THC_API void THCTensor_(sum)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim);
THC_API void THCTensor_(prod)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim);
-THC_API void THCTensor_(mean)(THCState *state, THCTensor *self, THCTensor *src, int dim, int keepdim);
THC_API accreal THCTensor_(sumall)(THCState *state, THCTensor *self);
-THC_API accreal THCTensor_(prodall)(THCState *state, THCTensor *self);
THC_API accreal THCTensor_(meanall)(THCState *state, THCTensor *self);
THC_API void THCTensor_(min)(THCState *state,