// to implement this without going through Derived Types (which are not part of core).
inline at::Tensor scalar_to_tensor(Scalar s) {
if (s.isFloatingPoint()) {
- return at::CPU(kDouble).scalarTensor(s);
+ return at::scalar_tensor(s, at::CPU(kDouble).options());
} else {
AT_ASSERT(s.isIntegral());
- return at::CPU(kLong).scalarTensor(s);
+ return at::scalar_tensor(s, at::CPU(kLong).options());
}
}
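// Illustration (sketch, assuming the usual ATen headers): the helper always
// picks the widest CPU dtype for each Scalar kind, e.g.
//   at::Tensor d = scalar_to_tensor(3.14); // 0-dim kDouble CPU tensor
//   at::Tensor i = scalar_to_tensor(7);    // 0-dim kLong CPU tensor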
virtual Tensor tensorFromBlob(void * data, IntList sizes, IntList strides, const std::function<void(void*)> & deleter=noop_deleter) const = 0;
virtual Tensor tensorWithAllocator(IntList sizes, Allocator* allocator) const = 0;
virtual Tensor tensorWithAllocator(IntList sizes, IntList strides, Allocator* allocator) const = 0;
- virtual Tensor scalarTensor(Scalar s) const = 0;
bool operator==(const Type& other) const {
return this == &other;
env, wrapped_tensor=wrapped_tensor, maybe_scalar=maybe_scalar))
# return the same underlying Tensor type for both real and accreal; this ensures
# e.g. x.sum(0) and x.sum() return the same type. We explicitly cast to the
- # ScalarType before constructing the scalarTensor to avoid overflow checking.
+ # ScalarType before constructing the scalar_tensor to avoid overflow checking.
elif ret['type'] == 'accreal' or ret['type'] == 'real':
- return_scalar = 'return scalarTensor(convert<${ScalarType}>(${call}));'
+ return_scalar = 'return at::scalar_tensor(convert<${ScalarType}>(${call}), options());'
body.append(CodeTemplate(return_scalar).substitute(env, call=call))
else:
# we're using int64_t for long in the API, so correct it here...
// types (int, float, etc.) to Tensor (only to Scalar). They're not exposed
// to Python.
-static Tensor scalar_tensor(Scalar scalar) {
+static Tensor wrapped_scalar_tensor(Scalar scalar) {
auto tensor = scalar_to_tensor(scalar);
tensor.unsafeGetTensorImpl()->set_wrapped_number(true);
return tensor;
}
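// Sketch of the intent: set_wrapped_number(true) marks the 0-dim tensor as a
// stand-in for a C++ number, so dispatch can keep treating the operand like a
// Scalar (it should not, e.g., drive result types the way a real tensor would).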
Tensor add(const Tensor& self, Scalar other, Scalar alpha) {
- return native::add(self, scalar_tensor(other), alpha);
+ return native::add(self, wrapped_scalar_tensor(other), alpha);
}
Tensor& add_(Tensor& self, Scalar other, Scalar alpha) {
- return native::add_(self, scalar_tensor(other), alpha);
+ return native::add_(self, wrapped_scalar_tensor(other), alpha);
}
Tensor div(const Tensor& self, Scalar other) {
- return native::div(self, scalar_tensor(other));
+ return native::div(self, wrapped_scalar_tensor(other));
}
Tensor& div_(Tensor& self, Scalar other) {
- return native::div_(self, scalar_tensor(other));
+ return native::div_(self, wrapped_scalar_tensor(other));
}
Tensor mul(const Tensor& self, Scalar other) {
- return native::mul(self, scalar_tensor(other));
+ return native::mul(self, wrapped_scalar_tensor(other));
}
Tensor& mul_(Tensor& self, Scalar other) {
- return native::mul_(self, scalar_tensor(other));
+ return native::mul_(self, wrapped_scalar_tensor(other));
}
Tensor sub(const Tensor& self, Scalar other, Scalar alpha) {
- return native::sub(self, scalar_tensor(other), alpha);
+ return native::sub(self, wrapped_scalar_tensor(other), alpha);
}
Tensor& sub_(Tensor& self, Scalar other, Scalar alpha) {
- return native::sub_(self, scalar_tensor(other), alpha);
+ return native::sub_(self, wrapped_scalar_tensor(other), alpha);
}
Tensor rsub(const Tensor& self, Scalar other, Scalar alpha) {
- return native::rsub(self, scalar_tensor(other), alpha);
+ return native::rsub(self, wrapped_scalar_tensor(other), alpha);
}
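// Call sites are unchanged: each Scalar overload builds the wrapped 0-dim
// tensor and defers to the Tensor-Tensor kernel, e.g. (sketch):
//   at::Tensor t = at::ones({2, 2});
//   at::Tensor u = at::add(t, /*other=*/5, /*alpha=*/1);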
}
Tensor result = at::native::sum(self);
return result.div_(self.numel());
} else {
- return self.type().scalarTensor(std::numeric_limits<double>::quiet_NaN());
+ return at::scalar_tensor(std::numeric_limits<double>::quiet_NaN(), self.options());
}
}
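// i.e. the mean of an empty tensor is defined here as NaN, since sum/numel
// would otherwise be 0/0.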
return at::legacy::th::_th_norm(self, p);
} else {
if (self.is_contiguous()) {
- Tensor result = CPU(kFloat).scalarTensor(0).toType(self.type());
+ Tensor result = at::scalar_tensor(0, CPU(kFloat).options()).toType(self.type());
norm_kernel(kCPU, result, self, p, c10::nullopt);
return result;
} else {
Scalar ident) {
// Return identity
if (self.numel() == 0) {
- return self.type().scalarTensor(ident);
+ return at::scalar_tensor(ident, self.options());
}
return c10::nullopt;
}
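// Sketch: when self.numel() == 0, a reduction returns its identity directly
// (e.g. 0 for sum, 1 for prod); a non-empty input falls through via
// c10::nullopt to the real kernel.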
return native::ones(self.sizes(), options);
}
+// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ scalar_tensor ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Tensor scalar_tensor(Scalar s, const TensorOptions& options) {
+ return at::empty({}, options).fill_(s);
+}
+
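// Usage sketch for the new factory (assumes <ATen/ATen.h>):
//   at::Tensor pi = at::scalar_tensor(3.14, at::dtype(at::kDouble));
//   AT_ASSERT(pi.dim() == 0);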
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ rand ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tensor rand(IntList size, const TensorOptions& options) {
Tensor& randperm_out_cuda(Tensor& result, int64_t n, Generator* generator) {
AT_CHECK(n >= 0, "n must be non-negative, got ", n);
- AT_CHECK(result.type().scalarTensor(n).defined(),
+ AT_CHECK(at::scalar_tensor(n, result.options()).defined(),
"n is too large for result tensor type: '", result.type().toString(), "'");
result.resize_({n});
- func: pinverse(Tensor self, double rcond=1e-15) -> Tensor
variants: function, method
+- func: scalar_tensor(Scalar s, *, TensorOptions options={}) -> Tensor
+
- func: rand(IntList size, *, TensorOptions options={}) -> Tensor
- func: rand(IntList size, *, Generator* generator, TensorOptions options={}) -> Tensor
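# Sketch of what the new declaration generates (no explicit `variants:` line,
# so the default function variant applies): the C++ free function
# at::scalar_tensor(Scalar, const TensorOptions&) and, presumably, a
# torch.scalar_tensor Python binding.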
// mul(SparseTensor, Scalar)
// --------------------------------------------------------------------
-static Tensor scalar_tensor(Scalar s) {
+static Tensor wrapped_scalar_tensor(Scalar s) {
auto tensor = scalar_to_tensor(s);
tensor.unsafeGetTensorImpl()->set_wrapped_number(true);
return tensor;
}
SparseTensor& mul_out_sparse_scalar(SparseTensor& r, const SparseTensor& t, Scalar value) {
- return mul_out_sparse_zerodim(r, t, scalar_tensor(value));
+ return mul_out_sparse_zerodim(r, t, wrapped_scalar_tensor(value));
}
// --------------------------------------------------------------------
}
SparseTensor& div_out_sparse_scalar(SparseTensor& r, const SparseTensor& t, Scalar value) {
- return div_out_sparse_zerodim(r, t, scalar_tensor(value));
+ return div_out_sparse_zerodim(r, t, wrapped_scalar_tensor(value));
}
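// Note: both sparse scalar overloads above reuse the zerodim kernels, so a
// Scalar operand behaves like a wrapped 0-dim dense tensor.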
// --------------------------------------------------------------------
int64_t nnz = sparse._nnz();
if (nnz == 0) {
- at::mul_out(r, t, r.type().scalarTensor(beta));
+ at::mul_out(r, t, at::scalar_tensor(beta, r.options()));
return r;
}
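// i.e. with no nonzeros, addmm degenerates to r = beta * t, computed here as
// mul_out against a 0-dim beta tensor.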
return Storage(c10::intrusive_ptr<StorageImpl>::reclaim(static_cast<StorageImpl*>(th_pointer)));
}
-
-Tensor TypeDefault::scalarTensor(Scalar s) const {
- return at::empty({}, this->options()).fill_(s);
-}
-
${type_method_definitions}
}
Tensor tensorFromBlob(void * data, IntList sizes, IntList strides, const std::function<void(void*)> & deleter=noop_deleter) const override;
Tensor tensorWithAllocator(IntList sizes, Allocator* allocator) const override;
Tensor tensorWithAllocator(IntList sizes, IntList strides, Allocator* allocator) const override;
- Tensor scalarTensor(Scalar s) const override;
Storage storage(bool resizable = false) const override;
Storage storage(size_t size, bool resizable = false) const override;
}
}
void TestAbsValue(Type& type) {
- Tensor r = at::abs(type.scalarTensor(-3));
+ Tensor r = at::abs(at::scalar_tensor(-3, type.options()));
ASSERT_EQ_RESOLVED(r.item<int32_t>(), 3);
}
/*
}
void TestZeroDim(Type& type) {
- Tensor a = type.scalarTensor(4); // rand(type, {1});
+ Tensor a = at::scalar_tensor(4, type.options()); // rand(type, {1});
Tensor b = rand({3, 4}, type);
ASSERT_EQ_RESOLVED((a + a).dim(), 0);
return reinterpret_cast<THPVariable*>(value)->cdata;
}
if (THPUtils_checkLong(value)) {
- return type.scalarTensor(Scalar(THPUtils_unpackLong(value)));
+ return at::scalar_tensor(Scalar(THPUtils_unpackLong(value)), type.options());
}
if (PyFloat_Check(value)) {
- return type.scalarTensor(Scalar(THPUtils_unpackDouble(value)));
+ return at::scalar_tensor(Scalar(THPUtils_unpackDouble(value)), type.options());
}
throw TypeError("can't assign a %s to a %s", Py_TYPE(value)->tp_name, type.toString());
}
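// Sketch of the effect: an assignment like `t[0] = 3` reaches this point as a
// PyObject*, is unpacked to int64_t (or double for a Python float), and comes
// back as a 0-dim tensor built with `type`'s options; anything else raises
// the TypeError above.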