auto algorithm = nnp_convolution_algorithm_auto;
// All Tensors must be float Tensors
- if (input.dispatch_type().ID() != at::TypeID::CPUFloat ||
- weight.dispatch_type().ID() != at::TypeID::CPUFloat ||
- output.dispatch_type().ID() != at::TypeID::CPUFloat ||
- (bias.defined() && bias.dispatch_type().ID() != at::TypeID::CPUFloat)) {
+ if (input.device().type() != kCPU || input.scalar_type() != kFloat ||
+ weight.device().type() != kCPU || weight.scalar_type() != kFloat ||
+ output.device().type() != kCPU || output.scalar_type() != kFloat ||
+ (bias.defined() && (bias.device().type() != kCPU || bias.scalar_type() != kFloat))) {
throw std::runtime_error(
"Mismatched Tensor types in NNPack convolutionOutput");
}
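The new condition repeats the same device/dtype test for input, weight, output and bias; as an aside, that test can be factored into a small helper on the public Tensor API. A minimal sketch (the helper name is hypothetical, not part of this change):

  #include <ATen/ATen.h>

  // True when `t` is a CPU tensor holding 32-bit floats, mirroring the check above.
  inline bool is_cpu_float(const at::Tensor& t) {
    return t.device().type() == at::kCPU && t.scalar_type() == at::kFloat;
  }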
namespace torch { namespace nn {
-inline bool check_type(PyObject* obj, at::TypeID typeID) {
+inline bool check_type(PyObject* obj, at::TensorTypeId id, at::ScalarType dtype) {
if (THPVariable_Check(obj)) {
auto& tensor = ((THPVariable*)obj)->cdata;
- return at::globalContext().getNonVariableType(tensor.type().backend(), tensor.scalar_type()).ID() == typeID;
+ return tensor.type_id() == id && tensor.dtype() == dtype;
}
return false;
}
}
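For intuition, the updated check_type compares the tensor's TensorTypeId and dtype directly instead of routing through getNonVariableType(...).ID(). A minimal standalone sketch under the same pre-dispatcher API this patch targets (demo() is a hypothetical name):

  #include <ATen/ATen.h>

  // A dense CPU float tensor satisfies both halves of the new check.
  bool demo() {
    at::Tensor t = at::ones({2, 3}, at::kFloat);
    return t.type_id() == at::CPUTensorId() && t.dtype() == at::kFloat;
  }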
static inline bool THNN_FloatTensor_Check(PyObject* obj) {
- return torch::nn::check_type(obj, at::TypeID::CPUFloat);
+ return torch::nn::check_type(obj, at::CPUTensorId(), at::kFloat);
}
static inline bool THNN_DoubleTensor_Check(PyObject* obj) {
- return torch::nn::check_type(obj, at::TypeID::CPUDouble);
+ return torch::nn::check_type(obj, at::CPUTensorId(), at::kDouble);
}
static inline bool THNN_LongTensor_Check(PyObject* obj) {
- return torch::nn::check_type(obj, at::TypeID::CPULong);
+ return torch::nn::check_type(obj, at::CPUTensorId(), at::kLong);
}
static inline bool THNN_IntTensor_Check(PyObject* obj) {
- return torch::nn::check_type(obj, at::TypeID::CPUInt);
+ return torch::nn::check_type(obj, at::CPUTensorId(), at::kInt);
}
static inline THFloatTensor* THNN_FloatTensor_Unpack(PyObject* obj) {
#ifdef USE_CUDA
static inline bool THNN_CudaHalfTensor_Check(PyObject* obj) {
- return torch::nn::check_type(obj, at::TypeID::CUDAHalf);
+ return torch::nn::check_type(obj, at::CUDATensorId(), at::kHalf);
}
static inline bool THNN_CudaFloatTensor_Check(PyObject* obj) {
- return torch::nn::check_type(obj, at::TypeID::CUDAFloat);
+ return torch::nn::check_type(obj, at::CUDATensorId(), at::kFloat);
}
static inline bool THNN_CudaDoubleTensor_Check(PyObject* obj) {
- return torch::nn::check_type(obj, at::TypeID::CUDADouble);
+ return torch::nn::check_type(obj, at::CUDATensorId(), at::kDouble);
}
static inline bool THNN_CudaLongTensor_Check(PyObject* obj) {
- return torch::nn::check_type(obj, at::TypeID::CUDALong);
+ return torch::nn::check_type(obj, at::CUDATensorId(), at::kLong);
}
static inline THCudaHalfTensor* THNN_CudaHalfTensor_Unpack(PyObject* obj) {
std::vector<TensorGroup> results;
// an overapproximation, but at least we won't have to copy stuff around
results.reserve(tensors.size());
- std::map<TypeID, TensorGroup> groups;
+ std::unordered_map<DeprecatedTypeProperties*, TensorGroup> groups;
size_t cur_group_size = 0;
for (const auto & tensor : tensors) {
  size_t tensor_size = tensor.numel() * tensor.element_size();
- auto& type_group = groups[tensor.dispatch_type().ID()];
+ auto& type_group = groups[&tensor.type()];
type_group.tensors.push_back(tensor);
if (fine_grained) {