/* implicit */ TensorOptions(Args&&... args)
: TensorOptions(Device(std::forward<Args>(args)...)) {}
/// NOTE: the implicit `TensorOptions(Backend)` constructor was removed in
/// favor of constructing from a `DeviceType` / `Device` directly.
/// Constructs a `TensorOptions` object with the given dtype.
/// All other options keep their defaults from the default-constructed
/// `TensorOptions`.
/* implicit */ TensorOptions(caffe2::TypeMeta dtype) : TensorOptions() {
  // Only the dtype is customized; `set_dtype` mutates this object in place.
  // (A stray `return outputs;` was removed: constructors cannot return a
  // value, and `outputs` was never declared — splice residue.)
  this->set_dtype(dtype);
}
-void testAllreduce(const std::string& path, const at::Backend b) {
+void testAllreduce(const std::string& path, const at::DeviceType b) {
const auto size = 4;
auto tests = CollectiveTest::initialize(path, size);
}
}
void testBroadcast(const std::string& path, const at::DeviceType b) {
const auto size = 2;
const auto stride = 2;
auto tests = CollectiveTest::initialize(path, size);
// This won't work if we ever support sparse CUDA
at::OptionalDeviceGuard deviceGuard;
for (auto l = 0; l < stride; l++) {
if (b == at::DeviceType::CUDA) {
deviceGuard.reset_device(at::Device(at::kCUDA, l));
}
inputs[k][l] = at::ones({16, 16}, b) * (k * stride + l);
{
TemporaryFile file;
testAllreduce(file.path, at::DeviceType::CPU);
}
#ifdef USE_CUDA
{
TemporaryFile file;
testAllreduce(file.path, at::DeviceType::CUDA);
}
#endif
{
TemporaryFile file;
testBroadcast(file.path, at::DeviceType::CPU);
}
#ifdef USE_CUDA
{
TemporaryFile file;
testBroadcast(file.path, at::DeviceType::CUDA);
}
#endif