From 260841614639a906a12462723cf38ee3604f69ba Mon Sep 17 00:00:00 2001
From: Yongjoo Ahn <yongjoo1.ahn@samsung.com>
Date: Thu, 7 Sep 2023 17:58:40 +0900
Subject: [PATCH] [pytorch] Fix load model with use_gpu option

- In recent PyTorch, `model->to (device)` does not work properly.
- Use `torch::jit::load (model, torch::Device)` instead.

Signed-off-by: Yongjoo Ahn <yongjoo1.ahn@samsung.com>
---
 ext/nnstreamer/tensor_filter/tensor_filter_pytorch.cc | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_pytorch.cc b/ext/nnstreamer/tensor_filter/tensor_filter_pytorch.cc
index 5a21876..b6362a9 100644
--- a/ext/nnstreamer/tensor_filter/tensor_filter_pytorch.cc
+++ b/ext/nnstreamer/tensor_filter/tensor_filter_pytorch.cc
@@ -202,7 +202,10 @@ TorchCore::loadModel ()
 #endif
 
   try {
-    model = std::make_shared<torch::jit::script::Module> (torch::jit::load (model_path));
+    torch::Device device
+        = use_gpu ? torch::Device (torch::kCUDA) : torch::Device (torch::kCPU);
+    model = std::make_shared<torch::jit::script::Module> (
+        torch::jit::load (model_path, device));
   } catch (const std::invalid_argument &ia) {
     ml_loge ("Invalid argument while loading the model: %s", ia.what ());
     return -1;
@@ -219,10 +222,6 @@ TorchCore::loadModel ()
     return -1;
   }
 
-  if (use_gpu) {
-    model->to (at::kCUDA);
-  }
-
   /** set the model to evaluation mode */
   model->eval ();
 
-- 
2.7.4