From f68faa35c05210203bc5e300153a56728484c8a6 Mon Sep 17 00:00:00 2001
From: Sebastian Messmer
Date: Fri, 22 Mar 2019 14:05:50 -0700
Subject: [PATCH] Avoid refcount when looking up dispatch key

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18294

Reviewed By: ezyang

Differential Revision: D14512979

fbshipit-source-id: 45e548974f06184c375c2bb8339e3049a4ebd880
---
 aten/src/ATen/core/dispatch/DispatchTable.h | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/aten/src/ATen/core/dispatch/DispatchTable.h b/aten/src/ATen/core/dispatch/DispatchTable.h
index 08e43c4..78b048b 100644
--- a/aten/src/ATen/core/dispatch/DispatchTable.h
+++ b/aten/src/ATen/core/dispatch/DispatchTable.h
@@ -184,12 +184,13 @@ private:
       reverse_index_of_first_tensor_arg_
     );
     if (first_tensor_arg_is_tensor_list_) {
-      auto tensor_list = first_tensor_arg.toTensorList();
-      if (tensor_list->elements().size() == 0) {
+      const auto& tensor_list = first_tensor_arg.toTensorListRef();
+      if (tensor_list.size() == 0) {
         throw std::runtime_error("Tried to dispatch based on an empty tensor list. When the first tensor argument of an operator is a tensor list, then it must not be empty.");
       }
-      return tensor_list->elements()[0].type_id();
+      return tensor_list[0].type_id();
     } else {
+      // TODO Avoid bumping the refcounter
       return first_tensor_arg.toTensor().type_id();
     }
   }
-- 
2.7.4
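
Note on the pattern: the patch replaces toTensorList(), which hands back an owning, refcounted handle, with toTensorListRef(), which returns a const reference to the underlying list, so the dispatch-key lookup no longer touches a reference count on the hot path. The following is a minimal, self-contained sketch of that pattern only; the Value/Tensor types and member names here are hypothetical stand-ins, not the real ATen IValue API.

#include <cassert>
#include <memory>
#include <vector>

// Hypothetical stand-in for a tensor carrying its dispatch key.
struct Tensor { int type_id; };

// Hypothetical stand-in for a boxed value holding a tensor list.
struct Value {
  std::shared_ptr<std::vector<Tensor>> list;

  // Owning accessor: copying the shared_ptr bumps the refcount.
  std::shared_ptr<std::vector<Tensor>> toTensorList() const {
    return list;
  }

  // Non-owning accessor: returns a const reference, no refcount traffic.
  const std::vector<Tensor>& toTensorListRef() const {
    return *list;
  }
};

// Mirrors the patched lookup: read through the reference, never copy the handle.
int dispatchKeyOf(const Value& v) {
  const auto& tensors = v.toTensorListRef();
  assert(!tensors.empty() && "cannot dispatch on an empty tensor list");
  return tensors[0].type_id;
}

int main() {
  Value v{std::make_shared<std::vector<Tensor>>(std::vector<Tensor>{{42}})};
  return dispatchKeyOf(v) == 42 ? 0 : 1;
}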