From c1b92f518d64c51eacd2e2ec11c198a3fdb107b8 Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Thu, 11 Apr 2019 09:14:31 -0700
Subject: [PATCH] Remove ProcessGroup::getGroupRank (#19147)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/19147

After #14809 was merged, there is no longer a need for getGroupRank.
Every ProcessGroup object has its own rank and size fields, which are
accurate for the global group as well as for subgroups.

Strictly speaking, removing a function in a minor version bump is a big
no-no, but I highly doubt this was ever used outside of
`torch.distributed` itself. This will result in a compile error for
folks who have subclassed the ProcessGroup class, though. If this is a
concern, we can delay merging until a later point in time, but
eventually this will need to be cleaned up.

Differential Revision: D14889736

fbshipit-source-id: 3846fe118b3265b50a10ab8b1c75425dad06932d
---
 torch/csrc/distributed/c10d/init.cpp | 5 -----
 torch/lib/c10d/ProcessGroup.hpp      | 2 --
 torch/lib/c10d/ProcessGroupGloo.cpp  | 4 ----
 torch/lib/c10d/ProcessGroupGloo.hpp  | 2 --
 torch/lib/c10d/ProcessGroupMPI.cpp   | 4 ----
 torch/lib/c10d/ProcessGroupMPI.hpp   | 2 --
 torch/lib/c10d/ProcessGroupNCCL.cpp  | 4 ----
 torch/lib/c10d/ProcessGroupNCCL.hpp  | 2 --
 8 files changed, 25 deletions(-)

diff --git a/torch/csrc/distributed/c10d/init.cpp b/torch/csrc/distributed/c10d/init.cpp
index a0c5ab2..a610fc7 100644
--- a/torch/csrc/distributed/c10d/init.cpp
+++ b/torch/csrc/distributed/c10d/init.cpp
@@ -341,11 +341,6 @@ They are used in specifying strategies for reduction collectives, e.g.,
               "barrier",
               &::c10d::ProcessGroup::barrier,
               py::arg("opts") = ::c10d::BarrierOptions(),
-              py::call_guard<py::gil_scoped_release>())
-
-          .def(
-              "group_ranks",
-              &::c10d::ProcessGroup::getGroupRank,
               py::call_guard<py::gil_scoped_release>());
 
   auto processGroupGloo = shared_ptr_class_<::c10d::ProcessGroupGloo>(
diff --git a/torch/lib/c10d/ProcessGroup.hpp b/torch/lib/c10d/ProcessGroup.hpp
index b05d8a0..f96dfdb 100644
--- a/torch/lib/c10d/ProcessGroup.hpp
+++ b/torch/lib/c10d/ProcessGroup.hpp
@@ -144,8 +144,6 @@ class ProcessGroup {
   virtual std::shared_ptr<ProcessGroup::Work> barrier(
       const BarrierOptions& opts = BarrierOptions()) = 0;
 
-  virtual std::unordered_map<int, int> getGroupRank() = 0;
-
  protected:
   const int rank_;
   const int size_;
diff --git a/torch/lib/c10d/ProcessGroupGloo.cpp b/torch/lib/c10d/ProcessGroupGloo.cpp
index e799c81..492d923 100644
--- a/torch/lib/c10d/ProcessGroupGloo.cpp
+++ b/torch/lib/c10d/ProcessGroupGloo.cpp
@@ -1525,8 +1525,4 @@ std::shared_ptr<ProcessGroup::Work> ProcessGroupGloo::barrier(
   return work;
 }
 
-std::unordered_map<int, int> ProcessGroupGloo::getGroupRank() {
-  throw std::runtime_error("ProcessGroupGloo does not support getGroupRank");
-}
-
 } // namespace c10d
diff --git a/torch/lib/c10d/ProcessGroupGloo.hpp b/torch/lib/c10d/ProcessGroupGloo.hpp
index 1e8ea37..56bed13 100644
--- a/torch/lib/c10d/ProcessGroupGloo.hpp
+++ b/torch/lib/c10d/ProcessGroupGloo.hpp
@@ -179,8 +179,6 @@ class ProcessGroupGloo : public ProcessGroup {
   std::shared_ptr<ProcessGroup::Work> barrier(
       const BarrierOptions& opts = BarrierOptions()) override;
 
-  std::unordered_map<int, int> getGroupRank() override;
-
  protected:
   std::unique_ptr<::gloo::rendezvous::Store> store_;
   std::vector<std::shared_ptr<::gloo::Context>> contexts_;
diff --git a/torch/lib/c10d/ProcessGroupMPI.cpp b/torch/lib/c10d/ProcessGroupMPI.cpp
index 395bbb8..7bac679 100644
--- a/torch/lib/c10d/ProcessGroupMPI.cpp
+++ b/torch/lib/c10d/ProcessGroupMPI.cpp
@@ -635,8 +635,4 @@ std::shared_ptr<ProcessGroup::Work> ProcessGroupMPI::barrier(
   return enqueue(std::move(entry));
 }
 
-std::unordered_map<int, int> ProcessGroupMPI::getGroupRank() {
-  throw std::runtime_error("ProcessGroupMPI does not support getGroupRank");
-}
-
 } // namespace c10d
diff --git a/torch/lib/c10d/ProcessGroupMPI.hpp b/torch/lib/c10d/ProcessGroupMPI.hpp
index b3511dc..d482999 100644
--- a/torch/lib/c10d/ProcessGroupMPI.hpp
+++ b/torch/lib/c10d/ProcessGroupMPI.hpp
@@ -150,8 +150,6 @@ class ProcessGroupMPI : public ProcessGroup {
   std::shared_ptr<ProcessGroup::Work> barrier(
       const BarrierOptions& opts = BarrierOptions()) override;
 
-  std::unordered_map<int, int> getGroupRank();
-
   // Creating a new ProcessGroupMPI, will initiialize MPI if not initialized
   static std::shared_ptr<ProcessGroupMPI> createProcessGroupMPI(
       std::vector<int> ranks = {});
diff --git a/torch/lib/c10d/ProcessGroupNCCL.cpp b/torch/lib/c10d/ProcessGroupNCCL.cpp
index 486d2ce..7e1ca62 100644
--- a/torch/lib/c10d/ProcessGroupNCCL.cpp
+++ b/torch/lib/c10d/ProcessGroupNCCL.cpp
@@ -689,8 +689,4 @@ std::shared_ptr<ProcessGroup::Work> ProcessGroupNCCL::recvAnysource(
   throw std::runtime_error("ProcessGroupNCCL does not support recv");
 }
 
-std::unordered_map<int, int> ProcessGroupNCCL::getGroupRank() {
-  throw std::runtime_error("ProcessGroupNCCL doest not support getGroupRank");
-}
-
 } // namespace c10d
diff --git a/torch/lib/c10d/ProcessGroupNCCL.hpp b/torch/lib/c10d/ProcessGroupNCCL.hpp
index bfccae71..9d9c8f5 100644
--- a/torch/lib/c10d/ProcessGroupNCCL.hpp
+++ b/torch/lib/c10d/ProcessGroupNCCL.hpp
@@ -164,8 +164,6 @@ class ProcessGroupNCCL : public ProcessGroup {
   std::shared_ptr<ProcessGroup::Work> barrier(
       const BarrierOptions& opts = BarrierOptions()) override;
 
-  std::unordered_map<int, int> getGroupRank() override;
-
  protected:
   // Helper that broadcasts nccl unique ID to all ranks through the store
   void broadcastUniqueNCCLID(ncclUniqueId* ncclID);
-- 
2.7.4
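
Note (not part of the patch): to illustrate the claim in the summary that each
ProcessGroup carries its own rank and size, here is a minimal sketch of how a
caller obtains a rank within a subgroup through the public torch.distributed
API instead of a rank-translation helper like getGroupRank. The env:// setup
(MASTER_ADDR, MASTER_PORT, RANK, WORLD_SIZE), the Gloo backend, the two-process
launch, and the subgroup of ranks 0 and 1 are all illustrative assumptions.

    # Minimal sketch, not part of the patch above. Assumes an env:// setup
    # (MASTER_ADDR, MASTER_PORT, RANK, WORLD_SIZE) and at least two processes.
    import torch.distributed as dist

    dist.init_process_group(backend="gloo")   # global ProcessGroup

    # Illustrative subgroup containing global ranks 0 and 1.
    subgroup = dist.new_group(ranks=[0, 1])

    global_rank = dist.get_rank()             # rank in the global group
    if global_rank in (0, 1):
        # The subgroup's ProcessGroup tracks its own rank and size, so no
        # separate global-to-group rank mapping is needed.
        subgroup_rank = dist.get_rank(group=subgroup)
        subgroup_size = dist.get_world_size(group=subgroup)
        print(global_rank, subgroup_rank, subgroup_size)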