From 8d742c4a8b2b8d1f59954be8adcb78017f93926f Mon Sep 17 00:00:00 2001
From: Oleg Nesterov
Date: Thu, 30 Nov 2023 12:56:06 +0100
Subject: [PATCH] afs: fix the usage of read_seqbegin_or_lock() in
 afs_lookup_volume_rcu()

[ Upstream commit 4121b4337146b64560d1e46ebec77196d9287802 ]

David Howells says:

 (2) afs_lookup_volume_rcu().

     There can be a lot of volumes known by a system.  A thousand would
     require a 10-step walk and this is drivable by remote operation, so
     I think this should probably take a lock on the second pass too.

Make the "seq" counter odd on the 2nd pass, otherwise
read_seqbegin_or_lock() never takes the lock.

Signed-off-by: Oleg Nesterov
Signed-off-by: David Howells
cc: Marc Dionne
cc: linux-afs@lists.infradead.org
Link: https://lore.kernel.org/r/20231130115606.GA21571@redhat.com/
Signed-off-by: Sasha Levin
---
 fs/afs/callback.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index a484fa6..90f9b2a 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -110,13 +110,14 @@ static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell,
 {
 	struct afs_volume *volume = NULL;
 	struct rb_node *p;
-	int seq = 0;
+	int seq = 1;
 
 	do {
 		/* Unfortunately, rbtree walking doesn't give reliable results
 		 * under just the RCU read lock, so we have to check for
 		 * changes.
		 */
+		seq++; /* 2 on the 1st/lockless path, otherwise odd */
 		read_seqbegin_or_lock(&cell->volume_lock, &seq);
 
 		p = rcu_dereference_raw(cell->volumes.rb_node);
-- 
2.7.4
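
For context, here is an illustrative sketch (not part of the patch) of the
read_seqbegin_or_lock()/need_seqretry()/done_seqretry() pattern the fix relies
on; "my_lock", "thing_ptr" and lookup_thing() are hypothetical names.  Starting
"seq" at 1 and incrementing it at the top of the loop keeps the first pass even
(lockless) and forces any retry pass odd, so read_seqbegin_or_lock() takes the
lock on the second walk, exactly as the commit message describes:

#include <linux/seqlock.h>
#include <linux/rcupdate.h>

struct thing;

struct my_data {
	seqlock_t		my_lock;	/* guards updates to thing_ptr */
	struct thing __rcu	*thing_ptr;
};

static struct thing *lookup_thing(struct my_data *d)
{
	struct thing *t;
	int seq = 1;	/* not 0: must end up odd on the retry pass */

	rcu_read_lock();
	do {
		/* 1st pass: seq becomes 2 (even), so this is a lockless
		 * seqcount read and *seq is replaced by read_seqbegin().
		 * Retry pass: the increment makes that stored (even) count
		 * odd, so read_seqbegin_or_lock() takes my_lock instead.
		 */
		seq++;
		read_seqbegin_or_lock(&d->my_lock, &seq);

		t = rcu_dereference_raw(d->thing_ptr);
	} while (need_seqretry(&d->my_lock, seq));

	done_seqretry(&d->my_lock, seq);
	rcu_read_unlock();
	return t;
}

With the pre-fix code (seq = 0 and no increment), every pass through the loop
remained even, so the lock was never taken and a persistently racing writer
could keep the reader retrying indefinitely.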