From 228804a35caa7edae4a81049281e7f106dea1ad1 Mon Sep 17 00:00:00 2001
From: Andreas Gruenbacher
Date: Sat, 10 Dec 2022 02:21:32 +0100
Subject: [PATCH] gfs2: Make glock lru list scanning safer

In __gfs2_glock_put(), remove the glock from the lru list *after*
dropping the glock lock. This prevents deadlocks against
gfs2_scan_glock_lru().

In gfs2_scan_glock_lru(), make sure that the glock's reference count is
zero before moving the glock to the dispose list. This skips glocks
that are marked dead as well as glocks that are still in use.

Additionally, switch to spin_trylock() as we already do in
gfs2_dispose_glock_lru(); this alone would also be enough to prevent
deadlocks against __gfs2_glock_put().

Signed-off-by: Andreas Gruenbacher
---
 fs/gfs2/glock.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 05ef0ff..fbfbf7a 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -274,9 +274,8 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
 	struct address_space *mapping = gfs2_glock2aspace(gl);
 
 	lockref_mark_dead(&gl->gl_lockref);
-
-	gfs2_glock_remove_from_lru(gl);
 	spin_unlock(&gl->gl_lockref.lock);
+	gfs2_glock_remove_from_lru(gl);
 	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 	if (mapping) {
 		truncate_inode_pages_final(mapping);
@@ -1995,9 +1994,14 @@ static long gfs2_scan_glock_lru(int nr)
 			break;
 		/* Test for being demotable */
 		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
-			list_move(&gl->gl_lru, &dispose);
-			atomic_dec(&lru_count);
-			freed++;
+			if (!spin_trylock(&gl->gl_lockref.lock))
+				continue;
+			if (!gl->gl_lockref.count) {
+				list_move(&gl->gl_lru, &dispose);
+				atomic_dec(&lru_count);
+				freed++;
+			}
+			spin_unlock(&gl->gl_lockref.lock);
 		}
 	}
 	if (!list_empty(&dispose))
-- 
2.7.4
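
Note (not part of the patch): the scanning rule adopted above, "trylock the
per-glock lock, then only dispose of entries whose reference count is zero,"
can be illustrated outside the kernel. The following is a minimal standalone
user-space sketch, not gfs2 code; the toy_glock structure, the toy_scan_lru()
helper and the pthread mutex standing in for gl_lockref.lock are assumptions
made for the example only.

#include <pthread.h>
#include <stdio.h>

struct toy_glock {
	pthread_mutex_t lock;	/* stands in for gl_lockref.lock */
	int count;		/* stands in for gl_lockref.count */
	int busy;		/* stands in for the GLF_LOCK flag */
	struct toy_glock *next;	/* next entry on the toy "lru" list */
};

/*
 * Count the entries that could safely be moved to a dispose list,
 * following the two rules from the patch:
 *  1. never block on an entry's lock while walking the list
 *     (pthread_mutex_trylock() here, spin_trylock() in the patch), and
 *  2. only dispose of entries whose reference count is zero, which also
 *     skips entries that have already been marked dead.
 */
static long toy_scan_lru(struct toy_glock *lru)
{
	struct toy_glock *gl;
	long freed = 0;

	for (gl = lru; gl; gl = gl->next) {
		if (gl->busy)
			continue;	/* GLF_LOCK equivalent: skip */
		if (pthread_mutex_trylock(&gl->lock))
			continue;	/* lock held elsewhere: try again later */
		if (gl->count == 0)
			freed++;	/* would be moved to the dispose list */
		pthread_mutex_unlock(&gl->lock);
	}
	return freed;
}

int main(void)
{
	struct toy_glock c = { PTHREAD_MUTEX_INITIALIZER, 0, 0, NULL };	/* disposable */
	struct toy_glock b = { PTHREAD_MUTEX_INITIALIZER, 2, 0, &c };	/* still referenced */
	struct toy_glock a = { PTHREAD_MUTEX_INITIALIZER, 0, 1, &b };	/* busy, skipped */

	printf("disposable entries: %ld\n", toy_scan_lru(&a));	/* prints 1 */
	return 0;
}

This builds with a plain C compiler (e.g. cc -pthread) and prints 1: only the
idle, unreferenced entry qualifies, mirroring how the patched
gfs2_scan_glock_lru() leaves busy, referenced or lock-contended glocks on the
lru list instead of blocking on them.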