gfs2: Remove LM_FLAG_PRIORITY flag
author Andreas Gruenbacher <agruenba@redhat.com>
Tue, 8 Aug 2023 18:27:46 +0000 (20:27 +0200)
committer Andreas Gruenbacher <agruenba@redhat.com>
Tue, 5 Sep 2023 13:58:16 +0000 (15:58 +0200)
The last user of this flag was removed in commit b77b4a4815a9 ("gfs2:
Rework freeze / thaw logic").

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Documentation/filesystems/gfs2-glocks.rst
fs/gfs2/glock.c
fs/gfs2/glock.h
fs/gfs2/lock_dlm.c

index d14f230..93a690b 100644 (file)
@@ -20,8 +20,7 @@ The gl_holders list contains all the queued lock requests (not
 just the holders) associated with the glock. If there are any
 held locks, then they will be contiguous entries at the head
 of the list. Locks are granted in strictly the order that they
-are queued, except for those marked LM_FLAG_PRIORITY which are
-used only during recovery, and even then only for journal locks.
+are queued.
 
 There are three lock states that users of the glock layer can request,
 namely shared (SH), deferred (DF) and exclusive (EX). Those translate
index d0c82b7..72346f3 100644 (file)
@@ -591,8 +591,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
                if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
                        /* move to back of queue and try next entry */
                        if (ret & LM_OUT_CANCELED) {
-                               if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
-                                       list_move_tail(&gh->gh_list, &gl->gl_holders);
+                               list_move_tail(&gh->gh_list, &gl->gl_holders);
                                gh = find_first_waiter(gl);
                                gl->gl_target = gh->gh_state;
                                goto retry;
@@ -679,8 +678,7 @@ __acquires(&gl->gl_lockref.lock)
            gh && !(gh->gh_flags & LM_FLAG_NOEXP))
                goto skip_inval;
 
-       lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
-                     LM_FLAG_PRIORITY);
+       lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP);
        GLOCK_BUG_ON(gl, gl->gl_state == target);
        GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
        if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
@@ -1515,27 +1513,20 @@ fail:
                }
                if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
                        continue;
-               if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
-                       insert_pt = &gh2->gh_list;
        }
        trace_gfs2_glock_queue(gh, 1);
        gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
        gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
        if (likely(insert_pt == NULL)) {
                list_add_tail(&gh->gh_list, &gl->gl_holders);
-               if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
-                       goto do_cancel;
                return;
        }
        list_add_tail(&gh->gh_list, insert_pt);
-do_cancel:
        gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
-       if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
-               spin_unlock(&gl->gl_lockref.lock);
-               if (sdp->sd_lockstruct.ls_ops->lm_cancel)
-                       sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
-               spin_lock(&gl->gl_lockref.lock);
-       }
+       spin_unlock(&gl->gl_lockref.lock);
+       if (sdp->sd_lockstruct.ls_ops->lm_cancel)
+               sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
+       spin_lock(&gl->gl_lockref.lock);
        return;
 
 trap_recursive:
@@ -2227,8 +2218,6 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
                *p++ = 'e';
        if (flags & LM_FLAG_ANY)
                *p++ = 'A';
-       if (flags & LM_FLAG_PRIORITY)
-               *p++ = 'p';
        if (flags & LM_FLAG_NODE_SCOPE)
                *p++ = 'n';
        if (flags & GL_ASYNC)
index 1f1ba92..c8685ca 100644 (file)
@@ -68,14 +68,6 @@ enum {
  * also be granted in SHARED.  The preferred state is whichever is compatible
  * with other granted locks, or the specified state if no other locks exist.
  *
- * LM_FLAG_PRIORITY
- * Override fairness considerations.  Suppose a lock is held in a shared state
- * and there is a pending request for the deferred state.  A shared lock
- * request with the priority flag would be allowed to bypass the deferred
- * request and directly join the other shared lock.  A shared lock request
- * without the priority flag might be forced to wait until the deferred
- * requested had acquired and released the lock.
- *
  * LM_FLAG_NODE_SCOPE
  * This holder agrees to share the lock within this node. In other words,
  * the glock is held in EX mode according to DLM, but local holders on the
@@ -86,7 +78,6 @@ enum {
 #define LM_FLAG_TRY_1CB                0x0002
 #define LM_FLAG_NOEXP          0x0004
 #define LM_FLAG_ANY            0x0008
-#define LM_FLAG_PRIORITY       0x0010
 #define LM_FLAG_NODE_SCOPE     0x0020
 #define GL_ASYNC               0x0040
 #define GL_EXACT               0x0080
index 5491129..59ab18c 100644 (file)
@@ -222,11 +222,6 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
                lkf |= DLM_LKF_NOQUEUEBAST;
        }
 
-       if (gfs_flags & LM_FLAG_PRIORITY) {
-               lkf |= DLM_LKF_NOORDER;
-               lkf |= DLM_LKF_HEADQUE;
-       }
-
        if (gfs_flags & LM_FLAG_ANY) {
                if (req == DLM_LOCK_PR)
                        lkf |= DLM_LKF_ALTCW;