// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "dir.h"
#include "config.h"
#include "ast.h"
#include "memory.h"
#include "rcom.h"
#include "lock.h"
#include "lowcomms.h"
#include "member.h"
#include "recover.h"

/*
 * Recovery waiting routines: these functions wait for a particular reply from
 * a remote node, or for the remote node to report a certain status.  They need
 * to abort if the lockspace is stopped, indicating a node has failed (perhaps
 * the one being waited for).
 */

/*
 * Wait until the given function returns non-zero or the lockspace is stopped
 * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes).  When another
 * function thinks it could have completed the waited-on task, it should wake
 * up ls_wait_general to get an immediate response rather than waiting for the
 * timeout.  This uses a timeout so it can check periodically if the wait
 * should abort due to node failure (which doesn't cause a wake_up).
 * This should only be called by the dlm_recoverd thread.
 */

int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
{
	int error = 0;
	int rv;

	while (1) {
		rv = wait_event_timeout(ls->ls_wait_general,
					testfn(ls) || dlm_recovery_stopped(ls),
					dlm_config.ci_recover_timer * HZ);
		if (rv)
			break;
		if (test_bit(LSFL_RCOM_WAIT, &ls->ls_flags)) {
			log_debug(ls, "dlm_wait_function timed out");
			return -ETIMEDOUT;
		}
	}

	if (dlm_recovery_stopped(ls)) {
		log_debug(ls, "dlm_wait_function aborted");
		error = -EINTR;
	}
	return error;
}

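/*
 * Illustrative sketch (not kernel code, never compiled; all names here
 * are hypothetical): the wait pattern above, modeled in userspace with
 * pthreads.  The timeout is not for the event itself; it exists so the
 * waiter can periodically re-check an abort flag that is set without
 * any accompanying wakeup, just as dlm_wait_function() must notice
 * dlm_recovery_stopped() without a wake_up().
 */
#if 0
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_general = PTHREAD_COND_INITIALIZER;
static atomic_int task_done;		/* set by a worker, with a wakeup */
static atomic_int recovery_stop;	/* set on node failure, no wakeup */

static int wait_function(void)
{
	struct timespec ts;

	pthread_mutex_lock(&lock);
	while (!atomic_load(&task_done) && !atomic_load(&recovery_stop)) {
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += 1;		/* plays the role of ci_recover_timer */
		pthread_cond_timedwait(&wait_general, &lock, &ts);
	}
	pthread_mutex_unlock(&lock);

	return atomic_load(&recovery_stop) ? -1 : 0;
}

static void *worker(void *arg)
{
	(void)arg;
	sleep(2);			/* pretend to do the waited-on task */
	atomic_store(&task_done, 1);
	pthread_cond_signal(&wait_general); /* the immediate-response path */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	printf("wait_function returned %d\n", wait_function());
	pthread_join(t, NULL);
	return 0;
}
#endif
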
/*
 * An efficient way for all nodes to wait for all others to have a certain
 * status.  The node with the lowest nodeid polls all the others for their
 * status (wait_status_all) and all the others poll the node with the low id
 * for its accumulated result (wait_status_low).  When all nodes have set
 * status flag X, then status flag X_ALL will be set on the low nodeid.
 */

uint32_t dlm_recover_status(struct dlm_ls *ls)
{
	uint32_t status;

	spin_lock(&ls->ls_recover_lock);
	status = ls->ls_recover_status;
	spin_unlock(&ls->ls_recover_lock);
	return status;
}

static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	ls->ls_recover_status |= status;
}

void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	spin_lock(&ls->ls_recover_lock);
	_set_recover_status(ls, status);
	spin_unlock(&ls->ls_recover_lock);
}

static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
			   int save_slots, uint64_t seq)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	struct dlm_member *memb;
	int error = 0, delay;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		delay = 0;
		for (;;) {
			if (dlm_recovery_stopped(ls)) {
				error = -EINTR;
				goto out;
			}

			error = dlm_rcom_status(ls, memb->nodeid, 0, seq);
			if (error)
				goto out;

			if (save_slots)
				dlm_slot_save(ls, rc, memb);

			if (le32_to_cpu(rc->rc_result) & wait_status)
				break;
			if (delay < 1000)
				delay += 20;
			msleep(delay);
		}
	}
 out:
	return error;
}

static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
			   uint32_t status_flags, uint64_t seq)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;

	for (;;) {
		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			goto out;
		}

		error = dlm_rcom_status(ls, nodeid, status_flags, seq);
		if (error)
			break;

		if (le32_to_cpu(rc->rc_result) & wait_status)
			break;
		if (delay < 1000)
			delay += 20;
		msleep(delay);
	}
 out:
	return error;
}

static int wait_status(struct dlm_ls *ls, uint32_t status, uint64_t seq)
{
	uint32_t status_all = status << 1;
	int error;

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, status, 0, seq);
		if (!error)
			dlm_set_recover_status(ls, status_all);
	} else
		error = wait_status_low(ls, status_all, 0, seq);

	return error;
}

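/*
 * Illustrative sketch (not kernel code, never compiled; hypothetical
 * names): the status-aggregation scheme described in the comment above
 * dlm_recover_status(), reduced to its decision logic.  Each node runs
 * one line of this table per recovery barrier: the low node polls
 * everyone for X and then publishes X_ALL; everyone else polls only the
 * low node for X_ALL.  X_ALL is assumed to be the next flag bit,
 * matching "status << 1" in wait_status() above.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void status_round(int low_nodeid, int our_nodeid, int num_nodes,
			 uint32_t x)
{
	uint32_t x_all = x << 1;	/* same convention as wait_status() */

	if (our_nodeid == low_nodeid)
		printf("node %d: poll %d nodes for 0x%x, then set 0x%x\n",
		       our_nodeid, num_nodes, x, x_all);
	else
		printf("node %d: poll node %d for 0x%x\n",
		       our_nodeid, low_nodeid, x_all);
}

int main(void)
{
	/* four nodes, node 1 has the lowest nodeid */
	for (int n = 1; n <= 4; n++)
		status_round(1, n, 4, 0x1);
	return 0;
}
#endif
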
int dlm_recover_members_wait(struct dlm_ls *ls, uint64_t seq)
{
	struct dlm_member *memb;
	struct dlm_slot *slots;
	int num_slots, slots_size;
	int error, rv;
	uint32_t gen;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		memb->slot = -1;
		memb->generation = 0;
	}

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, DLM_RS_NODES, 1, seq);
		if (error)
			goto out;

		/* slots array is sparse, slots_size may be > num_slots */

		rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
		if (!rv) {
			spin_lock(&ls->ls_recover_lock);
			_set_recover_status(ls, DLM_RS_NODES_ALL);
			ls->ls_num_slots = num_slots;
			ls->ls_slots_size = slots_size;
			ls->ls_slots = slots;
			ls->ls_generation = gen;
			spin_unlock(&ls->ls_recover_lock);
		} else {
			dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
		}
	} else {
		error = wait_status_low(ls, DLM_RS_NODES_ALL,
					DLM_RSF_NEED_SLOTS, seq);
		if (error)
			goto out;

		dlm_slots_copy_in(ls);
	}
 out:
	return error;
}

int dlm_recover_directory_wait(struct dlm_ls *ls, uint64_t seq)
{
	return wait_status(ls, DLM_RS_DIR, seq);
}

int dlm_recover_locks_wait(struct dlm_ls *ls, uint64_t seq)
{
	return wait_status(ls, DLM_RS_LOCKS, seq);
}

int dlm_recover_done_wait(struct dlm_ls *ls, uint64_t seq)
{
	return wait_status(ls, DLM_RS_DONE, seq);
}

/*
 * The recover_list contains all the rsb's for which we've requested the new
 * master nodeid.  As replies are returned from the resource directories the
 * rsb's are removed from the list.  When the list is empty we're done.
 *
 * The recover_list is later similarly used for all rsb's for which we've sent
 * new lkb's and need to receive new corresponding lkid's.
 *
 * We use the address of the rsb struct as a simple local identifier for the
 * rsb so we can match an rcom reply with the rsb it was sent for.
 */

static int recover_list_empty(struct dlm_ls *ls)
{
	int empty;

	spin_lock(&ls->ls_recover_list_lock);
	empty = list_empty(&ls->ls_recover_list);
	spin_unlock(&ls->ls_recover_list_lock);

	return empty;
}

static void recover_list_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	if (list_empty(&r->res_recover_list)) {
		list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
		ls->ls_recover_list_count++;
		dlm_hold_rsb(r);
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

static void recover_list_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	list_del_init(&r->res_recover_list);
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_list_lock);

	dlm_put_rsb(r);
}

static void recover_list_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *s;

	spin_lock(&ls->ls_recover_list_lock);
	list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
		list_del_init(&r->res_recover_list);
		r->res_recover_locks_count = 0;
		dlm_put_rsb(r);
		ls->ls_recover_list_count--;
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

static int recover_idr_empty(struct dlm_ls *ls)
{
	int empty = 1;

	spin_lock(&ls->ls_recover_idr_lock);
	if (ls->ls_recover_list_count)
		empty = 0;
	spin_unlock(&ls->ls_recover_idr_lock);

	return empty;
}

static int recover_idr_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	int rv;

	idr_preload(GFP_NOFS);
	spin_lock(&ls->ls_recover_idr_lock);
	if (r->res_id) {
		rv = -1;
		goto out_unlock;
	}
	rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT);
	if (rv < 0)
		goto out_unlock;

	r->res_id = rv;
	ls->ls_recover_list_count++;
	dlm_hold_rsb(r);
	rv = 0;
out_unlock:
	spin_unlock(&ls->ls_recover_idr_lock);
	idr_preload_end();
	return rv;
}

static void recover_idr_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_idr_lock);
	idr_remove(&ls->ls_recover_idr, r->res_id);
	r->res_id = 0;
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_idr_lock);

	dlm_put_rsb(r);
}

static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
{
	struct dlm_rsb *r;

	spin_lock(&ls->ls_recover_idr_lock);
	r = idr_find(&ls->ls_recover_idr, (int)id);
	spin_unlock(&ls->ls_recover_idr_lock);

	return r;
}

static void recover_idr_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int id;

	spin_lock(&ls->ls_recover_idr_lock);

	idr_for_each_entry(&ls->ls_recover_idr, r, id) {
		idr_remove(&ls->ls_recover_idr, id);
		r->res_id = 0;
		r->res_recover_locks_count = 0;
		ls->ls_recover_list_count--;

		dlm_put_rsb(r);
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_idr_lock);
}

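/*
 * Illustrative userspace model (not kernel code, never compiled;
 * hypothetical names): matching an rcom reply to the rsb it was sent
 * for.  The functions above use an idr keyed by r->res_id; the sketch
 * reduces that to an array of in-flight slots, where the slot index
 * plays the role of the rc_id cookie carried in the rcom.
 */
#if 0
#include <stdio.h>

struct rsb { const char *name; };

#define MAX_INFLIGHT 8
static struct rsb *inflight[MAX_INFLIGHT];

static int lookup_add(struct rsb *r)	/* like recover_idr_add() */
{
	for (int id = 1; id < MAX_INFLIGHT; id++) {
		if (!inflight[id]) {
			inflight[id] = r;
			return id;	/* sent to the dir node as rc_id */
		}
	}
	return -1;
}

static struct rsb *lookup_find(int id)	/* like recover_idr_find() */
{
	return (id > 0 && id < MAX_INFLIGHT) ? inflight[id] : NULL;
}

int main(void)
{
	struct rsb r = { "resource-A" };
	int id = lookup_add(&r);

	/* ...a reply later arrives carrying the same id... */
	printf("reply id %d -> %s\n", id, lookup_find(id)->name);
	return 0;
}
#endif
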
/* Master recovery: find new master node for rsb's that were
   mastered on nodes that have been removed.

   dlm_recover_masters
   recover_master
   dlm_send_rcom_lookup            ->  receive_rcom_lookup
                                       dlm_dir_lookup
   receive_rcom_lookup_reply       <-
   dlm_recover_master_reply
   set_new_master
   set_master_lkbs
   set_lock_master
*/

/*
 * Set the lock master for all LKBs in a lock queue
 * If we are the new master of the rsb, we may have received new
 * MSTCPY locks from other nodes already which we need to ignore
 * when setting the new nodeid.
 */

static void set_lock_master(struct list_head *queue, int nodeid)
{
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, queue, lkb_statequeue) {
		if (!test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) {
			lkb->lkb_nodeid = nodeid;
			lkb->lkb_remid = 0;
		}
	}
}

static void set_master_lkbs(struct dlm_rsb *r)
{
	set_lock_master(&r->res_grantqueue, r->res_nodeid);
	set_lock_master(&r->res_convertqueue, r->res_nodeid);
	set_lock_master(&r->res_waitqueue, r->res_nodeid);
}

/*
 * Propagate the new master nodeid to locks
 * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
 * The NEW_MASTER2 flag tells recover_lvb() and recover_grant() which
 * rsb's to consider.
 */

static void set_new_master(struct dlm_rsb *r)
{
	set_master_lkbs(r);
	rsb_set_flag(r, RSB_NEW_MASTER);
	rsb_set_flag(r, RSB_NEW_MASTER2);
}

/*
 * We do async lookups on rsb's that need new masters.  The rsb's
 * waiting for a lookup reply are kept on the recover_list.
 *
 * Another node recovering the master may have sent us a rcom lookup,
 * and our dlm_master_lookup() set it as the new master, along with
 * NEW_MASTER so that we'll recover it here (this implies dir_nodeid
 * equals our_nodeid below).
 */

static int recover_master(struct dlm_rsb *r, unsigned int *count, uint64_t seq)
{
	struct dlm_ls *ls = r->res_ls;
	int our_nodeid, dir_nodeid;
	int is_removed = 0;
	int error;

	if (is_master(r))
		return 0;

	is_removed = dlm_is_removed(ls, r->res_nodeid);

	if (!is_removed && !rsb_flag(r, RSB_NEW_MASTER))
		return 0;

	our_nodeid = dlm_our_nodeid();
	dir_nodeid = dlm_dir_nodeid(r);

	if (dir_nodeid == our_nodeid) {
		if (is_removed) {
			r->res_master_nodeid = our_nodeid;
			r->res_nodeid = 0;
		}

		/* set master of lkbs to ourself when is_removed, or to
		   another new master which we set along with NEW_MASTER
		   in dlm_master_lookup */
		set_new_master(r);
		error = 0;
	} else {
		recover_idr_add(r);
		error = dlm_send_rcom_lookup(r, dir_nodeid, seq);
	}

	(*count)++;
	return error;
}

/*
 * All MSTCPY locks are purged and rebuilt, even if the master stayed the same.
 * This is necessary because recovery can be started, aborted and restarted,
 * causing the master nodeid to briefly change during the aborted recovery, and
 * change back to the original value in the second recovery.  The MSTCPY locks
 * may or may not have been purged during the aborted recovery.  Another node
 * with an outstanding request in the waiters list and a request reply saved
 * in the requestqueue cannot know whether it should ignore the reply and
 * resend the request, or accept the reply and complete the request.  It must
 * do the former if the remote node purged MSTCPY locks, and it must do the
 * latter if the remote node did not.  This is solved by always purging MSTCPY
 * locks, in which case the request reply would always be ignored and the
 * request resent.
 */

static int recover_master_static(struct dlm_rsb *r, unsigned int *count)
{
	int dir_nodeid = dlm_dir_nodeid(r);
	int new_master = dir_nodeid;

	if (dir_nodeid == dlm_our_nodeid())
		new_master = 0;

	dlm_purge_mstcpy_locks(r);
	r->res_master_nodeid = dir_nodeid;
	r->res_nodeid = new_master;
	set_new_master(r);
	(*count)++;
	return 0;
}

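/*
 * Illustrative sketch (not kernel code, never compiled; hypothetical
 * names) of the reasoning in the comment above recover_master_static():
 * a node with a saved request reply cannot observe whether the master
 * purged its MSTCPY locks during an aborted recovery.  Pinning that
 * unknowable input to "always purged" leaves a single correct action,
 * so the waiter can unconditionally ignore the reply and resend.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

enum action { ACCEPT_REPLY, RESEND_REQUEST };

/* what the waiter should do, if it somehow knew what the master did */
static enum action correct_action(bool master_purged_mstcpy)
{
	return master_purged_mstcpy ? RESEND_REQUEST : ACCEPT_REPLY;
}

int main(void)
{
	/* After an aborted and restarted recovery the argument is
	 * unknowable to the waiter.  Because the master always purges,
	 * the protocol fixes it to true, so RESEND_REQUEST is always
	 * the correct choice. */
	printf("always purge => action %d for every saved reply\n",
	       correct_action(true));
	return 0;
}
#endif
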
/*
 * Go through local root resources and for each rsb which has a master which
 * has departed, get the new master nodeid from the directory.  The dir will
 * assign mastery to the first node to look up the new master.  That means
 * we'll discover in this lookup if we're the new master of any rsb's.
 *
 * We fire off all the dir lookup requests individually and asynchronously to
 * the correct dir node.
 */

int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq)
{
	struct dlm_rsb *r;
	unsigned int total = 0;
	unsigned int count = 0;
	int nodir = dlm_no_directory(ls);
	int error;

	log_rinfo(ls, "dlm_recover_masters");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (dlm_recovery_stopped(ls)) {
			up_read(&ls->ls_root_sem);
			error = -EINTR;
			goto out;
		}

		lock_rsb(r);
		if (nodir)
			error = recover_master_static(r, &count);
		else
			error = recover_master(r, &count, seq);
		unlock_rsb(r);
		cond_resched();
		total++;

		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}
	}
	up_read(&ls->ls_root_sem);

	log_rinfo(ls, "dlm_recover_masters %u of %u", count, total);

	error = dlm_wait_function(ls, &recover_idr_empty);
 out:
	if (error)
		recover_idr_clear(ls);
	return error;
}

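/*
 * Illustrative userspace model (not kernel code, never compiled;
 * hypothetical names): "the dir will assign mastery to the first node
 * to look up the new master", per the comment above
 * dlm_recover_masters().  The directory keeps one entry per resource;
 * an empty entry is filled with the nodeid of the first requester, and
 * every later lookup sees the same answer.
 */
#if 0
#include <stdio.h>

static int dir_entry;	/* 0 => no master recorded for this resource */

static int dir_lookup(int asking_nodeid)
{
	if (!dir_entry)
		dir_entry = asking_nodeid; /* first asker becomes master */
	return dir_entry;
}

int main(void)
{
	printf("node 3 asks: master=%d\n", dir_lookup(3)); /* 3 wins */
	printf("node 5 asks: master=%d\n", dir_lookup(5)); /* still 3 */
	return 0;
}
#endif
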
int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc)
{
	struct dlm_rsb *r;
	int ret_nodeid, new_master;

	r = recover_idr_find(ls, le64_to_cpu(rc->rc_id));
	if (!r) {
		log_error(ls, "dlm_recover_master_reply no id %llx",
			  (unsigned long long)le64_to_cpu(rc->rc_id));
		goto out;
	}

	ret_nodeid = le32_to_cpu(rc->rc_result);

	if (ret_nodeid == dlm_our_nodeid())
		new_master = 0;
	else
		new_master = ret_nodeid;

	lock_rsb(r);
	r->res_master_nodeid = ret_nodeid;
	r->res_nodeid = new_master;
	set_new_master(r);
	unlock_rsb(r);
	recover_idr_del(r);

	if (recover_idr_empty(ls))
		wake_up(&ls->ls_wait_general);
 out:
	return 0;
}

/* Lock recovery: rebuild the process-copy locks we hold on a
   remastered rsb on the new rsb master.

   dlm_recover_locks
   recover_locks
   recover_locks_queue
   dlm_send_rcom_lock              ->  receive_rcom_lock
                                       dlm_recover_master_copy
   receive_rcom_lock_reply         <-
   dlm_recover_process_copy
*/

/*
 * keep a count of the number of lkb's we send to the new master; when we
 * get an equal number of replies then recovery for the rsb is done
 */

static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head,
			       uint64_t seq)
{
	struct dlm_lkb *lkb;
	int error = 0;

	list_for_each_entry(lkb, head, lkb_statequeue) {
		error = dlm_send_rcom_lock(r, lkb, seq);
		if (error)
			break;
		r->res_recover_locks_count++;
	}

	return error;
}

static int recover_locks(struct dlm_rsb *r, uint64_t seq)
{
	int error = 0;

	lock_rsb(r);

	DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););

	error = recover_locks_queue(r, &r->res_grantqueue, seq);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_convertqueue, seq);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_waitqueue, seq);
	if (error)
		goto out;

	if (r->res_recover_locks_count)
		recover_list_add(r);
	else
		rsb_clear_flag(r, RSB_NEW_MASTER);
 out:
	unlock_rsb(r);
	return error;
}

int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq)
{
	struct dlm_rsb *r;
	int error, count = 0;

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (is_master(r)) {
			rsb_clear_flag(r, RSB_NEW_MASTER);
			continue;
		}

		if (!rsb_flag(r, RSB_NEW_MASTER))
			continue;

		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			up_read(&ls->ls_root_sem);
			goto out;
		}

		error = recover_locks(r, seq);
		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}

		count += r->res_recover_locks_count;
	}
	up_read(&ls->ls_root_sem);

	log_rinfo(ls, "dlm_recover_locks %d out", count);

	error = dlm_wait_function(ls, &recover_list_empty);
 out:
	if (error)
		recover_list_clear(ls);
	return error;
}

void dlm_recovered_lock(struct dlm_rsb *r)
{
	DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););

	r->res_recover_locks_count--;
	if (!r->res_recover_locks_count) {
		rsb_clear_flag(r, RSB_NEW_MASTER);
		recover_list_del(r);
	}

	if (recover_list_empty(r->res_ls))
		wake_up(&r->res_ls->ls_wait_general);
}

/*
 * The lvb needs to be recovered on all master rsb's.  This includes setting
 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
 * based on the lvb's of the locks held on the rsb.
 *
 * RSB_VALNOTVALID is set in two cases:
 *
 * 1. we are master, but not new, and we purged an EX/PW lock held by a
 * failed node (in dlm_recover_purge which set RSB_RECOVER_LVB_INVAL)
 *
 * 2. we are a new master, and there are only NL/CR locks left.
 * (We could probably improve on this by only invalidating in this way when
 * the previous master left uncleanly.  VMS docs mention that.)
 *
 * The LVB contents are only considered for changing when this is a new master
 * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
 * mode > CR.  If no lkb's exist with mode above CR, the lvb contents are taken
 * from the lkb with the largest lvb sequence number.
 */

static void recover_lvb(struct dlm_rsb *r)
{
	struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL;
	uint32_t high_seq = 0;
	int lock_lvb_exists = 0;
	int lvblen = r->res_ls->ls_lvblen;

	if (!rsb_flag(r, RSB_NEW_MASTER2) &&
	    rsb_flag(r, RSB_RECOVER_LVB_INVAL)) {
		/* case 1 above */
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!rsb_flag(r, RSB_NEW_MASTER2))
		return;

	/* we are the new master, so figure out if VALNOTVALID should
	   be set, and set the rsb lvb from the best lkb available. */

	list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) {
		if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (iter->lkb_grmode > DLM_LOCK_CR) {
			big_lkb = iter;
			goto setflag;
		}

		if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = iter;
			high_seq = iter->lkb_lvbseq;
		}
	}

	list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) {
		if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (iter->lkb_grmode > DLM_LOCK_CR) {
			big_lkb = iter;
			goto setflag;
		}

		if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = iter;
			high_seq = iter->lkb_lvbseq;
		}
	}

 setflag:
	if (!lock_lvb_exists)
		goto out;

	/* lvb is invalidated if only NL/CR locks remain */
	if (!big_lkb)
		rsb_set_flag(r, RSB_VALNOTVALID);

	if (!r->res_lvbptr) {
		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
		if (!r->res_lvbptr)
			goto out;
	}

	if (big_lkb) {
		r->res_lvbseq = big_lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen);
	} else if (high_lkb) {
		r->res_lvbseq = high_lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
	} else {
		r->res_lvbseq = 0;
		memset(r->res_lvbptr, 0, lvblen);
	}
 out:
	return;
}

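/*
 * Illustrative userspace model (not kernel code, never compiled;
 * hypothetical names) of the LVB selection rule described above
 * recover_lvb(): any lkb holding a mode above CR supplies the lvb
 * outright; otherwise the lkb with the largest lvb sequence number
 * wins, using a wraparound-tolerant signed comparison like the
 * lkb_lvbseq test in recover_lvb().  The VALBLK filtering is omitted
 * to keep the sketch small.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

enum mode { NL, CR, CW, PR, PW, EX };	/* same ordering as DLM modes */

struct lkb { enum mode grmode; uint32_t lvbseq; const char *lvb; };

static const struct lkb *pick_lvb(const struct lkb *lkbs, int n)
{
	const struct lkb *high = NULL;
	uint32_t high_seq = 0;

	for (int i = 0; i < n; i++) {
		if (lkbs[i].grmode > CR)
			return &lkbs[i];  /* a "big" lkb wins outright */
		if (!high || (int32_t)(lkbs[i].lvbseq - high_seq) >= 0) {
			high = &lkbs[i];
			high_seq = lkbs[i].lvbseq;
		}
	}
	return high;	/* may be NULL if n == 0 */
}

int main(void)
{
	const struct lkb locks[] = {
		{ CR, 7, "older" }, { NL, 9, "newest-by-seq" },
	};
	printf("chose \"%s\"\n", pick_lvb(locks, 2)->lvb);
	return 0;
}
#endif
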
/* All master rsb's flagged RECOVER_CONVERT need to be looked at.  The locks
   converting PR->CW or CW->PR need to have their lkb_grmode set. */

static void recover_conversion(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	struct dlm_lkb *lkb;
	int grmode = -1;

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (lkb->lkb_grmode == DLM_LOCK_PR ||
		    lkb->lkb_grmode == DLM_LOCK_CW) {
			grmode = lkb->lkb_grmode;
			break;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (lkb->lkb_grmode != DLM_LOCK_IV)
			continue;
		if (grmode == -1) {
			log_debug(ls, "recover_conversion %x set gr to rq %d",
				  lkb->lkb_id, lkb->lkb_rqmode);
			lkb->lkb_grmode = lkb->lkb_rqmode;
		} else {
			log_debug(ls, "recover_conversion %x set gr %d",
				  lkb->lkb_id, grmode);
			lkb->lkb_grmode = grmode;
		}
	}
}

/* We've become the new master for this rsb and waiting/converting locks may
   need to be granted in dlm_recover_grant() due to locks that may have
   existed from a removed node. */

static void recover_grant(struct dlm_rsb *r)
{
	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
		rsb_set_flag(r, RSB_RECOVER_GRANT);
}

void dlm_recover_rsbs(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	unsigned int count = 0;

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		lock_rsb(r);
		if (is_master(r)) {
			if (rsb_flag(r, RSB_RECOVER_CONVERT))
				recover_conversion(r);

			/* recover lvb before granting locks so the updated
			   lvb/VALNOTVALID is presented in the completion */
			recover_lvb(r);

			if (rsb_flag(r, RSB_NEW_MASTER2))
				recover_grant(r);
			count++;
		} else {
			rsb_clear_flag(r, RSB_VALNOTVALID);
		}
		rsb_clear_flag(r, RSB_RECOVER_CONVERT);
		rsb_clear_flag(r, RSB_RECOVER_LVB_INVAL);
		rsb_clear_flag(r, RSB_NEW_MASTER2);
		unlock_rsb(r);
	}
	up_read(&ls->ls_root_sem);

	if (count)
		log_rinfo(ls, "dlm_recover_rsbs %d done", count);
}

/* Create a single list of all root rsb's to be used during recovery */

int dlm_create_root_list(struct dlm_ls *ls)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i, error = 0;

	down_write(&ls->ls_root_sem);
	if (!list_empty(&ls->ls_root_list)) {
		log_error(ls, "root list not empty");
		error = -EINVAL;
		goto out;
	}

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			list_add(&r->res_root_list, &ls->ls_root_list);
			dlm_hold_rsb(r);
		}

		if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss))
			log_error(ls, "dlm_create_root_list toss not empty");
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
 out:
	up_write(&ls->ls_root_sem);
	return error;
}

void dlm_release_root_list(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *safe;

	down_write(&ls->ls_root_sem);
	list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
		list_del_init(&r->res_root_list);
		dlm_put_rsb(r);
	}
	up_write(&ls->ls_root_sem);
}

void dlm_clear_toss(struct dlm_ls *ls)
{
	struct rb_node *n, *next;
	struct dlm_rsb *r;
	unsigned int count = 0;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
			next = rb_next(n);
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			rb_erase(n, &ls->ls_rsbtbl[i].toss);
			dlm_free_rsb(r);
			count++;
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}

	if (count)
		log_rinfo(ls, "dlm_clear_toss %u done", count);
}