/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h"
struct after_state_chg_work {
	struct drbd_work w;
	union drbd_state os;
	union drbd_state ns;
	enum chg_state_flags flags;
	struct completion *done;
};
static int w_after_state_ch(struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags);
static void after_all_state_ch(struct drbd_tconn *tconn);
static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
				       const char **warn_sync_abort);
bool conn_all_vols_unconf(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.disk != D_DISKLESS ||
		    mdev->state.conn != C_STANDALONE ||
		    mdev->state.role != R_SECONDARY)
			return false;
	}
	return true;
}
/* Unfortunately the state values were not correctly ordered when they
 * were defined, therefore we cannot use max_t() here. */
static enum drbd_role max_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_PRIMARY || role2 == R_PRIMARY)
		return R_PRIMARY;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_UNKNOWN;
}

static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_UNKNOWN || role2 == R_UNKNOWN)
		return R_UNKNOWN;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_PRIMARY;
}
enum drbd_role conn_highest_role(struct drbd_tconn *tconn)
{
	enum drbd_role role = R_UNKNOWN;
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		role = max_role(role, mdev->state.role);

	return role;
}
enum drbd_role conn_highest_peer(struct drbd_tconn *tconn)
{
	enum drbd_role peer = R_UNKNOWN;
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		peer = max_role(peer, mdev->state.peer);

	return peer;
}
enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn)
{
	enum drbd_disk_state ds = D_DISKLESS;
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		ds = max_t(enum drbd_disk_state, ds, mdev->state.disk);

	return ds;
}
enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn)
{
	enum drbd_disk_state ds = D_DISKLESS;
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		ds = max_t(enum drbd_disk_state, ds, mdev->state.pdsk);

	return ds;
}
/**
 * cl_wide_st_chg() - true if the state change is a cluster wide one
 * @mdev:	DRBD device.
 * @os:	old (current) state.
 * @ns:	new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_conf *mdev,
			  union drbd_state os, union drbd_state ns)
{
	return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
		  (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}
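
/*
 * For illustration: promoting while connected, i.e. going from
 * { role: R_SECONDARY, conn: C_CONNECTED } to
 * { role: R_PRIMARY,   conn: C_CONNECTED },
 * satisfies cl_wide_st_chg(), so the peer gets asked before the change
 * is committed; a purely local transition such as D_ATTACHING ->
 * D_NEGOTIATING does not.
 */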
static union drbd_state
apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
{
	union drbd_state ns;
	ns.i = (os.i & ~mask.i) | val.i;

	return ns;
}
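
/*
 * Sketch of how callers build mask/val pairs: to change only the role
 * field, set all bits of .role in the mask and the wanted value in val,
 * leaving everything else zero.  The NS() convenience macro seen below
 * (e.g. NS(conn, C_CONNECTED)) expands to such a pair:
 *
 *	union drbd_state mask = { }, val = { };
 *	mask.role = R_MASK;			-- all bits of the field
 *	val.role  = R_PRIMARY;
 *	ns = apply_mask_val(os, mask, val);	-- only .role changes
 */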
enum drbd_state_rv
drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
		  union drbd_state mask, union drbd_state val)
{
	unsigned long flags;
	union drbd_state ns;
	enum drbd_state_rv rv;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	ns = apply_mask_val(mdev->state, mask, val);
	rv = _drbd_set_state(mdev, ns, f, NULL);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	return rv;
}
/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 */
void drbd_force_state(struct drbd_conf *mdev,
	union drbd_state mask, union drbd_state val)
{
	drbd_change_state(mdev, CS_HARD, mask, val);
}
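
/*
 * Typical use (sketch): when the environment dictates a fact, e.g. the
 * backing device just failed, the new state is imposed rather than
 * requested:
 *
 *	drbd_force_state(mdev, NS(disk, D_FAILED));
 *
 * CS_HARD skips the soft-transition checks; the result still passes
 * through sanitize_state() and is_valid_transition().
 */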
static enum drbd_state_rv
_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
	     union drbd_state val)
{
	union drbd_state os, ns;
	unsigned long flags;
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
		return SS_CW_FAILED_BY_PEER;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	os = mdev->state;
	ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv == SS_SUCCESS)
		rv = SS_UNKNOWN_ERROR;  /* continue waiting, otherwise fail. */

	if (!cl_wide_st_chg(mdev, os, ns))
		rv = SS_CW_NO_NEED;
	if (rv == SS_UNKNOWN_ERROR) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS) {
			rv = is_valid_soft_transition(os, ns);
			if (rv == SS_SUCCESS)
				rv = SS_UNKNOWN_ERROR; /* continue waiting, otherwise fail. */
		}
	}
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	return rv;
}
/**
 * drbd_req_state() - Perform a possibly cluster-wide state change
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:	flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static enum drbd_state_rv
drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
	       union drbd_state val, enum chg_state_flags f)
{
	struct completion done;
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(mdev->state_mutex);

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	os = mdev->state;
	ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS) {
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
		goto abort;
	}

	if (cl_wide_st_chg(mdev, os, ns)) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_soft_transition(os, ns);
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		if (drbd_send_state_req(mdev, mask, val)) {
			rv = SS_CW_FAILED_BY_PEER;
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		wait_event(mdev->state_wait,
			(rv = _req_st_cond(mdev, mask, val)));

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}
		spin_lock_irqsave(&mdev->tconn->req_lock, flags);
		ns = apply_mask_val(mdev->state, mask, val);
		rv = _drbd_set_state(mdev, ns, f, &done);
	} else {
		rv = _drbd_set_state(mdev, ns, f, &done);
	}

	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(current != mdev->tconn->worker.task);
		wait_for_completion(&done);
	}

abort:
	if (f & CS_SERIALIZE)
		mutex_unlock(mdev->state_mutex);

	return rv;
}
/**
 * _drbd_request_state() - Request a state change (with flags)
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:	flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
enum drbd_state_rv
_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
		    union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	wait_event(mdev->state_wait,
		   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);

	return rv;
}
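
/*
 * The drbd_request_state() wrapper (a one-liner in drbd_int.h) simply
 * calls this with convenience flags, typically CS_VERBOSE | CS_ORDERED.
 * A caller wanting to disconnect might do (sketch):
 *
 *	rv = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_VERBOSE);
 *	if (rv < SS_SUCCESS)
 *		return rv;
 */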
static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
{
	dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
	    name,
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    is_susp(ns) ? 's' : 'r',
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-',
	    ns.susp_fen ? 'F' : '-',
	    ns.susp_nod ? 'N' : '-'
	    );
}
void print_st_err(struct drbd_conf *mdev, union drbd_state os,
	          union drbd_state ns, enum drbd_state_rv err)
{
	if (err == SS_IN_TRANSIENT_STATE)
		return;
	dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(mdev, " state", os);
	print_st(mdev, "wanted", ns);
}
static long print_state_change(char *pb, union drbd_state os, union drbd_state ns,
			       enum chg_state_flags flags)
{
	char *pbp;
	pbp = pb;
	*pbp = 0;

	if (ns.role != os.role && flags & CS_DC_ROLE)
		pbp += sprintf(pbp, "role( %s -> %s ) ",
			       drbd_role_str(os.role),
			       drbd_role_str(ns.role));
	if (ns.peer != os.peer && flags & CS_DC_PEER)
		pbp += sprintf(pbp, "peer( %s -> %s ) ",
			       drbd_role_str(os.peer),
			       drbd_role_str(ns.peer));
	if (ns.conn != os.conn && flags & CS_DC_CONN)
		pbp += sprintf(pbp, "conn( %s -> %s ) ",
			       drbd_conn_str(os.conn),
			       drbd_conn_str(ns.conn));
	if (ns.disk != os.disk && flags & CS_DC_DISK)
		pbp += sprintf(pbp, "disk( %s -> %s ) ",
			       drbd_disk_str(os.disk),
			       drbd_disk_str(ns.disk));
	if (ns.pdsk != os.pdsk && flags & CS_DC_PDSK)
		pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
			       drbd_disk_str(os.pdsk),
			       drbd_disk_str(ns.pdsk));
	if (is_susp(ns) != is_susp(os))
		pbp += sprintf(pbp, "susp( %d -> %d ) ",
			       is_susp(os),
			       is_susp(ns));
	if (ns.aftr_isp != os.aftr_isp)
		pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
			       os.aftr_isp,
			       ns.aftr_isp);
	if (ns.peer_isp != os.peer_isp)
		pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
			       os.peer_isp,
			       ns.peer_isp);
	if (ns.user_isp != os.user_isp)
		pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
			       os.user_isp,
			       ns.user_isp);

	return pbp - pb;
}
static void drbd_pr_state_change(struct drbd_conf *mdev, union drbd_state os, union drbd_state ns,
				 enum chg_state_flags flags)
{
	char pb[300];

	if (print_state_change(pb, os, ns, flags ^ CS_DC_MASK))
		dev_info(DEV, "%s\n", pb);
}

static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os, union drbd_state ns,
				 enum chg_state_flags flags)
{
	char pb[300];

	if (print_state_change(pb, os, ns, flags))
		conn_info(tconn, "%s\n", pb);
}
/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @mdev:	DRBD device.
 * @ns:	State to consider.
 */
static enum drbd_state_rv
is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
	/* See drbd_state_sw_errors in drbd_strings.c */

	enum drbd_fencing_p fp;
	enum drbd_state_rv rv = SS_SUCCESS;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (get_net_conf(mdev->tconn)) {
		if (!mdev->tconn->net_conf->two_primaries && ns.role == R_PRIMARY) {
			if (ns.peer == R_PRIMARY)
				rv = SS_TWO_PRIMARIES;
			else if (conn_highest_peer(mdev->tconn) == R_PRIMARY)
				rv = SS_O_VOL_PEER_PRI;
		}
		put_net_conf(mdev->tconn);
	}

	if (rv <= 0)
		/* already found a reason to abort */;
	else if (ns.role == R_SECONDARY && mdev->open_cnt)
		rv = SS_DEVICE_IN_USE;

	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (fp >= FP_RESOURCE &&
		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
		rv = SS_PRIMARY_NOP;

	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
		rv = SS_NO_LOCAL_DISK;

	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
		rv = SS_NO_REMOTE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_CONNECTED ||
		  ns.conn == C_WF_BITMAP_S ||
		  ns.conn == C_SYNC_SOURCE ||
		  ns.conn == C_PAUSED_SYNC_S) &&
		  ns.disk == D_OUTDATED)
		rv = SS_CONNECTED_OUTDATES;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		 (mdev->tconn->net_conf->verify_alg[0] == 0))
		rv = SS_NO_VERIFY_ALG;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		  mdev->tconn->agreed_pro_version < 88)
		rv = SS_NOT_SUPPORTED;

	else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
		rv = SS_CONNECTED_OUTDATES;

	return rv;
}
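
/*
 * Example of a rejected target state: with two_primaries disabled in
 * net_conf, requesting ns.role == R_PRIMARY while ns.peer == R_PRIMARY
 * yields SS_TWO_PRIMARIES above, and the requester gets the matching
 * error string via drbd_set_st_err_str().
 */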
/**
 * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
 * This function limits state transitions that may be declined by DRBD, i.e.
 * user requests (aka soft transitions).
 * @mdev:	DRBD device.
 * @ns:	new state.
 * @os:	old state.
 */
static enum drbd_state_rv
is_valid_soft_transition(union drbd_state os, union drbd_state ns)
{
	enum drbd_state_rv rv = SS_SUCCESS;

	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
	    os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
		rv = SS_ALREADY_STANDALONE;

	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
		rv = SS_NO_NET_CONFIG;

	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
		rv = SS_LOWER_THAN_OUTDATED;

	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
		rv = SS_IN_TRANSIENT_STATE;

	/* if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
	   rv = SS_IN_TRANSIENT_STATE; */

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
	    ns.conn != os.conn && os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
	    os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
	    && os.conn < C_WF_REPORT_PARAMS)
		rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */

	return rv;
}
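
/*
 * Note the division of labour with is_valid_state(): that one judges
 * the target state in isolation, while this one judges the edge
 * os -> ns.  E.g. C_STANDALONE -> C_DISCONNECTING is refused here
 * (SS_ALREADY_STANDALONE) although C_DISCONNECTING by itself can be a
 * perfectly valid state.
 */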
static enum drbd_state_rv
is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
{
	enum drbd_state_rv rv = SS_SUCCESS;

	/* Disallow network errors from configuring a device's network part */
	if ((nc >= C_TIMEOUT && nc <= C_TEAR_DOWN) && oc <= C_DISCONNECTING)
		rv = SS_NEED_CONNECTION;

	/* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
	if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
		rv = SS_IN_TRANSIENT_STATE;

	/* After C_DISCONNECTING only C_STANDALONE may follow */
	if (oc == C_DISCONNECTING && nc != C_STANDALONE)
		rv = SS_IN_TRANSIENT_STATE;

	return rv;
}
/**
 * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
 * This limits hard state transitions. Hard state transitions are facts that are
 * imposed on DRBD by the environment, e.g. the disk broke or the network broke down.
 * But even those hard state transitions are not allowed to do everything.
 * @ns:	new state.
 * @os:	old state.
 */
static enum drbd_state_rv
is_valid_transition(union drbd_state os, union drbd_state ns)
{
	enum drbd_state_rv rv;

	rv = is_valid_conn_transition(os.conn, ns.conn);

	/* we cannot fail (again) if we already detached */
	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	/* if we are only D_ATTACHING yet,
	 * we can (and should) go directly to D_DISKLESS. */
	if (ns.disk == D_FAILED && os.disk == D_ATTACHING) {
		printk("TODO: FIX ME\n");
		rv = SS_IS_DISKLESS;
	}

	return rv;
}
/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @mdev:	DRBD device.
 * @ns:	new state.
 * @warn_sync_abort:	set to a reason string if a running resync gets aborted.
 *
 * When we lose connection, we have to set the state of the peer's disk (pdsk)
 * to D_UNKNOWN. This rule and many more along those lines are in this function.
 */
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
				       const char **warn_sync_abort)
{
	enum drbd_fencing_p fp;
	enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Implications from connection to peer and peer_isp */
	if (ns.conn < C_CONNECTED) {
		ns.peer_isp = 0;
		ns.peer = R_UNKNOWN;
		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
			ns.pdsk = D_UNKNOWN;
	}

	/* Clear the aftr_isp when becoming unconfigured */
	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
		ns.aftr_isp = 0;

	/* An implication of the disk states onto the connection state */
	/* Abort resync if a disk fails/detaches */
	if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
		if (warn_sync_abort)
			*warn_sync_abort =
				ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ?
				"Online-verify" : "Resync";
		ns.conn = C_CONNECTED;
	}

	/* Connection breaks down before we finished "Negotiating" */
	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
			ns.disk = mdev->new_state_tmp.disk;
			ns.pdsk = mdev->new_state_tmp.pdsk;
		} else {
			dev_alert(DEV, "Connection lost while negotiating, no data!\n");
			ns.disk = D_DISKLESS;
			ns.pdsk = D_UNKNOWN;
		}
		put_ldev(mdev);
	}

	/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
	if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
		if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
			ns.disk = D_UP_TO_DATE;
		if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
			ns.pdsk = D_UP_TO_DATE;
	}

	/* Implications of the connection state on the disk states */
	disk_min = D_DISKLESS;
	disk_max = D_UP_TO_DATE;
	pdsk_min = D_INCONSISTENT;
	pdsk_max = D_UNKNOWN;
	switch ((enum drbd_conns)ns.conn) {
	case C_WF_BITMAP_T:
	case C_PAUSED_SYNC_T:
	case C_STARTING_SYNC_T:
	case C_WF_SYNC_UUID:
	case C_BEHIND:
		disk_min = D_INCONSISTENT;
		disk_max = D_OUTDATED;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_VERIFY_S:
	case C_VERIFY_T:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_CONNECTED:
		disk_min = D_DISKLESS;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_DISKLESS;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_WF_BITMAP_S:
	case C_PAUSED_SYNC_S:
	case C_STARTING_SYNC_S:
	case C_AHEAD:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
		break;
	case C_SYNC_TARGET:
		disk_min = D_INCONSISTENT;
		disk_max = D_INCONSISTENT;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_SYNC_SOURCE:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_INCONSISTENT;
		break;
	case C_STANDALONE:
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_CONNECTION:
	case C_WF_REPORT_PARAMS:
	case C_MASK:
		break;
	}
	if (ns.disk > disk_max)
		ns.disk = disk_max;

	if (ns.disk < disk_min) {
		dev_warn(DEV, "Implicitly set disk from %s to %s\n",
			 drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
		ns.disk = disk_min;
	}
	if (ns.pdsk > pdsk_max)
		ns.pdsk = pdsk_max;

	if (ns.pdsk < pdsk_min) {
		dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
			 drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
		ns.pdsk = pdsk_min;
	}

	if (fp == FP_STONITH &&
	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
		ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

	if (mdev->tconn->res_opts.on_no_data == OND_SUSPEND_IO &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		ns.susp_nod = 1; /* Suspend IO while no accessible data is available */

	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
		if (ns.conn == C_SYNC_SOURCE)
			ns.conn = C_PAUSED_SYNC_S;
		if (ns.conn == C_SYNC_TARGET)
			ns.conn = C_PAUSED_SYNC_T;
	} else {
		if (ns.conn == C_PAUSED_SYNC_S)
			ns.conn = C_SYNC_SOURCE;
		if (ns.conn == C_PAUSED_SYNC_T)
			ns.conn = C_SYNC_TARGET;
	}

	return ns;
}
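
/*
 * Worked example (sketch): a user pauses resync on a SyncSource.  The
 * request sets ns.user_isp = 1; the last block above then rewrites
 * ns.conn from C_SYNC_SOURCE to C_PAUSED_SYNC_S, so callers never see
 * an "isp set but still syncing" combination.  Clearing the last isp
 * bit takes the same path back to C_SYNC_SOURCE.
 */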
void drbd_resume_al(struct drbd_conf *mdev)
{
	if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
		dev_info(DEV, "Resumed AL updates\n");
}
/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
	if (mdev->tconn->agreed_pro_version < 90)
		mdev->ov_start_sector = 0;
	mdev->rs_total = drbd_bm_bits(mdev);
	mdev->ov_position = 0;
	if (cs == C_VERIFY_T) {
		/* starting online verify from an arbitrary position
		 * does not fit well into the existing protocol.
		 * on C_VERIFY_T, we initialize ov_left and friends
		 * implicitly in receive_DataRequest once the
		 * first P_OV_REQUEST is received */
		mdev->ov_start_sector = ~(sector_t)0;
	} else {
		unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
		if (bit >= mdev->rs_total) {
			mdev->ov_start_sector =
				BM_BIT_TO_SECT(mdev->rs_total - 1);
			mdev->rs_total = 1;
		} else
			mdev->rs_total -= bit;
		mdev->ov_position = mdev->ov_start_sector;
	}
	mdev->ov_left = mdev->rs_total;
}
/**
 * __drbd_set_state() - Set a new DRBD state
 * @mdev:	DRBD device.
 * @ns:	new state.
 * @flags:	Flags
 * @done:	Optional completion; completed after after_state_ch() has finished.
 *
 * Caller needs to hold req_lock and global_state_lock. Do not call directly.
 */
enum drbd_state_rv
__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
	         enum chg_state_flags flags, struct completion *done)
{
	union drbd_state os;
	enum drbd_state_rv rv = SS_SUCCESS;
	const char *warn_sync_abort = NULL;
	struct after_state_chg_work *ascw;

	os = mdev->state;

	ns = sanitize_state(mdev, ns, &warn_sync_abort);
	if (ns.i == os.i)
		return SS_NOTHING_TO_DO;

	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS)
		return rv;

	if (!(flags & CS_HARD)) {
		/* pre-state-change checks; only look at ns */
		/* See drbd_state_sw_errors in drbd_strings.c */

		rv = is_valid_state(mdev, ns);
		if (rv < SS_SUCCESS) {
			/* If the old state was illegal as well, then let
			   this happen... */

			if (is_valid_state(mdev, os) == rv)
				rv = is_valid_soft_transition(os, ns);
		} else
			rv = is_valid_soft_transition(os, ns);
	}

	if (rv < SS_SUCCESS) {
		if (flags & CS_VERBOSE)
			print_st_err(mdev, os, ns, rv);
		return rv;
	}

	if (warn_sync_abort)
		dev_warn(DEV, "%s aborted.\n", warn_sync_abort);

	drbd_pr_state_change(mdev, os, ns, flags);

	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
	 * drbd_ldev_destroy() won't happen before our corresponding
	 * after_state_ch works run, where we put_ldev again. */
	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
		atomic_inc(&mdev->local_cnt);

	mdev->state = ns;

	/* solve the race between becoming unconfigured,
	 * worker doing the cleanup, and
	 * admin reconfiguring us:
	 * on (re)configure, first set CONFIG_PENDING,
	 * then wait for a potentially exiting worker,
	 * start the worker, and schedule one no_op.
	 * then proceed with configuration.
	 */
	if (conn_all_vols_unconf(mdev->tconn) &&
	    !test_and_set_bit(CONFIG_PENDING, &mdev->tconn->flags))
		set_bit(OBJECT_DYING, &mdev->tconn->flags);

	if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
		drbd_print_uuids(mdev, "attached to UUIDs");

	wake_up(&mdev->misc_wait);
	wake_up(&mdev->state_wait);
	wake_up(&mdev->tconn->ping_wait);

	/* aborted verify run. log the last position */
	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
	    ns.conn < C_CONNECTED) {
		mdev->ov_start_sector =
			BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
		dev_info(DEV, "Online Verify reached sector %llu\n",
			(unsigned long long)mdev->ov_start_sector);
	}

	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
		dev_info(DEV, "Syncer continues.\n");
		mdev->rs_paused += (long)jiffies
				  -(long)mdev->rs_mark_time[mdev->rs_last_mark];
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);
	}

	if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
		dev_info(DEV, "Resync suspended\n");
		mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
	}

	if (os.conn == C_CONNECTED &&
	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
		unsigned long now = jiffies;
		int i;

		set_ov_position(mdev, ns.conn);
		mdev->rs_start = now;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->ov_last_oos_size = 0;
		mdev->ov_last_oos_start = 0;

		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = mdev->ov_left;
			mdev->rs_mark_time[i] = now;
		}

		drbd_rs_controller_reset(mdev);

		if (ns.conn == C_VERIFY_S) {
			dev_info(DEV, "Starting Online Verify from sector %llu\n",
					(unsigned long long)mdev->ov_position);
			mod_timer(&mdev->resync_timer, jiffies);
		}
	}

	if (get_ldev(mdev)) {
		u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
						   MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						   MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		if (test_bit(CRASHED_PRIMARY, &mdev->flags))
			mdf |= MDF_CRASHED_PRIMARY;
		if (mdev->state.role == R_PRIMARY ||
		    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
			mdf |= MDF_PRIMARY_IND;
		if (mdev->state.conn > C_WF_REPORT_PARAMS)
			mdf |= MDF_CONNECTED_IND;
		if (mdev->state.disk > D_INCONSISTENT)
			mdf |= MDF_CONSISTENT;
		if (mdev->state.disk > D_OUTDATED)
			mdf |= MDF_WAS_UP_TO_DATE;
		if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
			mdf |= MDF_PEER_OUT_DATED;
		if (mdf != mdev->ldev->md.flags) {
			mdev->ldev->md.flags = mdf;
			drbd_md_mark_dirty(mdev);
		}
		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
			drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
		put_ldev(mdev);
	}

	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider resync */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &mdev->flags);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
		drbd_thread_stop_nowait(&mdev->tconn->receiver);

	/* Now the receiver finished cleaning up itself, it should die */
	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
		drbd_thread_stop_nowait(&mdev->tconn->receiver);

	/* Upon network failure, we need to restart the receiver. */
	if (os.conn > C_TEAR_DOWN &&
	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
		drbd_thread_restart_nowait(&mdev->tconn->receiver);

	/* Resume AL writing if we get a connection */
	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
		drbd_resume_al(mdev);

	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
	if (ascw) {
		ascw->os = os;
		ascw->ns = ns;
		ascw->flags = flags;
		ascw->w.cb = w_after_state_ch;
		ascw->w.mdev = mdev;
		ascw->done = done;
		drbd_queue_work(&mdev->tconn->data.work, &ascw->w);
	} else {
		dev_err(DEV, "Could not kmalloc an ascw\n");
	}

	return rv;
}
static int w_after_state_ch(struct drbd_work *w, int unused)
{
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	struct drbd_conf *mdev = w->mdev;

	after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
	if (ascw->flags & CS_WAIT_COMPLETE) {
		D_ASSERT(ascw->done != NULL);
		complete(ascw->done);
	}
	kfree(ascw);

	return 0;
}
static void abw_start_sync(struct drbd_conf *mdev, int rv)
{
	if (rv) {
		dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
		_drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
		return;
	}

	switch (mdev->state.conn) {
	case C_STARTING_SYNC_T:
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		break;
	case C_STARTING_SYNC_S:
		drbd_start_resync(mdev, C_SYNC_SOURCE);
		break;
	}
}
int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
			       int (*io_fn)(struct drbd_conf *),
			       char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(current == mdev->tconn->worker.task);

	/* open coded non-blocking drbd_suspend_io(mdev); */
	set_bit(SUSPEND_IO, &mdev->flags);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	drbd_resume_io(mdev);

	return rv;
}
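
/*
 * Usage from this file (see after_state_ch() below), e.g. on demote:
 *
 *	drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
 *			"demote", BM_LOCKED_TEST_ALLOWED);
 *
 * Worker context is mandatory (see the D_ASSERT above); the open-coded
 * non-blocking suspend avoids having the worker wait on work it would
 * have to process itself.
 */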
/**
 * after_state_ch() - Perform after state change actions that may sleep
 * @mdev:	DRBD device.
 * @os:	old state.
 * @ns:	new state.
 * @flags:	Flags
 */
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags)
{
	enum drbd_fencing_p fp;
	enum drbd_req_event what = NOTHING;
	union drbd_state nsm;
	struct sib_info sib;

	sib.sib_reason = SIB_STATE_CHANGE;
	sib.os = os;
	sib.ns = ns;

	if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
		clear_bit(CRASHED_PRIMARY, &mdev->flags);
		if (mdev->p_uuid)
			mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
	}

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Inform userspace about the change... */
	drbd_bcast_event(mdev, &sib);

	if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		drbd_khelper(mdev, "pri-on-incon-degr");
	/* Here we have the actions that are performed after a
	   state change. This function might sleep */

	nsm.i = -1;
	if (ns.susp_nod) {
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
			what = RESEND;

		if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
			what = RESTART_FROZEN_DISK_IO;

		if (what != NOTHING)
			nsm.susp_nod = 0;
	}

	if (ns.susp_fen) {
		/* case1: The outdate peer handler is successful: */
		if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
			tl_clear(mdev->tconn);
			if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
				drbd_uuid_new_current(mdev);
				clear_bit(NEW_CUR_UUID, &mdev->flags);
			}
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
		/* case2: The connection was established again: */
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
			clear_bit(NEW_CUR_UUID, &mdev->flags);
			what = RESEND;
			nsm.susp_fen = 0;
		}
	}

	if (what != NOTHING) {
		spin_lock_irq(&mdev->tconn->req_lock);
		_tl_restart(mdev->tconn, what);
		nsm.i &= mdev->state.i;
		_drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);
	}
	/* Became sync source. With protocol >= 96, we still need to send out
	 * the sync uuid now. Need to do that before any drbd_send_state, or
	 * the other side may go "paused sync" before receiving the sync uuids,
	 * which is unexpected. */
	if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
	    mdev->tconn->agreed_pro_version >= 96 && get_ldev(mdev)) {
		drbd_gen_and_send_sync_uuid(mdev);
		put_ldev(mdev);
	}

	/* Do not change the order of the if above and the two below... */
	if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {	/* attach on the peer */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}
	/* No point in queuing send_bitmap if we don't have a connection
	 * anymore, so check also the _current_ state, not only the new state
	 * at the time this work was queued. */
	if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
	    mdev->state.conn == C_WF_BITMAP_S)
		drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
				"send_bitmap (WFBitMapS)",
				BM_LOCKED_TEST_ALLOWED);
	/* Lost contact to peer's copy of the data */
	if ((os.pdsk >= D_INCONSISTENT &&
	     os.pdsk != D_UNKNOWN &&
	     os.pdsk != D_OUTDATED)
	&&  (ns.pdsk < D_INCONSISTENT ||
	     ns.pdsk == D_UNKNOWN ||
	     ns.pdsk == D_OUTDATED)) {
		if (get_ldev(mdev)) {
			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
				if (is_susp(mdev->state)) {
					set_bit(NEW_CUR_UUID, &mdev->flags);
				} else {
					drbd_uuid_new_current(mdev);
					drbd_send_uuids(mdev);
				}
			}
			put_ldev(mdev);
		}
	}

	if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
		if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
			drbd_uuid_new_current(mdev);
			drbd_send_uuids(mdev);
		}

		/* D_DISKLESS Peer becomes secondary */
		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
			/* We may still be Primary ourselves.
			 * No harm done if the bitmap still changes,
			 * redirtied pages will follow later. */
			drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
				"demote diskless peer", BM_LOCKED_SET_ALLOWED);
		put_ldev(mdev);
	}
	/* Write out all changed bits on demote.
	 * Though, no need to do that just yet
	 * if there is a resync going on still */
	if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
	    mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
		/* No changes to the bitmap expected this time, so assert that,
		 * even though no harm was done if it did change. */
		drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
				"demote", BM_LOCKED_TEST_ALLOWED);
		put_ldev(mdev);
	}
	/* Last part of the attaching process ... */
	if (ns.conn >= C_CONNECTED &&
	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
		drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	/* We want to pause/continue resync, tell peer. */
	if (ns.conn >= C_CONNECTED &&
	     ((os.aftr_isp != ns.aftr_isp) ||
	      (os.user_isp != ns.user_isp)))
		drbd_send_state(mdev);

	/* In case one of the isp bits got set, suspend other devices. */
	if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
	    (ns.aftr_isp || ns.peer_isp || ns.user_isp))
		suspend_other_sg(mdev);

	/* Make sure the peer gets informed about possible state
	   changes (ISP bits) while we were in WFReportParams. */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
		drbd_send_state(mdev);

	if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
		drbd_send_state(mdev);

	/* We are in the process of starting a full sync... */
	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
	    (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
		/* no other bitmap changes expected during this phase */
		drbd_queue_bitmap_io(mdev,
			&drbd_bmio_set_n_write, &abw_start_sync,
			"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);

	/* We are invalidating ourselves... */
	if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
	    os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
		/* other bitmap operation expected during this phase */
		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
			"set_n_write from invalidate", BM_LOCKED_MASK);
	/* first half of local IO error, failure to attach,
	 * or administrative detach */
	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
		enum drbd_io_error_p eh;
		int was_io_error;
		/* corresponding get_ldev was in __drbd_set_state, to serialize
		 * our cleanup here with the transition to D_DISKLESS,
		 * so it is safe to dereference ldev here. */
		eh = mdev->ldev->dc.on_io_error;
		was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);

		/* current state still has to be D_FAILED,
		 * there is only one way out: to D_DISKLESS,
		 * and that may only happen after our put_ldev below. */
		if (mdev->state.disk != D_FAILED)
			dev_err(DEV,
				"ASSERT FAILED: disk is %s during detach\n",
				drbd_disk_str(mdev->state.disk));

		if (!drbd_send_state(mdev))
			dev_warn(DEV, "Notified peer that I am detaching my disk\n");
		else
			dev_err(DEV, "Sending state for detaching disk failed\n");

		drbd_rs_cancel_all(mdev);

		/* In case we want to get something to stable storage still,
		 * this may be the last chance.
		 * Following put_ldev may transition to D_DISKLESS. */
		drbd_md_sync(mdev);
		put_ldev(mdev);

		if (was_io_error && eh == EP_CALL_HELPER)
			drbd_khelper(mdev, "local-io-error");
	}
	/* second half of local IO error, failure to attach,
	 * or administrative detach,
	 * after local_cnt references have reached zero again */
	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
		/* We must still be diskless,
		 * re-attach has to be serialized with this! */
		if (mdev->state.disk != D_DISKLESS)
			dev_err(DEV,
				"ASSERT FAILED: disk is %s while going diskless\n",
				drbd_disk_str(mdev->state.disk));

		mdev->rs_total = 0;
		mdev->rs_failed = 0;
		atomic_set(&mdev->rs_pending_cnt, 0);

		if (!drbd_send_state(mdev))
			dev_warn(DEV, "Notified peer that I'm now diskless.\n");
		/* corresponding get_ldev in __drbd_set_state
		 * this may finally trigger drbd_ldev_destroy. */
		put_ldev(mdev);
	}

	/* Notify peer that I had a local IO error and did not detach. */
	if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
		drbd_send_state(mdev);
	/* Disks got bigger while they were detached */
	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
	    test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
		if (ns.conn == C_CONNECTED)
			resync_after_online_grow(mdev);
	}

	/* A resync finished or aborted, wake paused devices... */
	if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
	    (os.peer_isp && !ns.peer_isp) ||
	    (os.user_isp && !ns.user_isp))
		resume_next_sg(mdev);

	/* sync target done with resync. Explicitly notify peer, even though
	 * it should (at least for non-empty resyncs) already know itself. */
	if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
		drbd_send_state(mdev);

	/* This triggers bitmap writeout of potentially still unwritten pages
	 * if the resync finished cleanly, or aborted because of peer disk
	 * failure, or because of connection loss.
	 * For resync aborted because of local disk failure, we cannot do
	 * any bitmap writeout anymore.
	 * No harm done if some bits change during this phase.
	 */
	if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
		drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
			"write from resync_finished", BM_LOCKED_SET_ALLOWED);
		put_ldev(mdev);
	}

	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY) {
		if (os.aftr_isp != ns.aftr_isp)
			resume_next_sg(mdev);
	}

	after_all_state_ch(mdev->tconn);

	drbd_md_sync(mdev);
}
struct after_conn_state_chg_work {
	struct drbd_work w;
	enum drbd_conns oc;
	union drbd_state nms; /* new, max state, over all mdevs */
	enum chg_state_flags flags;
};
static void after_all_state_ch(struct drbd_tconn *tconn)
{
	if (conn_all_vols_unconf(tconn) &&
	    test_bit(OBJECT_DYING, &tconn->flags)) {
		drbd_thread_stop_nowait(&tconn->worker);
	}
}
static int w_after_conn_state_ch(struct drbd_work *w, int unused)
{
	struct after_conn_state_chg_work *acscw =
		container_of(w, struct after_conn_state_chg_work, w);
	struct drbd_tconn *tconn = w->tconn;
	enum drbd_conns oc = acscw->oc;
	union drbd_state nms = acscw->nms;

	kfree(acscw);

	/* Upon network configuration, we need to start the receiver */
	if (oc == C_STANDALONE && nms.conn == C_UNCONNECTED)
		drbd_thread_start(&tconn->receiver);

	//conn_err(tconn, STATE_FMT, STATE_ARGS("nms", nms));
	after_all_state_ch(tconn);

	return 0;
}
void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf)
{
	enum chg_state_flags flags = ~0;
	union drbd_state os, cs = {}; /* old_state, common_state */
	struct drbd_conf *mdev;
	int vnr, first_vol = 1;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		os = mdev->state;

		if (first_vol) {
			cs = os;
			first_vol = 0;
			continue;
		}

		if (cs.role != os.role)
			flags &= ~CS_DC_ROLE;

		if (cs.peer != os.peer)
			flags &= ~CS_DC_PEER;

		if (cs.conn != os.conn)
			flags &= ~CS_DC_CONN;

		if (cs.disk != os.disk)
			flags &= ~CS_DC_DISK;

		if (cs.pdsk != os.pdsk)
			flags &= ~CS_DC_PDSK;
	}

	*pf |= CS_DC_MASK;
	*pf &= flags;
	*pcs = cs;
}
static enum drbd_state_rv
conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
			 enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	union drbd_state ns, os;
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		os = mdev->state;
		ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);

		if (ns.i == os.i)
			continue;

		rv = is_valid_transition(os, ns);
		if (rv < SS_SUCCESS)
			break;

		if (!(flags & CS_HARD)) {
			rv = is_valid_state(mdev, ns);
			if (rv < SS_SUCCESS) {
				if (is_valid_state(mdev, os) == rv)
					rv = is_valid_soft_transition(os, ns);
			} else
				rv = is_valid_soft_transition(os, ns);
		}
		if (rv < SS_SUCCESS)
			break;
	}

	if (rv < SS_SUCCESS && flags & CS_VERBOSE)
		print_st_err(mdev, os, ns, rv);

	return rv;
}
static union drbd_state
conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
	       enum chg_state_flags flags)
{
	union drbd_state ns, os, ms = { };
	struct drbd_conf *mdev;
	enum drbd_state_rv rv;
	int vnr;

	if (mask.conn == C_MASK)
		tconn->cstate = val.conn;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		os = mdev->state;
		ns = apply_mask_val(os, mask, val);
		ns = sanitize_state(mdev, ns, NULL);

		rv = __drbd_set_state(mdev, ns, flags, NULL);
		if (rv < SS_SUCCESS)
			BUG();

		ms.role = max_role(mdev->state.role, ms.role);
		ms.peer = max_role(mdev->state.peer, ms.peer);
		ms.disk = max_t(enum drbd_disk_state, mdev->state.disk, ms.disk);
		ms.pdsk = max_t(enum drbd_disk_state, mdev->state.pdsk, ms.pdsk);
	}

	return ms;
}
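
/*
 * The returned ms ("max state") folds all volumes into one summary
 * state using max_role()/max_t(), mirroring the conn_highest_*()
 * helpers at the top of this file; e.g. a single R_PRIMARY volume
 * makes ms.role = R_PRIMARY for the whole connection.
 */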
static enum drbd_state_rv
_conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
{
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
		return SS_CW_FAILED_BY_PEER;

	spin_lock_irq(&tconn->req_lock);
	rv = tconn->cstate != C_WF_REPORT_PARAMS ? SS_CW_NO_NEED : SS_UNKNOWN_ERROR;

	if (rv == SS_UNKNOWN_ERROR)
		rv = conn_is_valid_transition(tconn, mask, val, 0);

	if (rv == SS_SUCCESS)
		rv = SS_UNKNOWN_ERROR; /* continue waiting, otherwise fail. */

	spin_unlock_irq(&tconn->req_lock);

	return rv;
}
static enum drbd_state_rv
conn_cl_wide(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
	     enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	spin_unlock_irq(&tconn->req_lock);
	mutex_lock(&tconn->cstate_mutex);

	if (conn_send_state_req(tconn, mask, val)) {
		rv = SS_CW_FAILED_BY_PEER;
		/* if (f & CS_VERBOSE)
		   print_st_err(mdev, os, ns, rv); */
		goto abort;
	}

	wait_event(tconn->ping_wait, (rv = _conn_rq_cond(tconn, mask, val)));

abort:
	mutex_unlock(&tconn->cstate_mutex);
	spin_lock_irq(&tconn->req_lock);

	return rv;
}
enum drbd_state_rv
_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
		    enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	struct after_conn_state_chg_work *acscw;
	enum drbd_conns oc = tconn->cstate;
	union drbd_state ms, os;

	rv = is_valid_conn_transition(oc, val.conn);
	if (rv < SS_SUCCESS)
		goto abort;

	rv = conn_is_valid_transition(tconn, mask, val, flags);
	if (rv < SS_SUCCESS)
		goto abort;

	if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
	    !(flags & (CS_LOCAL_ONLY | CS_HARD))) {
		rv = conn_cl_wide(tconn, mask, val, flags);
		if (rv < SS_SUCCESS)
			goto abort;
	}

	conn_old_common_state(tconn, &os, &flags);
	ms = conn_set_state(tconn, mask, val, flags);
	ms.conn = val.conn;
	conn_pr_state_change(tconn, os, ms, flags);

	acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
	if (acscw) {
		acscw->oc = os.conn;
		acscw->nms = ms;
		acscw->flags = flags;
		acscw->w.cb = w_after_conn_state_ch;
		acscw->w.tconn = tconn;
		drbd_queue_work(&tconn->data.work, &acscw->w);
	} else {
		conn_err(tconn, "Could not kmalloc an acscw\n");
	}

abort:
	return rv;
}
enum drbd_state_rv
conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
		   enum chg_state_flags flags)
{
	enum drbd_state_rv rv;

	spin_lock_irq(&tconn->req_lock);
	rv = _conn_request_state(tconn, mask, val, flags);
	spin_unlock_irq(&tconn->req_lock);

	return rv;
}
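
/*
 * Locked convenience wrapper; a hypothetical caller (sketch, mirroring
 * typical drbd_nl usage) tearing down the network configuration of a
 * whole connection would look like
 *
 *	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 0);
 *
 * which takes req_lock itself, unlike _conn_request_state() above.
 */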