drbd: Print common state changes of all volumes as connection state changes
drivers/block/drbd/drbd_state.c
/*
   drbd_state.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h"

struct after_state_chg_work {
        struct drbd_work w;
        union drbd_state os;
        union drbd_state ns;
        enum chg_state_flags flags;
        struct completion *done;
};

static int w_after_state_ch(struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                           union drbd_state ns, enum chg_state_flags flags);
static void after_all_state_ch(struct drbd_tconn *tconn);
static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
                                       const char **warn_sync_abort);

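/* Returns true if all volumes of @tconn are unconfigured, i.e.
 * Secondary/StandAlone/Diskless. */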
bool conn_all_vols_unconf(struct drbd_tconn *tconn)
{
        struct drbd_conf *mdev;
        int vnr;

        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                if (mdev->state.disk != D_DISKLESS ||
                    mdev->state.conn != C_STANDALONE ||
                    mdev->state.role != R_SECONDARY)
                        return false;
        }
        return true;
}

/* Unfortunately the states were not correctly ordered when they were
   defined. Therefore we cannot use max_t() here. */
static enum drbd_role max_role(enum drbd_role role1, enum drbd_role role2)
{
        if (role1 == R_PRIMARY || role2 == R_PRIMARY)
                return R_PRIMARY;
        if (role1 == R_SECONDARY || role2 == R_SECONDARY)
                return R_SECONDARY;
        return R_UNKNOWN;
}
static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
{
        if (role1 == R_UNKNOWN || role2 == R_UNKNOWN)
                return R_UNKNOWN;
        if (role1 == R_SECONDARY || role2 == R_SECONDARY)
                return R_SECONDARY;
        return R_PRIMARY;
}

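/* The conn_highest_*() helpers reduce the per-volume states of a connection
 * to a single most significant value, e.g. for printing or checking a state
 * change of the connection as a whole. */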
enum drbd_role conn_highest_role(struct drbd_tconn *tconn)
{
        enum drbd_role role = R_UNKNOWN;
        struct drbd_conf *mdev;
        int vnr;

        idr_for_each_entry(&tconn->volumes, mdev, vnr)
                role = max_role(role, mdev->state.role);

        return role;
}

enum drbd_role conn_highest_peer(struct drbd_tconn *tconn)
{
        enum drbd_role peer = R_UNKNOWN;
        struct drbd_conf *mdev;
        int vnr;

        idr_for_each_entry(&tconn->volumes, mdev, vnr)
                peer = max_role(peer, mdev->state.peer);

        return peer;
}

enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn)
{
        enum drbd_disk_state ds = D_DISKLESS;
        struct drbd_conf *mdev;
        int vnr;

        idr_for_each_entry(&tconn->volumes, mdev, vnr)
                ds = max_t(enum drbd_disk_state, ds, mdev->state.disk);

        return ds;
}

enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn)
{
        enum drbd_disk_state ds = D_DISKLESS;
        struct drbd_conf *mdev;
        int vnr;

        idr_for_each_entry(&tconn->volumes, mdev, vnr)
                ds = max_t(enum drbd_disk_state, ds, mdev->state.pdsk);

        return ds;
}

/**
 * cl_wide_st_chg() - true if the state change is a cluster wide one
 * @mdev:       DRBD device.
 * @os:         old (current) state.
 * @ns:         new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_conf *mdev,
                          union drbd_state os, union drbd_state ns)
{
        return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
                 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
                  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
                  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
                  (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
                (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
                (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}

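/* A state change request is passed around as a (mask, val) pair of
 * union drbd_state; apply_mask_val() clears the bits selected by @mask
 * in @os and ORs in the corresponding bits of @val. */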
static union drbd_state
apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
{
        union drbd_state ns;
        ns.i = (os.i & ~mask.i) | val.i;
        return ns;
}

enum drbd_state_rv
drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
                  union drbd_state mask, union drbd_state val)
{
        unsigned long flags;
        union drbd_state ns;
        enum drbd_state_rv rv;

        spin_lock_irqsave(&mdev->tconn->req_lock, flags);
        ns = apply_mask_val(mdev->state, mask, val);
        rv = _drbd_set_state(mdev, ns, f, NULL);
        ns = mdev->state;
        spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

        return rv;
}

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @mdev:       DRBD device.
 * @mask:       mask of state bits to change.
 * @val:        value of new state bits.
 */
void drbd_force_state(struct drbd_conf *mdev,
        union drbd_state mask, union drbd_state val)
{
        drbd_change_state(mdev, CS_HARD, mask, val);
}

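/* Illustration only, mirroring callers elsewhere in DRBD; NS() is the
 * helper from drbd_int.h that builds a (mask, val) pair for one field:
 *
 *      drbd_force_state(mdev, NS(disk, D_FAILED));
 *
 * imposes disk = D_FAILED with CS_HARD, i.e. without the soft transition
 * checks. */

/* _req_st_cond() is the wait_event() condition used by drbd_req_state()
 * while a cluster wide state change is pending: it returns a final SS_*
 * verdict once the peer has answered (CL_ST_CHG_SUCCESS/CL_ST_CHG_FAIL),
 * SS_CW_NO_NEED if no cluster wide change turns out to be necessary, and
 * SS_UNKNOWN_ERROR (zero, a false wait_event() condition) to keep waiting. */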
static enum drbd_state_rv
_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
             union drbd_state val)
{
        union drbd_state os, ns;
        unsigned long flags;
        enum drbd_state_rv rv;

        if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
                return SS_CW_SUCCESS;

        if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
                return SS_CW_FAILED_BY_PEER;

        spin_lock_irqsave(&mdev->tconn->req_lock, flags);
        os = mdev->state;
        ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
        rv = is_valid_transition(os, ns);
        if (rv == SS_SUCCESS)
                rv = SS_UNKNOWN_ERROR;  /* cont waiting, otherwise fail. */

        if (!cl_wide_st_chg(mdev, os, ns))
                rv = SS_CW_NO_NEED;
        if (rv == SS_UNKNOWN_ERROR) {
                rv = is_valid_state(mdev, ns);
                if (rv == SS_SUCCESS) {
                        rv = is_valid_soft_transition(os, ns);
                        if (rv == SS_SUCCESS)
                                rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
                }
        }
        spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

        return rv;
}

/**
 * drbd_req_state() - Perform a possibly cluster wide state change
 * @mdev:       DRBD device.
 * @mask:       mask of state bits to change.
 * @val:        value of new state bits.
 * @f:          flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static enum drbd_state_rv
drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
               union drbd_state val, enum chg_state_flags f)
{
        struct completion done;
        unsigned long flags;
        union drbd_state os, ns;
        enum drbd_state_rv rv;

        init_completion(&done);

        if (f & CS_SERIALIZE)
                mutex_lock(mdev->state_mutex);

        spin_lock_irqsave(&mdev->tconn->req_lock, flags);
        os = mdev->state;
        ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
        rv = is_valid_transition(os, ns);
        if (rv < SS_SUCCESS) {
                spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
                goto abort;
        }

        if (cl_wide_st_chg(mdev, os, ns)) {
                rv = is_valid_state(mdev, ns);
                if (rv == SS_SUCCESS)
                        rv = is_valid_soft_transition(os, ns);
                spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

                if (rv < SS_SUCCESS) {
                        if (f & CS_VERBOSE)
                                print_st_err(mdev, os, ns, rv);
                        goto abort;
                }

                if (drbd_send_state_req(mdev, mask, val)) {
                        rv = SS_CW_FAILED_BY_PEER;
                        if (f & CS_VERBOSE)
                                print_st_err(mdev, os, ns, rv);
                        goto abort;
                }

                wait_event(mdev->state_wait,
                        (rv = _req_st_cond(mdev, mask, val)));

                if (rv < SS_SUCCESS) {
                        if (f & CS_VERBOSE)
                                print_st_err(mdev, os, ns, rv);
                        goto abort;
                }
                spin_lock_irqsave(&mdev->tconn->req_lock, flags);
                ns = apply_mask_val(mdev->state, mask, val);
                rv = _drbd_set_state(mdev, ns, f, &done);
        } else {
                rv = _drbd_set_state(mdev, ns, f, &done);
        }

        spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

        if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
                D_ASSERT(current != mdev->tconn->worker.task);
                wait_for_completion(&done);
        }

abort:
        if (f & CS_SERIALIZE)
                mutex_unlock(mdev->state_mutex);

        return rv;
}

/**
 * _drbd_request_state() - Request a state change (with flags)
 * @mdev:       DRBD device.
 * @mask:       mask of state bits to change.
 * @val:        value of new state bits.
 * @f:          flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
enum drbd_state_rv
_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
                    union drbd_state val, enum chg_state_flags f)
{
        enum drbd_state_rv rv;

        wait_event(mdev->state_wait,
                   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);

        return rv;
}

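/* Illustration only: drbd_request_state() is assumed here to be the inline
 * convenience wrapper declared in drbd_int.h; a typical soft (user
 * requested) state change then reads:
 *
 *      drbd_request_state(mdev, NS(conn, C_DISCONNECTING));
 */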
static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
{
        dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
            name,
            drbd_conn_str(ns.conn),
            drbd_role_str(ns.role),
            drbd_role_str(ns.peer),
            drbd_disk_str(ns.disk),
            drbd_disk_str(ns.pdsk),
            is_susp(ns) ? 's' : 'r',
            ns.aftr_isp ? 'a' : '-',
            ns.peer_isp ? 'p' : '-',
            ns.user_isp ? 'u' : '-',
            ns.susp_fen ? 'F' : '-',
            ns.susp_nod ? 'N' : '-'
            );
}

void print_st_err(struct drbd_conf *mdev, union drbd_state os,
                  union drbd_state ns, enum drbd_state_rv err)
{
        if (err == SS_IN_TRANSIENT_STATE)
                return;
        dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
        print_st(mdev, " state", os);
        print_st(mdev, "wanted", ns);
}

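/* Render each state field that differs between @os and @ns, and whose
 * CS_DC_* flag is set in @flags, as "field( old -> new ) " into @pb;
 * returns the number of characters written (0 if nothing is printed). */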
static long print_state_change(char *pb, union drbd_state os, union drbd_state ns,
                               enum chg_state_flags flags)
{
        char *pbp;
        pbp = pb;
        *pbp = 0;
        if (ns.role != os.role && flags & CS_DC_ROLE)
                pbp += sprintf(pbp, "role( %s -> %s ) ",
                               drbd_role_str(os.role),
                               drbd_role_str(ns.role));
        if (ns.peer != os.peer && flags & CS_DC_PEER)
                pbp += sprintf(pbp, "peer( %s -> %s ) ",
                               drbd_role_str(os.peer),
                               drbd_role_str(ns.peer));
        if (ns.conn != os.conn && flags & CS_DC_CONN)
                pbp += sprintf(pbp, "conn( %s -> %s ) ",
                               drbd_conn_str(os.conn),
                               drbd_conn_str(ns.conn));
        if (ns.disk != os.disk && flags & CS_DC_DISK)
                pbp += sprintf(pbp, "disk( %s -> %s ) ",
                               drbd_disk_str(os.disk),
                               drbd_disk_str(ns.disk));
        if (ns.pdsk != os.pdsk && flags & CS_DC_PDSK)
                pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
                               drbd_disk_str(os.pdsk),
                               drbd_disk_str(ns.pdsk));
        if (is_susp(ns) != is_susp(os))
                pbp += sprintf(pbp, "susp( %d -> %d ) ",
                               is_susp(os),
                               is_susp(ns));
        if (ns.aftr_isp != os.aftr_isp)
                pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
                               os.aftr_isp,
                               ns.aftr_isp);
        if (ns.peer_isp != os.peer_isp)
                pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
                               os.peer_isp,
                               ns.peer_isp);
        if (ns.user_isp != os.user_isp)
                pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
                               os.user_isp,
                               ns.user_isp);

        return pbp - pb;
}

static void drbd_pr_state_change(struct drbd_conf *mdev, union drbd_state os, union drbd_state ns,
                                 enum chg_state_flags flags)
{
        char pb[300];

        if (print_state_change(pb, os, ns, flags ^ CS_DC_MASK))
                dev_info(DEV, "%s\n", pb);
}

static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os, union drbd_state ns,
                                 enum chg_state_flags flags)
{
        char pb[300];

        if (print_state_change(pb, os, ns, flags))
                conn_info(tconn, "%s\n", pb);
}

/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @mdev:       DRBD device.
 * @ns:         State to consider.
 */
static enum drbd_state_rv
is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
        /* See drbd_state_sw_errors in drbd_strings.c */

        enum drbd_fencing_p fp;
        enum drbd_state_rv rv = SS_SUCCESS;

        fp = FP_DONT_CARE;
        if (get_ldev(mdev)) {
                fp = mdev->ldev->dc.fencing;
                put_ldev(mdev);
        }

        if (get_net_conf(mdev->tconn)) {
                if (!mdev->tconn->net_conf->two_primaries && ns.role == R_PRIMARY) {
                        if (ns.peer == R_PRIMARY)
                                rv = SS_TWO_PRIMARIES;
                        else if (conn_highest_peer(mdev->tconn) == R_PRIMARY)
                                rv = SS_O_VOL_PEER_PRI;
                }
                put_net_conf(mdev->tconn);
        }

        if (rv <= 0)
                /* already found a reason to abort */;
        else if (ns.role == R_SECONDARY && mdev->open_cnt)
                rv = SS_DEVICE_IN_USE;

        else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
                rv = SS_NO_UP_TO_DATE_DISK;

        else if (fp >= FP_RESOURCE &&
                 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
                rv = SS_PRIMARY_NOP;

        else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
                rv = SS_NO_UP_TO_DATE_DISK;

        else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
                rv = SS_NO_LOCAL_DISK;

        else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
                rv = SS_NO_REMOTE_DISK;

        else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
                rv = SS_NO_UP_TO_DATE_DISK;

        else if ((ns.conn == C_CONNECTED ||
                  ns.conn == C_WF_BITMAP_S ||
                  ns.conn == C_SYNC_SOURCE ||
                  ns.conn == C_PAUSED_SYNC_S) &&
                  ns.disk == D_OUTDATED)
                rv = SS_CONNECTED_OUTDATES;

        else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
                 (mdev->tconn->net_conf->verify_alg[0] == 0))
                rv = SS_NO_VERIFY_ALG;

        else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
                  mdev->tconn->agreed_pro_version < 88)
                rv = SS_NOT_SUPPORTED;

        else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
                rv = SS_CONNECTED_OUTDATES;

        return rv;
}

/**
 * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
 * This function limits state transitions that may be declined by DRBD,
 * i.e. user requests (aka soft transitions).
 * @mdev:       DRBD device.
 * @ns:         new state.
 * @os:         old state.
 */
static enum drbd_state_rv
is_valid_soft_transition(union drbd_state os, union drbd_state ns)
{
        enum drbd_state_rv rv = SS_SUCCESS;

        if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
            os.conn > C_CONNECTED)
                rv = SS_RESYNC_RUNNING;

        if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
                rv = SS_ALREADY_STANDALONE;

        if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
                rv = SS_IS_DISKLESS;

        if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
                rv = SS_NO_NET_CONFIG;

        if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
                rv = SS_LOWER_THAN_OUTDATED;

        if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
                rv = SS_IN_TRANSIENT_STATE;

        /* if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
           rv = SS_IN_TRANSIENT_STATE; */

        if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
                rv = SS_NEED_CONNECTION;

        if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
            ns.conn != os.conn && os.conn > C_CONNECTED)
                rv = SS_RESYNC_RUNNING;

        if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
            os.conn < C_CONNECTED)
                rv = SS_NEED_CONNECTION;

        if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
            && os.conn < C_WF_REPORT_PARAMS)
                rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */

        return rv;
}

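/* Validate a transition of the connection state on its own; used by
 * is_valid_transition() below for hard state changes. */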
static enum drbd_state_rv
is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
{
        enum drbd_state_rv rv = SS_SUCCESS;

        /* Disallow network errors from configuring a device's network part */
        if ((nc >= C_TIMEOUT && nc <= C_TEAR_DOWN) && oc <= C_DISCONNECTING)
                rv = SS_NEED_CONNECTION;

        /* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
        if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
                rv = SS_IN_TRANSIENT_STATE;

        /* After C_DISCONNECTING only C_STANDALONE may follow */
        if (oc == C_DISCONNECTING && nc != C_STANDALONE)
                rv = SS_IN_TRANSIENT_STATE;

        return rv;
}

/**
 * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
 * This limits hard state transitions. Hard state transitions are facts that
 * are imposed on DRBD by the environment, e.g. the disk broke or the network
 * broke down. Even those hard state transitions are still not allowed to do
 * everything.
 * @ns:         new state.
 * @os:         old state.
 */
static enum drbd_state_rv
is_valid_transition(union drbd_state os, union drbd_state ns)
{
        enum drbd_state_rv rv;

        rv = is_valid_conn_transition(os.conn, ns.conn);

        /* we cannot fail (again) if we already detached */
        if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
                rv = SS_IS_DISKLESS;

        /* if we are only D_ATTACHING yet,
         * we can (and should) go directly to D_DISKLESS. */
        if (ns.disk == D_FAILED && os.disk == D_ATTACHING) {
                printk("TODO: FIX ME\n");
                rv = SS_IS_DISKLESS;
        }

        return rv;
}

/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @mdev:       DRBD device.
 * @os:         old state.
 * @ns:         new state.
 * @warn_sync_abort:
 *
 * When we lose connection, we have to set the state of the peer's disk
 * (pdsk) to D_UNKNOWN. This rule and many more along those lines are in
 * this function.
 */
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
                                       const char **warn_sync_abort)
{
        enum drbd_fencing_p fp;
        enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;

        fp = FP_DONT_CARE;
        if (get_ldev(mdev)) {
                fp = mdev->ldev->dc.fencing;
                put_ldev(mdev);
        }

        /* Implications from connection to peer and peer_isp */
        if (ns.conn < C_CONNECTED) {
                ns.peer_isp = 0;
                ns.peer = R_UNKNOWN;
                if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
                        ns.pdsk = D_UNKNOWN;
        }

        /* Clear the aftr_isp when becoming unconfigured */
        if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
                ns.aftr_isp = 0;

        /* An implication of the disk states onto the connection state */
        /* Abort resync if a disk fails/detaches */
        if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
                if (warn_sync_abort)
                        *warn_sync_abort =
                                ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ?
                                "Online-verify" : "Resync";
                ns.conn = C_CONNECTED;
        }

        /* Connection breaks down before we finished "Negotiating" */
        if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
            get_ldev_if_state(mdev, D_NEGOTIATING)) {
                if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
                        ns.disk = mdev->new_state_tmp.disk;
                        ns.pdsk = mdev->new_state_tmp.pdsk;
                } else {
                        dev_alert(DEV, "Connection lost while negotiating, no data!\n");
                        ns.disk = D_DISKLESS;
                        ns.pdsk = D_UNKNOWN;
                }
                put_ldev(mdev);
        }

        /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
        if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
                if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
                        ns.disk = D_UP_TO_DATE;
                if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
                        ns.pdsk = D_UP_TO_DATE;
        }

        /* Implications of the connection state on the disk states */
        disk_min = D_DISKLESS;
        disk_max = D_UP_TO_DATE;
        pdsk_min = D_INCONSISTENT;
        pdsk_max = D_UNKNOWN;
        switch ((enum drbd_conns)ns.conn) {
        case C_WF_BITMAP_T:
        case C_PAUSED_SYNC_T:
        case C_STARTING_SYNC_T:
        case C_WF_SYNC_UUID:
        case C_BEHIND:
                disk_min = D_INCONSISTENT;
                disk_max = D_OUTDATED;
                pdsk_min = D_UP_TO_DATE;
                pdsk_max = D_UP_TO_DATE;
                break;
        case C_VERIFY_S:
        case C_VERIFY_T:
                disk_min = D_UP_TO_DATE;
                disk_max = D_UP_TO_DATE;
                pdsk_min = D_UP_TO_DATE;
                pdsk_max = D_UP_TO_DATE;
                break;
        case C_CONNECTED:
                disk_min = D_DISKLESS;
                disk_max = D_UP_TO_DATE;
                pdsk_min = D_DISKLESS;
                pdsk_max = D_UP_TO_DATE;
                break;
        case C_WF_BITMAP_S:
        case C_PAUSED_SYNC_S:
        case C_STARTING_SYNC_S:
        case C_AHEAD:
                disk_min = D_UP_TO_DATE;
                disk_max = D_UP_TO_DATE;
                pdsk_min = D_INCONSISTENT;
                pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
                break;
        case C_SYNC_TARGET:
                disk_min = D_INCONSISTENT;
                disk_max = D_INCONSISTENT;
                pdsk_min = D_UP_TO_DATE;
                pdsk_max = D_UP_TO_DATE;
                break;
        case C_SYNC_SOURCE:
                disk_min = D_UP_TO_DATE;
                disk_max = D_UP_TO_DATE;
                pdsk_min = D_INCONSISTENT;
                pdsk_max = D_INCONSISTENT;
                break;
        case C_STANDALONE:
        case C_DISCONNECTING:
        case C_UNCONNECTED:
        case C_TIMEOUT:
        case C_BROKEN_PIPE:
        case C_NETWORK_FAILURE:
        case C_PROTOCOL_ERROR:
        case C_TEAR_DOWN:
        case C_WF_CONNECTION:
        case C_WF_REPORT_PARAMS:
        case C_MASK:
                break;
        }
        if (ns.disk > disk_max)
                ns.disk = disk_max;

        if (ns.disk < disk_min) {
                dev_warn(DEV, "Implicitly set disk from %s to %s\n",
                         drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
                ns.disk = disk_min;
        }
        if (ns.pdsk > pdsk_max)
                ns.pdsk = pdsk_max;

        if (ns.pdsk < pdsk_min) {
                dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
                         drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
                ns.pdsk = pdsk_min;
        }

        if (fp == FP_STONITH &&
            (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
                ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

        if (mdev->tconn->res_opts.on_no_data == OND_SUSPEND_IO &&
            (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
                ns.susp_nod = 1; /* Suspend IO while no data is accessible */

        if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
                if (ns.conn == C_SYNC_SOURCE)
                        ns.conn = C_PAUSED_SYNC_S;
                if (ns.conn == C_SYNC_TARGET)
                        ns.conn = C_PAUSED_SYNC_T;
        } else {
                if (ns.conn == C_PAUSED_SYNC_S)
                        ns.conn = C_SYNC_SOURCE;
                if (ns.conn == C_PAUSED_SYNC_T)
                        ns.conn = C_SYNC_TARGET;
        }

        return ns;
}

void drbd_resume_al(struct drbd_conf *mdev)
{
        if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
                dev_info(DEV, "Resumed AL updates\n");
}

/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
        if (mdev->tconn->agreed_pro_version < 90)
                mdev->ov_start_sector = 0;
        mdev->rs_total = drbd_bm_bits(mdev);
        mdev->ov_position = 0;
        if (cs == C_VERIFY_T) {
                /* starting online verify from an arbitrary position
                 * does not fit well into the existing protocol.
                 * on C_VERIFY_T, we initialize ov_left and friends
                 * implicitly in receive_DataRequest once the
                 * first P_OV_REQUEST is received */
                mdev->ov_start_sector = ~(sector_t)0;
        } else {
                unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
                if (bit >= mdev->rs_total) {
                        mdev->ov_start_sector =
                                BM_BIT_TO_SECT(mdev->rs_total - 1);
                        mdev->rs_total = 1;
                } else
                        mdev->rs_total -= bit;
                mdev->ov_position = mdev->ov_start_sector;
        }
        mdev->ov_left = mdev->rs_total;
}

/**
 * __drbd_set_state() - Set a new DRBD state
 * @mdev:       DRBD device.
 * @ns:         new state.
 * @flags:      Flags
 * @done:       Optional completion; it will be completed after after_state_ch() has finished
 *
 * Caller needs to hold req_lock and global_state_lock. Do not call directly.
 */
enum drbd_state_rv
__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
                 enum chg_state_flags flags, struct completion *done)
{
        union drbd_state os;
        enum drbd_state_rv rv = SS_SUCCESS;
        const char *warn_sync_abort = NULL;
        struct after_state_chg_work *ascw;

        os = mdev->state;

        ns = sanitize_state(mdev, ns, &warn_sync_abort);
        if (ns.i == os.i)
                return SS_NOTHING_TO_DO;

        rv = is_valid_transition(os, ns);
        if (rv < SS_SUCCESS)
                return rv;

        if (!(flags & CS_HARD)) {
                /*  pre-state-change checks ; only look at ns  */
                /* See drbd_state_sw_errors in drbd_strings.c */

                rv = is_valid_state(mdev, ns);
                if (rv < SS_SUCCESS) {
                        /* If the old state was illegal as well, then let
                           this happen...*/

                        if (is_valid_state(mdev, os) == rv)
                                rv = is_valid_soft_transition(os, ns);
                } else
                        rv = is_valid_soft_transition(os, ns);
        }

        if (rv < SS_SUCCESS) {
                if (flags & CS_VERBOSE)
                        print_st_err(mdev, os, ns, rv);
                return rv;
        }

        if (warn_sync_abort)
                dev_warn(DEV, "%s aborted.\n", warn_sync_abort);

        drbd_pr_state_change(mdev, os, ns, flags);

        /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
         * on the ldev here, to be sure the transition -> D_DISKLESS resp.
         * drbd_ldev_destroy() won't happen before our corresponding
         * after_state_ch works run, where we put_ldev again. */
        if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
            (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
                atomic_inc(&mdev->local_cnt);

        mdev->state = ns;

        /* solve the race between becoming unconfigured,
         * worker doing the cleanup, and
         * admin reconfiguring us:
         * on (re)configure, first set CONFIG_PENDING,
         * then wait for a potentially exiting worker,
         * start the worker, and schedule one no_op.
         * then proceed with configuration.
         */
        if (conn_all_vols_unconf(mdev->tconn) &&
            !test_and_set_bit(CONFIG_PENDING, &mdev->tconn->flags))
                set_bit(OBJECT_DYING, &mdev->tconn->flags);

        if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
                drbd_print_uuids(mdev, "attached to UUIDs");

        wake_up(&mdev->misc_wait);
        wake_up(&mdev->state_wait);
        wake_up(&mdev->tconn->ping_wait);

        /* aborted verify run. log the last position */
        if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
            ns.conn < C_CONNECTED) {
                mdev->ov_start_sector =
                        BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
                dev_info(DEV, "Online Verify reached sector %llu\n",
                        (unsigned long long)mdev->ov_start_sector);
        }

        if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
            (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
                dev_info(DEV, "Syncer continues.\n");
                mdev->rs_paused += (long)jiffies
                                  -(long)mdev->rs_mark_time[mdev->rs_last_mark];
                if (ns.conn == C_SYNC_TARGET)
                        mod_timer(&mdev->resync_timer, jiffies);
        }

        if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
            (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
                dev_info(DEV, "Resync suspended\n");
                mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
        }

        if (os.conn == C_CONNECTED &&
            (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
                unsigned long now = jiffies;
                int i;

                set_ov_position(mdev, ns.conn);
                mdev->rs_start = now;
                mdev->rs_last_events = 0;
                mdev->rs_last_sect_ev = 0;
                mdev->ov_last_oos_size = 0;
                mdev->ov_last_oos_start = 0;

                for (i = 0; i < DRBD_SYNC_MARKS; i++) {
                        mdev->rs_mark_left[i] = mdev->ov_left;
                        mdev->rs_mark_time[i] = now;
                }

                drbd_rs_controller_reset(mdev);

                if (ns.conn == C_VERIFY_S) {
                        dev_info(DEV, "Starting Online Verify from sector %llu\n",
                                        (unsigned long long)mdev->ov_position);
                        mod_timer(&mdev->resync_timer, jiffies);
                }
        }

        if (get_ldev(mdev)) {
                u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
                                                 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
                                                 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

                if (test_bit(CRASHED_PRIMARY, &mdev->flags))
                        mdf |= MDF_CRASHED_PRIMARY;
                if (mdev->state.role == R_PRIMARY ||
                    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
                        mdf |= MDF_PRIMARY_IND;
                if (mdev->state.conn > C_WF_REPORT_PARAMS)
                        mdf |= MDF_CONNECTED_IND;
                if (mdev->state.disk > D_INCONSISTENT)
                        mdf |= MDF_CONSISTENT;
                if (mdev->state.disk > D_OUTDATED)
                        mdf |= MDF_WAS_UP_TO_DATE;
                if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
                        mdf |= MDF_PEER_OUT_DATED;
                if (mdf != mdev->ldev->md.flags) {
                        mdev->ldev->md.flags = mdf;
                        drbd_md_mark_dirty(mdev);
                }
                if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
                        drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
                put_ldev(mdev);
        }

        /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
        if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
            os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
                set_bit(CONSIDER_RESYNC, &mdev->flags);

        /* Receiver should clean up itself */
        if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
                drbd_thread_stop_nowait(&mdev->tconn->receiver);

        /* Now the receiver finished cleaning up itself, it should die */
        if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
                drbd_thread_stop_nowait(&mdev->tconn->receiver);

        /* Upon network failure, we need to restart the receiver. */
        if (os.conn > C_TEAR_DOWN &&
            ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
                drbd_thread_restart_nowait(&mdev->tconn->receiver);

        /* Resume AL writing if we get a connection */
        if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
                drbd_resume_al(mdev);

        ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
        if (ascw) {
                ascw->os = os;
                ascw->ns = ns;
                ascw->flags = flags;
                ascw->w.cb = w_after_state_ch;
                ascw->w.mdev = mdev;
                ascw->done = done;
                drbd_queue_work(&mdev->tconn->data.work, &ascw->w);
        } else {
                dev_err(DEV, "Could not kmalloc an ascw\n");
        }

        return rv;
}

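/* Worker callback: runs the (potentially sleeping) after-state-change
 * actions queued by __drbd_set_state(), then completes @done for
 * CS_WAIT_COMPLETE callers. */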
static int w_after_state_ch(struct drbd_work *w, int unused)
{
        struct after_state_chg_work *ascw =
                container_of(w, struct after_state_chg_work, w);
        struct drbd_conf *mdev = w->mdev;

        after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
        if (ascw->flags & CS_WAIT_COMPLETE) {
                D_ASSERT(ascw->done != NULL);
                complete(ascw->done);
        }
        kfree(ascw);

        return 0;
}

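/* Completion callback for the bitmap write that precedes a full sync;
 * queued together with drbd_bmio_set_n_write() via drbd_queue_bitmap_io()
 * from after_state_ch() below. */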
static void abw_start_sync(struct drbd_conf *mdev, int rv)
{
        if (rv) {
                dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
                _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
                return;
        }

        switch (mdev->state.conn) {
        case C_STARTING_SYNC_T:
                _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
                break;
        case C_STARTING_SYNC_S:
                drbd_start_resync(mdev, C_SYNC_SOURCE);
                break;
        }
}

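/* Bitmap IO meant to be called from the worker thread itself: suspends IO
 * (non-blocking, open coded), runs @io_fn under the bitmap lock, and
 * resumes IO again. */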
int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
                int (*io_fn)(struct drbd_conf *),
                char *why, enum bm_flag flags)
{
        int rv;

        D_ASSERT(current == mdev->tconn->worker.task);

        /* open coded non-blocking drbd_suspend_io(mdev); */
        set_bit(SUSPEND_IO, &mdev->flags);

        drbd_bm_lock(mdev, why, flags);
        rv = io_fn(mdev);
        drbd_bm_unlock(mdev);

        drbd_resume_io(mdev);

        return rv;
}

/**
 * after_state_ch() - Perform after state change actions that may sleep
 * @mdev:       DRBD device.
 * @os:         old state.
 * @ns:         new state.
 * @flags:      Flags
 */
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                           union drbd_state ns, enum chg_state_flags flags)
{
        enum drbd_fencing_p fp;
        enum drbd_req_event what = NOTHING;
        union drbd_state nsm;
        struct sib_info sib;

        sib.sib_reason = SIB_STATE_CHANGE;
        sib.os = os;
        sib.ns = ns;

        if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
                clear_bit(CRASHED_PRIMARY, &mdev->flags);
                if (mdev->p_uuid)
                        mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
        }

        fp = FP_DONT_CARE;
        if (get_ldev(mdev)) {
                fp = mdev->ldev->dc.fencing;
                put_ldev(mdev);
        }

        /* Inform userspace about the change... */
        drbd_bcast_event(mdev, &sib);

        if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
            (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
                drbd_khelper(mdev, "pri-on-incon-degr");

        /* Here we have the actions that are performed after a
           state change. This function might sleep */

        nsm.i = -1;
        if (ns.susp_nod) {
                if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
                        what = RESEND;

                if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
                        what = RESTART_FROZEN_DISK_IO;

                if (what != NOTHING)
                        nsm.susp_nod = 0;
        }

        if (ns.susp_fen) {
                /* case1: The outdate peer handler is successful: */
                if (os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) {
                        tl_clear(mdev->tconn);
                        if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
                                drbd_uuid_new_current(mdev);
                                clear_bit(NEW_CUR_UUID, &mdev->flags);
                        }
                        spin_lock_irq(&mdev->tconn->req_lock);
                        _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
                        spin_unlock_irq(&mdev->tconn->req_lock);
                }
                /* case2: The connection was established again: */
                if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
                        clear_bit(NEW_CUR_UUID, &mdev->flags);
                        what = RESEND;
                        nsm.susp_fen = 0;
                }
        }

        if (what != NOTHING) {
                spin_lock_irq(&mdev->tconn->req_lock);
                _tl_restart(mdev->tconn, what);
                nsm.i &= mdev->state.i;
                _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
                spin_unlock_irq(&mdev->tconn->req_lock);
        }

        /* Became sync source.  With protocol >= 96, we still need to send out
         * the sync uuid now. Need to do that before any drbd_send_state, or
         * the other side may go "paused sync" before receiving the sync uuids,
         * which is unexpected. */
        if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
            (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
            mdev->tconn->agreed_pro_version >= 96 && get_ldev(mdev)) {
                drbd_gen_and_send_sync_uuid(mdev);
                put_ldev(mdev);
        }

        /* Do not change the order of the if above and the two below... */
        if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
                drbd_send_uuids(mdev);
                drbd_send_state(mdev);
        }
        /* No point in queuing send_bitmap if we don't have a connection
         * anymore, so check also the _current_ state, not only the new state
         * at the time this work was queued. */
        if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
            mdev->state.conn == C_WF_BITMAP_S)
                drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
                                "send_bitmap (WFBitMapS)",
                                BM_LOCKED_TEST_ALLOWED);

        /* Lost contact to peer's copy of the data */
        if ((os.pdsk >= D_INCONSISTENT &&
             os.pdsk != D_UNKNOWN &&
             os.pdsk != D_OUTDATED)
        &&  (ns.pdsk < D_INCONSISTENT ||
             ns.pdsk == D_UNKNOWN ||
             ns.pdsk == D_OUTDATED)) {
                if (get_ldev(mdev)) {
                        if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
                            mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
                                if (is_susp(mdev->state)) {
                                        set_bit(NEW_CUR_UUID, &mdev->flags);
                                } else {
                                        drbd_uuid_new_current(mdev);
                                        drbd_send_uuids(mdev);
                                }
                        }
                        put_ldev(mdev);
                }
        }

        if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
                if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
                        drbd_uuid_new_current(mdev);
                        drbd_send_uuids(mdev);
                }

                /* D_DISKLESS Peer becomes secondary */
                if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
                        /* We may still be Primary ourselves.
                         * No harm done if the bitmap still changes,
                         * redirtied pages will follow later. */
                        drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
                                "demote diskless peer", BM_LOCKED_SET_ALLOWED);
                put_ldev(mdev);
        }

        /* Write out all changed bits on demote.
         * Though, no need to do that just yet
         * if there is still a resync going on */
        if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
                mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
                /* No changes to the bitmap expected this time, so assert that,
                 * even though no harm was done if it did change. */
                drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
                                "demote", BM_LOCKED_TEST_ALLOWED);
                put_ldev(mdev);
        }

        /* Last part of the attaching process ... */
        if (ns.conn >= C_CONNECTED &&
            os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
                drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
                drbd_send_uuids(mdev);
                drbd_send_state(mdev);
        }

        /* We want to pause/continue resync, tell peer. */
        if (ns.conn >= C_CONNECTED &&
             ((os.aftr_isp != ns.aftr_isp) ||
              (os.user_isp != ns.user_isp)))
                drbd_send_state(mdev);

        /* In case one of the isp bits got set, suspend other devices. */
        if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
            (ns.aftr_isp || ns.peer_isp || ns.user_isp))
                suspend_other_sg(mdev);

        /* Make sure the peer gets informed about possible state
           changes (ISP bits) while we were in WFReportParams. */
        if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
                drbd_send_state(mdev);

        if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
                drbd_send_state(mdev);

        /* We are in the progress to start a full sync... */
        if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
            (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
                /* no other bitmap changes expected during this phase */
                drbd_queue_bitmap_io(mdev,
                        &drbd_bmio_set_n_write, &abw_start_sync,
                        "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);

        /* We are invalidating our self... */
        if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
            os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
                /* other bitmap operation expected during this phase */
                drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
                        "set_n_write from invalidate", BM_LOCKED_MASK);

        /* first half of local IO error, failure to attach,
         * or administrative detach */
        if (os.disk != D_FAILED && ns.disk == D_FAILED) {
                enum drbd_io_error_p eh;
                int was_io_error;
                /* corresponding get_ldev was in __drbd_set_state, to serialize
                 * our cleanup here with the transition to D_DISKLESS,
                 * so it is safe to dereference ldev here. */
                eh = mdev->ldev->dc.on_io_error;
                was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);

                /* current state still has to be D_FAILED,
                 * there is only one way out: to D_DISKLESS,
                 * and that may only happen after our put_ldev below. */
                if (mdev->state.disk != D_FAILED)
                        dev_err(DEV,
                                "ASSERT FAILED: disk is %s during detach\n",
                                drbd_disk_str(mdev->state.disk));

                if (!drbd_send_state(mdev))
                        dev_warn(DEV, "Notified peer that I am detaching my disk\n");
                else
                        dev_err(DEV, "Sending state for detaching disk failed\n");

                drbd_rs_cancel_all(mdev);

                /* In case we want to get something to stable storage still,
                 * this may be the last chance.
                 * Following put_ldev may transition to D_DISKLESS. */
                drbd_md_sync(mdev);
                put_ldev(mdev);

                if (was_io_error && eh == EP_CALL_HELPER)
                        drbd_khelper(mdev, "local-io-error");
        }

        /* second half of local IO error, failure to attach,
         * or administrative detach,
         * after local_cnt references have reached zero again */
        if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
                /* We must still be diskless,
                 * re-attach has to be serialized with this! */
                if (mdev->state.disk != D_DISKLESS)
                        dev_err(DEV,
                                "ASSERT FAILED: disk is %s while going diskless\n",
                                drbd_disk_str(mdev->state.disk));

                mdev->rs_total = 0;
                mdev->rs_failed = 0;
                atomic_set(&mdev->rs_pending_cnt, 0);

                if (!drbd_send_state(mdev))
                        dev_warn(DEV, "Notified peer that I'm now diskless.\n");
                /* corresponding get_ldev in __drbd_set_state
                 * this may finally trigger drbd_ldev_destroy. */
                put_ldev(mdev);
        }

        /* Notify peer that I had a local IO error and did not detach. */
1305         if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
1306                 drbd_send_state(mdev);
1307
1308         /* Disks got bigger while they were detached */
1309         if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1310             test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1311                 if (ns.conn == C_CONNECTED)
1312                         resync_after_online_grow(mdev);
1313         }
1314
1315         /* A resync finished or aborted, wake paused devices... */
1316         if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1317             (os.peer_isp && !ns.peer_isp) ||
1318             (os.user_isp && !ns.user_isp))
1319                 resume_next_sg(mdev);
1320
1321         /* sync target done with resync.  Explicitly notify peer, even though
1322          * it should (at least for non-empty resyncs) already know itself. */
1323         if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1324                 drbd_send_state(mdev);
1325
1326         /* This triggers bitmap writeout of potentially still unwritten pages
1327          * if the resync finished cleanly, or aborted because of peer disk
1328          * failure, or because of connection loss.
1329          * For resync aborted because of local disk failure, we cannot do
1330          * any bitmap writeout anymore.
1331          * No harm done if some bits change during this phase.
1332          */
1333         if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
1334                 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
1335                         "write from resync_finished", BM_LOCKED_SET_ALLOWED);
1336                 put_ldev(mdev);
1337         }
1338
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY) {
		if (os.aftr_isp != ns.aftr_isp)
			resume_next_sg(mdev);
	}

	after_all_state_ch(mdev->tconn);

	drbd_md_sync(mdev);
}

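/* Connection-wide counterpart of after_state_chg_work: carries the old
 * connection state and the new, maximal state over all volumes to the
 * worker, which performs the after-state-change actions. */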
struct after_conn_state_chg_work {
	struct drbd_work w;
	enum drbd_conns oc;
	union drbd_state nms; /* new, max state over all mdevs */
	enum chg_state_flags flags;
};

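/* If the last volume got deconfigured and the connection object is being
 * torn down, ask the worker thread to stop itself. */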
static void after_all_state_ch(struct drbd_tconn *tconn)
{
	if (conn_all_vols_unconf(tconn) &&
	    test_bit(OBJECT_DYING, &tconn->flags)) {
		drbd_thread_stop_nowait(&tconn->worker);
	}
}

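/* Worker callback for a queued after_conn_state_chg_work: start the
 * receiver thread once the network got configured (C_STANDALONE ->
 * C_UNCONNECTED), then run the common after-state-change checks. */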
static int w_after_conn_state_ch(struct drbd_work *w, int unused)
{
	struct after_conn_state_chg_work *acscw =
		container_of(w, struct after_conn_state_chg_work, w);
	struct drbd_tconn *tconn = w->tconn;
	enum drbd_conns oc = acscw->oc;
	union drbd_state nms = acscw->nms;

	kfree(acscw);

	/* Upon network configuration, we need to start the receiver */
	if (oc == C_STANDALONE && nms.conn == C_UNCONNECTED)
		drbd_thread_start(&tconn->receiver);

	after_all_state_ch(tconn);

	return 0;
}

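/* Determine the state all volumes of @tconn had in common before the
 * change.  A CS_DC_* flag stays set in *pf only if the corresponding
 * field was identical across all volumes, so the caller can report such
 * changes once, at the connection level. */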
void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf)
{
	enum chg_state_flags flags = ~0;
	union drbd_state os, cs = {}; /* old_state, common_state */
	struct drbd_conf *mdev;
	int vnr, first_vol = 1;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		os = mdev->state;

		if (first_vol) {
			cs = os;
			first_vol = 0;
			continue;
		}

		if (cs.role != os.role)
			flags &= ~CS_DC_ROLE;

		if (cs.peer != os.peer)
			flags &= ~CS_DC_PEER;

		if (cs.conn != os.conn)
			flags &= ~CS_DC_CONN;

		if (cs.disk != os.disk)
			flags &= ~CS_DC_DISK;

		if (cs.pdsk != os.pdsk)
			flags &= ~CS_DC_PDSK;
	}

	*pf |= CS_DC_MASK;
	*pf &= flags;
	*pcs = cs;
}

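/* Check for every volume of @tconn whether applying mask/val would be a
 * valid state transition.  With CS_HARD only the hard transition rules
 * are checked; otherwise the per-device and soft transition rules apply
 * as well.  Returns the first error found, or SS_SUCCESS. */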
static enum drbd_state_rv
conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
			 enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	union drbd_state ns, os;
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		os = mdev->state;
		ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);

		if (ns.i == os.i)
			continue;

		rv = is_valid_transition(os, ns);
		if (rv < SS_SUCCESS)
			break;

		if (!(flags & CS_HARD)) {
			rv = is_valid_state(mdev, ns);
			if (rv < SS_SUCCESS) {
				if (is_valid_state(mdev, os) == rv)
					rv = is_valid_soft_transition(os, ns);
			} else
				rv = is_valid_soft_transition(os, ns);
		}
		if (rv < SS_SUCCESS)
			break;
	}

	if (rv < SS_SUCCESS && (flags & CS_VERBOSE))
		print_st_err(mdev, os, ns, rv);

	return rv;
}

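/* Commit mask/val to all volumes of @tconn and return the "maximal"
 * state over all volumes (max_role() per role field, maximal disk
 * state per disk field).  The transitions were validated beforehand,
 * so a failing __drbd_set_state() here is a bug. */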
static union drbd_state
conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
	       enum chg_state_flags flags)
{
	union drbd_state ns, os, ms = { };
	struct drbd_conf *mdev;
	enum drbd_state_rv rv;
	int vnr;

	if (mask.conn == C_MASK)
		tconn->cstate = val.conn;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		os = mdev->state;
		ns = apply_mask_val(os, mask, val);
		ns = sanitize_state(mdev, ns, NULL);

		rv = __drbd_set_state(mdev, ns, flags, NULL);
		if (rv < SS_SUCCESS)
			BUG();

		ms.role = max_role(mdev->state.role, ms.role);
		ms.peer = max_role(mdev->state.peer, ms.peer);
		ms.disk = max_t(enum drbd_disk_state, mdev->state.disk, ms.disk);
		ms.pdsk = max_t(enum drbd_disk_state, mdev->state.pdsk, ms.pdsk);
	}

	return ms;
}

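/* Condition for the cluster-wide wait in conn_cl_wide():
 * SS_UNKNOWN_ERROR (== 0) keeps wait_event() sleeping; any other value,
 * set by the peer's reply, by the connection state moving on, or by a
 * failed re-validation of the transition, ends the wait. */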
static enum drbd_state_rv
_conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
{
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
		return SS_CW_FAILED_BY_PEER;

	spin_lock_irq(&tconn->req_lock);
	rv = tconn->cstate != C_WF_REPORT_PARAMS ? SS_CW_NO_NEED : SS_UNKNOWN_ERROR;

	if (rv == SS_UNKNOWN_ERROR)
		rv = conn_is_valid_transition(tconn, mask, val, 0);

	if (rv == SS_SUCCESS)
		rv = SS_UNKNOWN_ERROR; /* continue waiting, otherwise fail. */

	spin_unlock_irq(&tconn->req_lock);

	return rv;
}

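/* Perform a cluster-wide state change: ask the peer for permission with
 * conn_send_state_req() and wait for its reply.  Called with req_lock
 * held; drops it while sleeping and re-acquires it before returning.
 * cstate_mutex serializes concurrent cluster-wide changes. */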
static enum drbd_state_rv
conn_cl_wide(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
	     enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	spin_unlock_irq(&tconn->req_lock);
	mutex_lock(&tconn->cstate_mutex);

	if (conn_send_state_req(tconn, mask, val)) {
		rv = SS_CW_FAILED_BY_PEER;
		goto abort;
	}

	wait_event(tconn->ping_wait, (rv = _conn_rq_cond(tconn, mask, val)));

abort:
	mutex_unlock(&tconn->cstate_mutex);
	spin_lock_irq(&tconn->req_lock);

	return rv;
}

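/* Change the connection state, and with it the state of all its
 * volumes, according to mask/val.  Must be called with req_lock held.
 * Validates the transition, negotiates cluster-wide when going from
 * C_WF_REPORT_PARAMS to C_DISCONNECTING, commits the change, and queues
 * the after-state-change work for the worker thread. */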
enum drbd_state_rv
_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
		    enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	struct after_conn_state_chg_work *acscw;
	enum drbd_conns oc = tconn->cstate;
	union drbd_state ms, os;

	rv = is_valid_conn_transition(oc, val.conn);
	if (rv < SS_SUCCESS)
		goto abort;

	rv = conn_is_valid_transition(tconn, mask, val, flags);
	if (rv < SS_SUCCESS)
		goto abort;

	if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
	    !(flags & (CS_LOCAL_ONLY | CS_HARD))) {
		rv = conn_cl_wide(tconn, mask, val, flags);
		if (rv < SS_SUCCESS)
			goto abort;
	}

	conn_old_common_state(tconn, &os, &flags);
	ms = conn_set_state(tconn, mask, val, flags);
	ms.conn = val.conn;
	conn_pr_state_change(tconn, os, ms, flags);

	acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
	if (acscw) {
		acscw->oc = os.conn;
		acscw->nms = ms;
		acscw->flags = flags;
		acscw->w.cb = w_after_conn_state_ch;
		acscw->w.tconn = tconn;
		drbd_queue_work(&tconn->data.work, &acscw->w);
	} else {
		conn_err(tconn, "Could not kmalloc an acscw\n");
	}

abort:
	return rv;
}

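/* Same as _conn_request_state(), but takes req_lock itself. */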
enum drbd_state_rv
conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
		   enum chg_state_flags flags)
{
	enum drbd_state_rv rv;

	spin_lock_irq(&tconn->req_lock);
	rv = _conn_request_state(tconn, mask, val, flags);
	spin_unlock_irq(&tconn->req_lock);

	return rv;
}