/*
   drbd_state.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h"

struct after_state_chg_work {
        struct drbd_work w;
        union drbd_state os;
        union drbd_state ns;
        enum chg_state_flags flags;
        struct completion *done;
};

enum sanitize_state_warnings {
        NO_WARNING,
        ABORTED_ONLINE_VERIFY,
        ABORTED_RESYNC,
        CONNECTION_LOST_NEGOTIATING,
        IMPLICITLY_UPGRADED_DISK,
        IMPLICITLY_UPGRADED_PDSK,
};

static int w_after_state_ch(struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                           union drbd_state ns, enum chg_state_flags flags);
static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
                                       enum sanitize_state_warnings *warn);

static inline bool is_susp(union drbd_state s)
{
        return s.susp || s.susp_nod || s.susp_fen;
}

bool conn_all_vols_unconf(struct drbd_tconn *tconn)
{
        struct drbd_conf *mdev;
        bool rv = true;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                if (mdev->state.disk != D_DISKLESS ||
                    mdev->state.conn != C_STANDALONE ||
                    mdev->state.role != R_SECONDARY) {
                        rv = false;
                        break;
                }
        }
        rcu_read_unlock();

        return rv;
}

/* Unfortunately the states were not correctly ordered when they were
   defined, therefore we cannot use max_t() here. */
static enum drbd_role max_role(enum drbd_role role1, enum drbd_role role2)
{
        if (role1 == R_PRIMARY || role2 == R_PRIMARY)
                return R_PRIMARY;
        if (role1 == R_SECONDARY || role2 == R_SECONDARY)
                return R_SECONDARY;
        return R_UNKNOWN;
}
static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
{
        if (role1 == R_UNKNOWN || role2 == R_UNKNOWN)
                return R_UNKNOWN;
        if (role1 == R_SECONDARY || role2 == R_SECONDARY)
                return R_SECONDARY;
        return R_PRIMARY;
}
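
/*
 * Illustrative sketch, not part of the original file: in the protocol
 * encoding of enum drbd_role, R_PRIMARY sorts numerically below
 * R_SECONDARY (an assumption based on the comment above), so a plain
 * numeric max_t() would wrongly prefer Secondary over Primary.  The
 * helpers above encode the intended semantic ordering instead:
 */
#if 0
        WARN_ON(max_role(R_PRIMARY, R_SECONDARY) != R_PRIMARY);   /* semantic max */
        WARN_ON(min_role(R_PRIMARY, R_SECONDARY) != R_SECONDARY); /* semantic min */
        WARN_ON(max_role(R_UNKNOWN, R_SECONDARY) != R_SECONDARY);
        WARN_ON(min_role(R_UNKNOWN, R_SECONDARY) != R_UNKNOWN);   /* unknown wins the min */
#endif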

enum drbd_role conn_highest_role(struct drbd_tconn *tconn)
{
        enum drbd_role role = R_UNKNOWN;
        struct drbd_conf *mdev;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr)
                role = max_role(role, mdev->state.role);
        rcu_read_unlock();

        return role;
}

enum drbd_role conn_highest_peer(struct drbd_tconn *tconn)
{
        enum drbd_role peer = R_UNKNOWN;
        struct drbd_conf *mdev;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr)
                peer = max_role(peer, mdev->state.peer);
        rcu_read_unlock();

        return peer;
}

enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn)
{
        enum drbd_disk_state ds = D_DISKLESS;
        struct drbd_conf *mdev;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr)
                ds = max_t(enum drbd_disk_state, ds, mdev->state.disk);
        rcu_read_unlock();

        return ds;
}

enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn)
{
        enum drbd_disk_state ds = D_MASK;
        struct drbd_conf *mdev;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr)
                ds = min_t(enum drbd_disk_state, ds, mdev->state.disk);
        rcu_read_unlock();

        return ds;
}

enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn)
{
        enum drbd_disk_state ds = D_DISKLESS;
        struct drbd_conf *mdev;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr)
                ds = max_t(enum drbd_disk_state, ds, mdev->state.pdsk);
        rcu_read_unlock();

        return ds;
}

enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn)
{
        enum drbd_conns conn = C_MASK;
        struct drbd_conf *mdev;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr)
                conn = min_t(enum drbd_conns, conn, mdev->state.conn);
        rcu_read_unlock();

        return conn;
}

/**
 * cl_wide_st_chg() - true if the state change is a cluster wide one
 * @mdev:       DRBD device.
 * @os:         old (current) state.
 * @ns:         new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_conf *mdev,
                          union drbd_state os, union drbd_state ns)
{
        return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
                 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
                  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
                  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
                  (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
                (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
                (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}
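
/*
 * Illustrative sketch, not part of the original file: a promotion to
 * Primary while connected must be agreed on by the peer and is therefore
 * cluster wide; a purely local change while StandAlone is not.
 */
#if 0
        union drbd_state os = drbd_read_state(mdev);
        union drbd_state ns = os;

        ns.role = R_PRIMARY;
        /* connected + role change -> cluster wide, the peer gets a vote */
        WARN_ON(os.conn >= C_CONNECTED && !cl_wide_st_chg(mdev, os, ns));
#endif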

static union drbd_state
apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
{
        union drbd_state ns;
        ns.i = (os.i & ~mask.i) | val.i;
        return ns;
}
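
/*
 * Illustrative sketch, not part of the original file: mask selects which
 * bit fields of the state word change, val carries their new contents,
 * and all other fields are taken over from os unchanged.
 */
#if 0
        union drbd_state os = drbd_read_state(mdev);
        union drbd_state mask = { }, val = { }, ns;

        mask.conn = C_MASK;             /* only the connection field... */
        val.conn = C_DISCONNECTING;     /* ...gets a new value */
        ns = apply_mask_val(os, mask, val);
        WARN_ON(ns.conn != C_DISCONNECTING || ns.role != os.role);
#endif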

enum drbd_state_rv
drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
                  union drbd_state mask, union drbd_state val)
{
        unsigned long flags;
        union drbd_state ns;
        enum drbd_state_rv rv;

        spin_lock_irqsave(&mdev->tconn->req_lock, flags);
        ns = apply_mask_val(drbd_read_state(mdev), mask, val);
        rv = _drbd_set_state(mdev, ns, f, NULL);
        spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

        return rv;
}

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @mdev:       DRBD device.
 * @mask:       mask of state bits to change.
 * @val:        value of new state bits.
 */
void drbd_force_state(struct drbd_conf *mdev,
        union drbd_state mask, union drbd_state val)
{
        drbd_change_state(mdev, CS_HARD, mask, val);
}
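
/*
 * Illustrative sketch, not part of the original file: hard state changes
 * are typically expressed with the NS() mask/val helper (defined in
 * drbd_int.h, used further down in this file), e.g. when the environment
 * tells us the local disk is gone:
 */
#if 0
        drbd_force_state(mdev, NS(disk, D_FAILED));
#endif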

static enum drbd_state_rv
_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
             union drbd_state val)
{
        union drbd_state os, ns;
        unsigned long flags;
        enum drbd_state_rv rv;

        if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
                return SS_CW_SUCCESS;

        if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
                return SS_CW_FAILED_BY_PEER;

        spin_lock_irqsave(&mdev->tconn->req_lock, flags);
        os = drbd_read_state(mdev);
        ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
        rv = is_valid_transition(os, ns);
        if (rv == SS_SUCCESS)
                rv = SS_UNKNOWN_ERROR;  /* continue waiting, otherwise fail. */

        if (!cl_wide_st_chg(mdev, os, ns))
                rv = SS_CW_NO_NEED;
        if (rv == SS_UNKNOWN_ERROR) {
                rv = is_valid_state(mdev, ns);
                if (rv == SS_SUCCESS) {
                        rv = is_valid_soft_transition(os, ns);
                        if (rv == SS_SUCCESS)
                                rv = SS_UNKNOWN_ERROR; /* continue waiting, otherwise fail. */
                }
        }
        spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

        return rv;
}

/**
 * drbd_req_state() - Perform a possibly cluster wide state change
 * @mdev:       DRBD device.
 * @mask:       mask of state bits to change.
 * @val:        value of new state bits.
 * @f:          flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static enum drbd_state_rv
drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
               union drbd_state val, enum chg_state_flags f)
{
        struct completion done;
        unsigned long flags;
        union drbd_state os, ns;
        enum drbd_state_rv rv;

        init_completion(&done);

        if (f & CS_SERIALIZE)
                mutex_lock(mdev->state_mutex);

        spin_lock_irqsave(&mdev->tconn->req_lock, flags);
        os = drbd_read_state(mdev);
        ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
        rv = is_valid_transition(os, ns);
        if (rv < SS_SUCCESS) {
                spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
                goto abort;
        }

        if (cl_wide_st_chg(mdev, os, ns)) {
                rv = is_valid_state(mdev, ns);
                if (rv == SS_SUCCESS)
                        rv = is_valid_soft_transition(os, ns);
                spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

                if (rv < SS_SUCCESS) {
                        if (f & CS_VERBOSE)
                                print_st_err(mdev, os, ns, rv);
                        goto abort;
                }

                if (drbd_send_state_req(mdev, mask, val)) {
                        rv = SS_CW_FAILED_BY_PEER;
                        if (f & CS_VERBOSE)
                                print_st_err(mdev, os, ns, rv);
                        goto abort;
                }

                wait_event(mdev->state_wait,
                        (rv = _req_st_cond(mdev, mask, val)));

                if (rv < SS_SUCCESS) {
                        if (f & CS_VERBOSE)
                                print_st_err(mdev, os, ns, rv);
                        goto abort;
                }
                spin_lock_irqsave(&mdev->tconn->req_lock, flags);
                ns = apply_mask_val(drbd_read_state(mdev), mask, val);
                rv = _drbd_set_state(mdev, ns, f, &done);
        } else {
                rv = _drbd_set_state(mdev, ns, f, &done);
        }

        spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

        if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
                D_ASSERT(current != mdev->tconn->worker.task);
                wait_for_completion(&done);
        }

abort:
        if (f & CS_SERIALIZE)
                mutex_unlock(mdev->state_mutex);

        return rv;
}

/**
 * _drbd_request_state() - Request a state change (with flags)
 * @mdev:       DRBD device.
 * @mask:       mask of state bits to change.
 * @val:        value of new state bits.
 * @f:          flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
enum drbd_state_rv
_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
                    union drbd_state val, enum chg_state_flags f)
{
        enum drbd_state_rv rv;

        wait_event(mdev->state_wait,
                   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);

        return rv;
}

static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
{
        dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
            name,
            drbd_conn_str(ns.conn),
            drbd_role_str(ns.role),
            drbd_role_str(ns.peer),
            drbd_disk_str(ns.disk),
            drbd_disk_str(ns.pdsk),
            is_susp(ns) ? 's' : 'r',
            ns.aftr_isp ? 'a' : '-',
            ns.peer_isp ? 'p' : '-',
            ns.user_isp ? 'u' : '-',
            ns.susp_fen ? 'F' : '-',
            ns.susp_nod ? 'N' : '-'
            );
}

void print_st_err(struct drbd_conf *mdev, union drbd_state os,
                  union drbd_state ns, enum drbd_state_rv err)
{
        if (err == SS_IN_TRANSIENT_STATE)
                return;
        dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
        print_st(mdev, " state", os);
        print_st(mdev, "wanted", ns);
}

static long print_state_change(char *pb, union drbd_state os, union drbd_state ns,
                               enum chg_state_flags flags)
{
        char *pbp;
        pbp = pb;
        *pbp = 0;

        if (ns.role != os.role && flags & CS_DC_ROLE)
                pbp += sprintf(pbp, "role( %s -> %s ) ",
                               drbd_role_str(os.role),
                               drbd_role_str(ns.role));
        if (ns.peer != os.peer && flags & CS_DC_PEER)
                pbp += sprintf(pbp, "peer( %s -> %s ) ",
                               drbd_role_str(os.peer),
                               drbd_role_str(ns.peer));
        if (ns.conn != os.conn && flags & CS_DC_CONN)
                pbp += sprintf(pbp, "conn( %s -> %s ) ",
                               drbd_conn_str(os.conn),
                               drbd_conn_str(ns.conn));
        if (ns.disk != os.disk && flags & CS_DC_DISK)
                pbp += sprintf(pbp, "disk( %s -> %s ) ",
                               drbd_disk_str(os.disk),
                               drbd_disk_str(ns.disk));
        if (ns.pdsk != os.pdsk && flags & CS_DC_PDSK)
                pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
                               drbd_disk_str(os.pdsk),
                               drbd_disk_str(ns.pdsk));

        return pbp - pb;
}

static void drbd_pr_state_change(struct drbd_conf *mdev, union drbd_state os, union drbd_state ns,
                                 enum chg_state_flags flags)
{
        char pb[300];
        char *pbp = pb;

        pbp += print_state_change(pbp, os, ns, flags ^ CS_DC_MASK);

        if (ns.aftr_isp != os.aftr_isp)
                pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
                               os.aftr_isp,
                               ns.aftr_isp);
        if (ns.peer_isp != os.peer_isp)
                pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
                               os.peer_isp,
                               ns.peer_isp);
        if (ns.user_isp != os.user_isp)
                pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
                               os.user_isp,
                               ns.user_isp);

        if (pbp != pb)
                dev_info(DEV, "%s\n", pb);
}
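
/*
 * Illustrative sketch, not part of the original file: for a promotion
 * with a simultaneous disk upgrade the helpers above emit one combined
 * log line, roughly (the exact "block drbdN:" prefix is produced by
 * dev_info() and depends on the device name):
 *
 *   role( Secondary -> Primary ) disk( Inconsistent -> UpToDate )
 */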

static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os, union drbd_state ns,
                                 enum chg_state_flags flags)
{
        char pb[300];
        char *pbp = pb;

        pbp += print_state_change(pbp, os, ns, flags);

        if (is_susp(ns) != is_susp(os) && flags & CS_DC_SUSP)
                pbp += sprintf(pbp, "susp( %d -> %d ) ",
                               is_susp(os),
                               is_susp(ns));

        if (pbp != pb)
                conn_info(tconn, "%s\n", pb);
}


/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @mdev:       DRBD device.
 * @ns:         State to consider.
 */
static enum drbd_state_rv
is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
        /* See drbd_state_sw_errors in drbd_strings.c */

        enum drbd_fencing_p fp;
        enum drbd_state_rv rv = SS_SUCCESS;
        struct net_conf *nc;

        rcu_read_lock();
        fp = FP_DONT_CARE;
        if (get_ldev(mdev)) {
                fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
                put_ldev(mdev);
        }

        nc = rcu_dereference(mdev->tconn->net_conf);
        if (nc) {
                if (!nc->two_primaries && ns.role == R_PRIMARY) {
                        if (ns.peer == R_PRIMARY)
                                rv = SS_TWO_PRIMARIES;
                        else if (conn_highest_peer(mdev->tconn) == R_PRIMARY)
                                rv = SS_O_VOL_PEER_PRI;
                }
        }

        if (rv <= 0)
                /* already found a reason to abort */;
        else if (ns.role == R_SECONDARY && mdev->open_cnt)
                rv = SS_DEVICE_IN_USE;

        else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
                rv = SS_NO_UP_TO_DATE_DISK;

        else if (fp >= FP_RESOURCE &&
                 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
                rv = SS_PRIMARY_NOP;

        else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
                rv = SS_NO_UP_TO_DATE_DISK;

        else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
                rv = SS_NO_LOCAL_DISK;

        else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
                rv = SS_NO_REMOTE_DISK;

        else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
                rv = SS_NO_UP_TO_DATE_DISK;

        else if ((ns.conn == C_CONNECTED ||
                  ns.conn == C_WF_BITMAP_S ||
                  ns.conn == C_SYNC_SOURCE ||
                  ns.conn == C_PAUSED_SYNC_S) &&
                  ns.disk == D_OUTDATED)
                rv = SS_CONNECTED_OUTDATES;

        else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
                 (nc->verify_alg[0] == 0))
                rv = SS_NO_VERIFY_ALG;

        else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
                  mdev->tconn->agreed_pro_version < 88)
                rv = SS_NOT_SUPPORTED;

        else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
                rv = SS_CONNECTED_OUTDATES;

        rcu_read_unlock();

        return rv;
}

/**
 * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
 * @os:         old state.
 * @ns:         new state.
 *
 * This function limits state transitions that may be declined by DRBD,
 * i.e. user requests (aka soft transitions).
 */
static enum drbd_state_rv
is_valid_soft_transition(union drbd_state os, union drbd_state ns)
{
        enum drbd_state_rv rv = SS_SUCCESS;

        if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
            os.conn > C_CONNECTED)
                rv = SS_RESYNC_RUNNING;

        if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
                rv = SS_ALREADY_STANDALONE;

        if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
                rv = SS_IS_DISKLESS;

        if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
                rv = SS_NO_NET_CONFIG;

        if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
                rv = SS_LOWER_THAN_OUTDATED;

        if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
                rv = SS_IN_TRANSIENT_STATE;

        /* if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
           rv = SS_IN_TRANSIENT_STATE; */

        if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
                rv = SS_NEED_CONNECTION;

        if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
            ns.conn != os.conn && os.conn > C_CONNECTED)
                rv = SS_RESYNC_RUNNING;

        if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
            os.conn < C_CONNECTED)
                rv = SS_NEED_CONNECTION;

        if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
            && os.conn < C_WF_REPORT_PARAMS)
                rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */

        return rv;
}

static enum drbd_state_rv
is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
{
        /* no change -> nothing to do, at least for the connection part */
        if (oc == nc)
                return SS_NOTHING_TO_DO;

        /* disconnect of an unconfigured connection does not make sense */
        if (oc == C_STANDALONE && nc == C_DISCONNECTING)
                return SS_ALREADY_STANDALONE;

        /* from C_STANDALONE, we start with C_UNCONNECTED */
        if (oc == C_STANDALONE && nc != C_UNCONNECTED)
                return SS_NEED_CONNECTION;

        /* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
        if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
                return SS_IN_TRANSIENT_STATE;

        /* After C_DISCONNECTING only C_STANDALONE may follow */
        if (oc == C_DISCONNECTING && nc != C_STANDALONE)
                return SS_IN_TRANSIENT_STATE;

        return SS_SUCCESS;
}
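
/*
 * Illustrative sketch, not part of the original file: a few accepted and
 * rejected connection state transitions according to the rules above.
 */
#if 0
        WARN_ON(is_valid_conn_transition(C_STANDALONE, C_UNCONNECTED) != SS_SUCCESS);
        WARN_ON(is_valid_conn_transition(C_STANDALONE, C_DISCONNECTING) != SS_ALREADY_STANDALONE);
        WARN_ON(is_valid_conn_transition(C_DISCONNECTING, C_CONNECTED) != SS_IN_TRANSIENT_STATE);
        WARN_ON(is_valid_conn_transition(C_TIMEOUT, C_UNCONNECTED) != SS_SUCCESS);
#endif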


/**
 * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
 * @os:         old state.
 * @ns:         new state.
 *
 * This limits hard state transitions. Hard state transitions are facts
 * that are imposed on DRBD by the environment, e.g. the disk broke or the
 * network connection went down. But even those hard state transitions are
 * not allowed to do everything.
 */
static enum drbd_state_rv
is_valid_transition(union drbd_state os, union drbd_state ns)
{
        enum drbd_state_rv rv;

        rv = is_valid_conn_transition(os.conn, ns.conn);

        /* we cannot fail (again) if we already detached */
        if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
                rv = SS_IS_DISKLESS;

        return rv;
}

static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
{
        static const char *msg_table[] = {
                [NO_WARNING] = "",
                [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
                [ABORTED_RESYNC] = "Resync aborted.",
                [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
                [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
                [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
        };

        if (warn != NO_WARNING)
                dev_warn(DEV, "%s\n", msg_table[warn]);
}

/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @mdev:       DRBD device.
 * @ns:         new state.
 * @warn:       placed warning, if any.
 *
 * When we lose the connection, we have to set the state of the peer's disk
 * (pdsk) to D_UNKNOWN. This rule and many more along those lines are in
 * this function.
 */
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
                                       enum sanitize_state_warnings *warn)
{
        enum drbd_fencing_p fp;
        enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;

        if (warn)
                *warn = NO_WARNING;

        fp = FP_DONT_CARE;
        if (get_ldev(mdev)) {
                rcu_read_lock();
                fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
                rcu_read_unlock();
                put_ldev(mdev);
        }

        /* Implications from connection to peer and peer_isp */
        if (ns.conn < C_CONNECTED) {
                ns.peer_isp = 0;
                ns.peer = R_UNKNOWN;
                if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
                        ns.pdsk = D_UNKNOWN;
        }

        /* Clear the aftr_isp when becoming unconfigured */
        if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
                ns.aftr_isp = 0;

        /* An implication of the disk states onto the connection state */
        /* Abort resync if a disk fails/detaches */
        if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
                if (warn)
                        *warn = ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ?
                                ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
                ns.conn = C_CONNECTED;
        }

        /* Connection breaks down before we finished "Negotiating" */
        if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
            get_ldev_if_state(mdev, D_NEGOTIATING)) {
                if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
                        ns.disk = mdev->new_state_tmp.disk;
                        ns.pdsk = mdev->new_state_tmp.pdsk;
                } else {
                        if (warn)
                                *warn = CONNECTION_LOST_NEGOTIATING;
                        ns.disk = D_DISKLESS;
                        ns.pdsk = D_UNKNOWN;
                }
                put_ldev(mdev);
        }

        /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
        if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
                if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
                        ns.disk = D_UP_TO_DATE;
                if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
                        ns.pdsk = D_UP_TO_DATE;
        }

        /* Implications of the connection state on the disk states */
        disk_min = D_DISKLESS;
        disk_max = D_UP_TO_DATE;
        pdsk_min = D_INCONSISTENT;
        pdsk_max = D_UNKNOWN;
        switch ((enum drbd_conns)ns.conn) {
        case C_WF_BITMAP_T:
        case C_PAUSED_SYNC_T:
        case C_STARTING_SYNC_T:
        case C_WF_SYNC_UUID:
        case C_BEHIND:
                disk_min = D_INCONSISTENT;
                disk_max = D_OUTDATED;
                pdsk_min = D_UP_TO_DATE;
                pdsk_max = D_UP_TO_DATE;
                break;
        case C_VERIFY_S:
        case C_VERIFY_T:
                disk_min = D_UP_TO_DATE;
                disk_max = D_UP_TO_DATE;
                pdsk_min = D_UP_TO_DATE;
                pdsk_max = D_UP_TO_DATE;
                break;
        case C_CONNECTED:
                disk_min = D_DISKLESS;
                disk_max = D_UP_TO_DATE;
                pdsk_min = D_DISKLESS;
                pdsk_max = D_UP_TO_DATE;
                break;
        case C_WF_BITMAP_S:
        case C_PAUSED_SYNC_S:
        case C_STARTING_SYNC_S:
        case C_AHEAD:
                disk_min = D_UP_TO_DATE;
                disk_max = D_UP_TO_DATE;
                pdsk_min = D_INCONSISTENT;
                pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
                break;
        case C_SYNC_TARGET:
                disk_min = D_INCONSISTENT;
                disk_max = D_INCONSISTENT;
                pdsk_min = D_UP_TO_DATE;
                pdsk_max = D_UP_TO_DATE;
                break;
        case C_SYNC_SOURCE:
                disk_min = D_UP_TO_DATE;
                disk_max = D_UP_TO_DATE;
                pdsk_min = D_INCONSISTENT;
                pdsk_max = D_INCONSISTENT;
                break;
        case C_STANDALONE:
        case C_DISCONNECTING:
        case C_UNCONNECTED:
        case C_TIMEOUT:
        case C_BROKEN_PIPE:
        case C_NETWORK_FAILURE:
        case C_PROTOCOL_ERROR:
        case C_TEAR_DOWN:
        case C_WF_CONNECTION:
        case C_WF_REPORT_PARAMS:
        case C_MASK:
                break;
        }
        if (ns.disk > disk_max)
                ns.disk = disk_max;

        if (ns.disk < disk_min) {
                if (warn)
                        *warn = IMPLICITLY_UPGRADED_DISK;
                ns.disk = disk_min;
        }
        if (ns.pdsk > pdsk_max)
                ns.pdsk = pdsk_max;

        if (ns.pdsk < pdsk_min) {
                if (warn)
                        *warn = IMPLICITLY_UPGRADED_PDSK;
                ns.pdsk = pdsk_min;
        }

        if (fp == FP_STONITH &&
            (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
                ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

        if (mdev->tconn->res_opts.on_no_data == OND_SUSPEND_IO &&
            (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
                ns.susp_nod = 1; /* Suspend IO while no accessible, up to date data is available */

        if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
                if (ns.conn == C_SYNC_SOURCE)
                        ns.conn = C_PAUSED_SYNC_S;
                if (ns.conn == C_SYNC_TARGET)
                        ns.conn = C_PAUSED_SYNC_T;
        } else {
                if (ns.conn == C_PAUSED_SYNC_S)
                        ns.conn = C_SYNC_SOURCE;
                if (ns.conn == C_PAUSED_SYNC_T)
                        ns.conn = C_SYNC_TARGET;
        }

        return ns;
}
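
/*
 * Illustrative sketch, not part of the original file: as a SyncTarget
 * the local disk state is clamped to exactly D_INCONSISTENT, whatever
 * the requested state said.
 */
#if 0
        union drbd_state ns = drbd_read_state(mdev);

        ns.conn = C_SYNC_TARGET;
        ns.disk = D_UP_TO_DATE;                 /* will be pulled down... */
        ns = sanitize_state(mdev, ns, NULL);
        WARN_ON(ns.disk != D_INCONSISTENT);     /* ...to disk_max */
#endif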

void drbd_resume_al(struct drbd_conf *mdev)
{
        if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
                dev_info(DEV, "Resumed AL updates\n");
}

/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
        if (mdev->tconn->agreed_pro_version < 90)
                mdev->ov_start_sector = 0;
        mdev->rs_total = drbd_bm_bits(mdev);
        mdev->ov_position = 0;
        if (cs == C_VERIFY_T) {
                /* starting online verify from an arbitrary position
                 * does not fit well into the existing protocol.
                 * on C_VERIFY_T, we initialize ov_left and friends
                 * implicitly in receive_DataRequest once the
                 * first P_OV_REQUEST is received */
                mdev->ov_start_sector = ~(sector_t)0;
        } else {
                unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
                if (bit >= mdev->rs_total) {
                        mdev->ov_start_sector =
                                BM_BIT_TO_SECT(mdev->rs_total - 1);
                        mdev->rs_total = 1;
                } else
                        mdev->rs_total -= bit;
                mdev->ov_position = mdev->ov_start_sector;
        }
        mdev->ov_left = mdev->rs_total;
}
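
/*
 * Illustrative sketch, not part of the original file: assuming the usual
 * DRBD bitmap granularity of one bit per 4KiB block (8 sectors of 512
 * bytes), starting a verify at sector 1024 skips the first 128 bitmap
 * bits of rs_total.
 */
#if 0
        unsigned long bit = BM_SECT_TO_BIT(1024);       /* -> 128, under that assumption */
        sector_t sect = BM_BIT_TO_SECT(128);            /* -> 1024, the inverse mapping */
#endif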

/**
 * __drbd_set_state() - Set a new DRBD state
 * @mdev:       DRBD device.
 * @ns:         new state.
 * @flags:      Flags
 * @done:       Optional completion that will be completed after after_state_ch() has finished
 *
 * Caller needs to hold req_lock and global_state_lock. Do not call directly.
 */
enum drbd_state_rv
__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
                 enum chg_state_flags flags, struct completion *done)
{
        union drbd_state os;
        enum drbd_state_rv rv = SS_SUCCESS;
        enum sanitize_state_warnings ssw;
        struct after_state_chg_work *ascw;

        os = drbd_read_state(mdev);

        ns = sanitize_state(mdev, ns, &ssw);
        if (ns.i == os.i)
                return SS_NOTHING_TO_DO;

        rv = is_valid_transition(os, ns);
        if (rv < SS_SUCCESS)
                return rv;

        if (!(flags & CS_HARD)) {
                /* pre-state-change checks; only look at ns */
                /* See drbd_state_sw_errors in drbd_strings.c */

                rv = is_valid_state(mdev, ns);
                if (rv < SS_SUCCESS) {
                        /* If the old state was illegal as well, then let
                           this happen...*/

                        if (is_valid_state(mdev, os) == rv)
                                rv = is_valid_soft_transition(os, ns);
                } else
                        rv = is_valid_soft_transition(os, ns);
        }

        if (rv < SS_SUCCESS) {
                if (flags & CS_VERBOSE)
                        print_st_err(mdev, os, ns, rv);
                return rv;
        }

        print_sanitize_warnings(mdev, ssw);

        drbd_pr_state_change(mdev, os, ns, flags);

        /* Display changes to the susp* flags that were caused by the call to
           sanitize_state(). Only display them here if we were not called from
           _conn_request_state() */
        if (!(flags & CS_DC_SUSP))
                conn_pr_state_change(mdev->tconn, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP);

        /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
         * on the ldev here, to be sure the transition to D_DISKLESS and the
         * call to drbd_ldev_destroy() won't happen before our corresponding
         * after_state_ch works run, where we put_ldev again. */
        if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
            (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
                atomic_inc(&mdev->local_cnt);

        mdev->state.i = ns.i;
        mdev->tconn->susp = ns.susp;
        mdev->tconn->susp_nod = ns.susp_nod;
        mdev->tconn->susp_fen = ns.susp_fen;

        if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
                drbd_print_uuids(mdev, "attached to UUIDs");

        wake_up(&mdev->misc_wait);
        wake_up(&mdev->state_wait);
        wake_up(&mdev->tconn->ping_wait);

        /* aborted verify run. log the last position */
        if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
            ns.conn < C_CONNECTED) {
                mdev->ov_start_sector =
                        BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
                dev_info(DEV, "Online Verify reached sector %llu\n",
                        (unsigned long long)mdev->ov_start_sector);
        }

        if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
            (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
                dev_info(DEV, "Syncer continues.\n");
                mdev->rs_paused += (long)jiffies
                                  -(long)mdev->rs_mark_time[mdev->rs_last_mark];
                if (ns.conn == C_SYNC_TARGET)
                        mod_timer(&mdev->resync_timer, jiffies);
        }

        if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
            (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
                dev_info(DEV, "Resync suspended\n");
                mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
        }

        if (os.conn == C_CONNECTED &&
            (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
                unsigned long now = jiffies;
                int i;

                set_ov_position(mdev, ns.conn);
                mdev->rs_start = now;
                mdev->rs_last_events = 0;
                mdev->rs_last_sect_ev = 0;
                mdev->ov_last_oos_size = 0;
                mdev->ov_last_oos_start = 0;

                for (i = 0; i < DRBD_SYNC_MARKS; i++) {
                        mdev->rs_mark_left[i] = mdev->ov_left;
                        mdev->rs_mark_time[i] = now;
                }

                drbd_rs_controller_reset(mdev);

                if (ns.conn == C_VERIFY_S) {
                        dev_info(DEV, "Starting Online Verify from sector %llu\n",
                                        (unsigned long long)mdev->ov_position);
                        mod_timer(&mdev->resync_timer, jiffies);
                }
        }

        if (get_ldev(mdev)) {
                u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
                                                 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
                                                 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

                if (test_bit(CRASHED_PRIMARY, &mdev->flags))
                        mdf |= MDF_CRASHED_PRIMARY;
                if (mdev->state.role == R_PRIMARY ||
                    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
                        mdf |= MDF_PRIMARY_IND;
                if (mdev->state.conn > C_WF_REPORT_PARAMS)
                        mdf |= MDF_CONNECTED_IND;
                if (mdev->state.disk > D_INCONSISTENT)
                        mdf |= MDF_CONSISTENT;
                if (mdev->state.disk > D_OUTDATED)
                        mdf |= MDF_WAS_UP_TO_DATE;
                if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
                        mdf |= MDF_PEER_OUT_DATED;
                if (mdf != mdev->ldev->md.flags) {
                        mdev->ldev->md.flags = mdf;
                        drbd_md_mark_dirty(mdev);
                }
                if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
                        drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
                put_ldev(mdev);
        }

        /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
        if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
            os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
                set_bit(CONSIDER_RESYNC, &mdev->flags);

        /* Receiver should clean up itself */
        if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
                drbd_thread_stop_nowait(&mdev->tconn->receiver);

        /* Now the receiver finished cleaning up itself, it should die */
        if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
                drbd_thread_stop_nowait(&mdev->tconn->receiver);

        /* Upon network failure, we need to restart the receiver. */
        if (os.conn > C_TEAR_DOWN &&
            ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
                drbd_thread_restart_nowait(&mdev->tconn->receiver);

        /* Resume AL writing if we get a connection */
        if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
                drbd_resume_al(mdev);

        ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
        if (ascw) {
                ascw->os = os;
                ascw->ns = ns;
                ascw->flags = flags;
                ascw->w.cb = w_after_state_ch;
                ascw->w.mdev = mdev;
                ascw->done = done;
                drbd_queue_work(&mdev->tconn->data.work, &ascw->w);
        } else {
                dev_err(DEV, "Could not kmalloc an ascw\n");
        }

        return rv;
}
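
/*
 * Illustrative sketch, not part of the original file: __drbd_set_state()
 * is never called directly; callers go through wrappers that take the
 * req_lock, roughly like drbd_change_state() above does:
 */
#if 0
        spin_lock_irq(&mdev->tconn->req_lock);
        rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
        spin_unlock_irq(&mdev->tconn->req_lock);
#endif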

static int w_after_state_ch(struct drbd_work *w, int unused)
{
        struct after_state_chg_work *ascw =
                container_of(w, struct after_state_chg_work, w);
        struct drbd_conf *mdev = w->mdev;

        after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
        if (ascw->flags & CS_WAIT_COMPLETE) {
                D_ASSERT(ascw->done != NULL);
                complete(ascw->done);
        }
        kfree(ascw);

        return 0;
}

static void abw_start_sync(struct drbd_conf *mdev, int rv)
{
        if (rv) {
                dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
                _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
                return;
        }

        switch (mdev->state.conn) {
        case C_STARTING_SYNC_T:
                _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
                break;
        case C_STARTING_SYNC_S:
                drbd_start_resync(mdev, C_SYNC_SOURCE);
                break;
        }
}

int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
                int (*io_fn)(struct drbd_conf *),
                char *why, enum bm_flag flags)
{
        int rv;

        D_ASSERT(current == mdev->tconn->worker.task);

        /* open coded non-blocking drbd_suspend_io(mdev); */
        set_bit(SUSPEND_IO, &mdev->flags);

        drbd_bm_lock(mdev, why, flags);
        rv = io_fn(mdev);
        drbd_bm_unlock(mdev);

        drbd_resume_io(mdev);

        return rv;
}
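
/*
 * Illustrative sketch, not part of the original file: this is how the
 * demote path in after_state_ch() below uses the helper, writing out the
 * whole bitmap from worker context while IO is suspended:
 */
#if 0
        drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
                        "demote", BM_LOCKED_TEST_ALLOWED);
#endif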
1130
1131 /**
1132  * after_state_ch() - Perform after state change actions that may sleep
1133  * @mdev:       DRBD device.
1134  * @os:         old state.
1135  * @ns:         new state.
1136  * @flags:      Flags
1137  */
1138 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1139                            union drbd_state ns, enum chg_state_flags flags)
1140 {
1141         enum drbd_fencing_p fp;
1142         struct sib_info sib;
1143
1144         sib.sib_reason = SIB_STATE_CHANGE;
1145         sib.os = os;
1146         sib.ns = ns;
1147
1148         if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1149                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1150                 if (mdev->p_uuid)
1151                         mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1152         }
1153
1154         fp = FP_DONT_CARE;
1155         if (get_ldev(mdev)) {
1156                 rcu_read_lock();
1157                 fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
1158                 rcu_read_unlock();
1159                 put_ldev(mdev);
1160         }
1161
1162         /* Inform userspace about the change... */
1163         drbd_bcast_event(mdev, &sib);
1164
1165         if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1166             (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1167                 drbd_khelper(mdev, "pri-on-incon-degr");
1168
1169         /* Here we have the actions that are performed after a
1170            state change. This function might sleep */
1171
1172         if (ns.susp_nod) {
1173                 enum drbd_req_event what = NOTHING;
1174
1175                 if (os.conn < C_CONNECTED && conn_lowest_conn(mdev->tconn) >= C_CONNECTED)
1176                         what = RESEND;
1177
1178                 if (os.disk == D_ATTACHING && conn_lowest_disk(mdev->tconn) > D_ATTACHING)
1179                         what = RESTART_FROZEN_DISK_IO;
1180
1181                 if (what != NOTHING) {
1182                         spin_lock_irq(&mdev->tconn->req_lock);
1183                         _tl_restart(mdev->tconn, what);
1184                         _drbd_set_state(_NS(mdev, susp_nod, 0), CS_VERBOSE, NULL);
1185                         spin_unlock_irq(&mdev->tconn->req_lock);
1186                 }
1187         }
1188
1189         /* Became sync source.  With protocol >= 96, we still need to send out
1190          * the sync uuid now. Need to do that before any drbd_send_state, or
1191          * the other side may go "paused sync" before receiving the sync uuids,
1192          * which is unexpected. */
1193         if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1194             (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1195             mdev->tconn->agreed_pro_version >= 96 && get_ldev(mdev)) {
1196                 drbd_gen_and_send_sync_uuid(mdev);
1197                 put_ldev(mdev);
1198         }
1199
1200         /* Do not change the order of the if above and the two below... */
1201         if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
1202                 drbd_send_uuids(mdev);
1203                 drbd_send_state(mdev);
1204         }
1205         /* No point in queuing send_bitmap if we don't have a connection
1206          * anymore, so check also the _current_ state, not only the new state
1207          * at the time this work was queued. */
1208         if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
1209             mdev->state.conn == C_WF_BITMAP_S)
1210                 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
1211                                 "send_bitmap (WFBitMapS)",
1212                                 BM_LOCKED_TEST_ALLOWED);
1213
1214         /* Lost contact to peer's copy of the data */
1215         if ((os.pdsk >= D_INCONSISTENT &&
1216              os.pdsk != D_UNKNOWN &&
1217              os.pdsk != D_OUTDATED)
1218         &&  (ns.pdsk < D_INCONSISTENT ||
1219              ns.pdsk == D_UNKNOWN ||
1220              ns.pdsk == D_OUTDATED)) {
1221                 if (get_ldev(mdev)) {
1222                         if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
1223                             mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1224                                 if (drbd_suspended(mdev)) {
1225                                         set_bit(NEW_CUR_UUID, &mdev->flags);
1226                                 } else {
1227                                         drbd_uuid_new_current(mdev);
1228                                         drbd_send_uuids(mdev);
1229                                 }
1230                         }
1231                         put_ldev(mdev);
1232                 }
1233         }
1234
1235         if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
1236                 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
1237                         drbd_uuid_new_current(mdev);
1238                         drbd_send_uuids(mdev);
1239                 }
1240
1241                 /* D_DISKLESS Peer becomes secondary */
1242                 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
1243                         /* We may still be Primary ourselves.
1244                          * No harm done if the bitmap still changes,
1245                          * redirtied pages will follow later. */
1246                         drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1247                                 "demote diskless peer", BM_LOCKED_SET_ALLOWED);
1248                 put_ldev(mdev);
1249         }
1250
1251         /* Write out all changed bits on demote.
1252          * Though, no need to da that just yet
1253          * if there is a resync going on still */
1254         if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
1255                 mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
1256                 /* No changes to the bitmap expected this time, so assert that,
1257                  * even though no harm was done if it did change. */
1258                 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1259                                 "demote", BM_LOCKED_TEST_ALLOWED);
1260                 put_ldev(mdev);
1261         }
1262
1263         /* Last part of the attaching process ... */
1264         if (ns.conn >= C_CONNECTED &&
1265             os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
1266                 drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
1267                 drbd_send_uuids(mdev);
1268                 drbd_send_state(mdev);
1269         }
1270
1271         /* We want to pause/continue resync, tell peer. */
1272         if (ns.conn >= C_CONNECTED &&
1273              ((os.aftr_isp != ns.aftr_isp) ||
1274               (os.user_isp != ns.user_isp)))
1275                 drbd_send_state(mdev);
1276
1277         /* In case one of the isp bits got set, suspend other devices. */
1278         if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1279             (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1280                 suspend_other_sg(mdev);
1281
1282         /* Make sure the peer gets informed about eventual state
1283            changes (ISP bits) while we were in WFReportParams. */
1284         if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1285                 drbd_send_state(mdev);
1286
1287         if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1288                 drbd_send_state(mdev);
1289
1290         /* We are in the progress to start a full sync... */
1291         if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1292             (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
1293                 /* no other bitmap changes expected during this phase */
1294                 drbd_queue_bitmap_io(mdev,
1295                         &drbd_bmio_set_n_write, &abw_start_sync,
1296                         "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
1297
1298         /* We are invalidating our self... */
1299         if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1300             os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1301                 /* other bitmap operation expected during this phase */
1302                 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
1303                         "set_n_write from invalidate", BM_LOCKED_MASK);
1304
1305         /* first half of local IO error, failure to attach,
1306          * or administrative detach */
1307         if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1308                 enum drbd_io_error_p eh;
1309                 int was_io_error;
1310                 /* corresponding get_ldev was in __drbd_set_state, to serialize
1311                  * our cleanup here with the transition to D_DISKLESS,
1312                  * so it is safe to dreference ldev here. */
1313                 rcu_read_lock();
1314                 eh = rcu_dereference(mdev->ldev->disk_conf)->on_io_error;
1315                 rcu_read_unlock();
1316                 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1317
1318                 /* current state still has to be D_FAILED,
1319                  * there is only one way out: to D_DISKLESS,
1320                  * and that may only happen after our put_ldev below. */
1321                 if (mdev->state.disk != D_FAILED)
1322                         dev_err(DEV,
1323                                 "ASSERT FAILED: disk is %s during detach\n",
1324                                 drbd_disk_str(mdev->state.disk));
1325
1326                 if (!drbd_send_state(mdev))
1327                         dev_info(DEV, "Notified peer that I am detaching my disk\n");
1328
1329                 drbd_rs_cancel_all(mdev);
1330
1331                 /* In case we want to get something to stable storage still,
1332                  * this may be the last chance.
1333                  * Following put_ldev may transition to D_DISKLESS. */
1334                 drbd_md_sync(mdev);
1335                 put_ldev(mdev);
1336
1337                 if (was_io_error && eh == EP_CALL_HELPER)
1338                         drbd_khelper(mdev, "local-io-error");
1339         }
1340
1341         /* second half of local IO error, failure to attach,
1342          * or administrative detach,
1343          * after local_cnt references have reached zero again */
1344         if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1345                 /* We must still be diskless,
1346                  * re-attach has to be serialized with this! */
1347                 if (mdev->state.disk != D_DISKLESS)
1348                         dev_err(DEV,
1349                                 "ASSERT FAILED: disk is %s while going diskless\n",
1350                                 drbd_disk_str(mdev->state.disk));
1351
1352                 mdev->rs_total = 0;
1353                 mdev->rs_failed = 0;
1354                 atomic_set(&mdev->rs_pending_cnt, 0);
1355
1356                 if (!drbd_send_state(mdev))
1357                         dev_info(DEV, "Notified peer that I'm now diskless.\n");
1358                 /* corresponding get_ldev in __drbd_set_state
1359                  * this may finally trigger drbd_ldev_destroy. */
1360                 put_ldev(mdev);
1361         }
1362
1363         /* Notify peer that I had a local IO error, and did not detached.. */
1364         if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
1365                 drbd_send_state(mdev);
1366
1367         /* Disks got bigger while they were detached */
1368         if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1369             test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1370                 if (ns.conn == C_CONNECTED)
1371                         resync_after_online_grow(mdev);
1372         }
1373
1374         /* A resync finished or aborted, wake paused devices... */
1375         if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1376             (os.peer_isp && !ns.peer_isp) ||
1377             (os.user_isp && !ns.user_isp))
1378                 resume_next_sg(mdev);
1379
1380         /* Sync target is done with resync.  Explicitly notify the peer, even
1381          * though it should (at least for non-empty resyncs) already know. */
1382         if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1383                 drbd_send_state(mdev);
1384
1385         /* This triggers bitmap writeout of potentially still unwritten pages
1386          * if the resync finished cleanly, or aborted because of peer disk
1387          * failure, or because of connection loss.
1388          * For resync aborted because of local disk failure, we cannot do
1389          * any bitmap writeout anymore.
1390          * No harm done if some bits change during this phase.
1391          */
1392         if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
1393                 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
1394                         "write from resync_finished", BM_LOCKED_SET_ALLOWED);
1395                 put_ldev(mdev);
1396         }
1397
1398         if (ns.disk == D_DISKLESS &&
1399             ns.conn == C_STANDALONE &&
1400             ns.role == R_SECONDARY) {
1401                 if (os.aftr_isp != ns.aftr_isp)
1402                         resume_next_sg(mdev);
1403         }
1404
1405         drbd_md_sync(mdev);
1406 }
1407
1408 struct after_conn_state_chg_work {
1409         struct drbd_work w;
1410         enum drbd_conns oc;
1411         union drbd_state ns_min;
1412         union drbd_state ns_max; /* new maximal state over all mdevs */
1413         enum chg_state_flags flags;
1414 };
1415
1416 static int w_after_conn_state_ch(struct drbd_work *w, int unused)
1417 {
1418         struct after_conn_state_chg_work *acscw =
1419                 container_of(w, struct after_conn_state_chg_work, w);
1420         struct drbd_tconn *tconn = w->tconn;
1421         enum drbd_conns oc = acscw->oc;
1422         union drbd_state ns_max = acscw->ns_max;
1423         union drbd_state ns_min = acscw->ns_min;
1424         struct drbd_conf *mdev;
1425         int vnr;
1426
1427         kfree(acscw);
1428
1429         /* Upon network configuration, we need to start the receiver */
1430         if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
1431                 drbd_thread_start(&tconn->receiver);
1432
1433         if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
1434                 struct net_conf *old_conf;
1435
1436                 mutex_lock(&tconn->conf_update);
1437                 old_conf = tconn->net_conf;
1438                 tconn->my_addr_len = 0;
1439                 tconn->peer_addr_len = 0;
1440                 rcu_assign_pointer(tconn->net_conf, NULL);
1441                 conn_free_crypto(tconn);
1442                 mutex_unlock(&tconn->conf_update);
1443
1444                 synchronize_rcu();
1445                 kfree(old_conf);
1446         }
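
        /* The teardown above is the usual RCU publish/retire sequence:
         * unpublish the pointer, wait one grace period, then kfree().
         * An illustrative reader-side sketch (the timeout field serves
         * only as an example of a net_conf member):
         *
         *	rcu_read_lock();
         *	nc = rcu_dereference(tconn->net_conf);
         *	if (nc)
         *		t = nc->timeout;
         *	rcu_read_unlock();
         */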
1447
1448         if (ns_max.susp_fen) {
1449                 /* case 1: The outdate-peer handler was successful: */
1450                 if (ns_max.pdsk <= D_OUTDATED) {
1451                         tl_clear(tconn);
1452                         rcu_read_lock();
1453                         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1454                                 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1455                                         drbd_uuid_new_current(mdev);
1456                                         clear_bit(NEW_CUR_UUID, &mdev->flags);
1457                                 }
1458                         }
1459                         rcu_read_unlock();
1460                         conn_request_state(tconn,
1461                                            (union drbd_state) { { .susp_fen = 1 } },
1462                                            (union drbd_state) { { .susp_fen = 0 } },
1463                                            CS_VERBOSE);
1464                 }
1465                 /* case 2: The connection was established again: */
1466                 if (ns_min.conn >= C_CONNECTED) {
1467                         rcu_read_lock();
1468                         idr_for_each_entry(&tconn->volumes, mdev, vnr)
1469                                 clear_bit(NEW_CUR_UUID, &mdev->flags);
1470                         rcu_read_unlock();
1471                         spin_lock_irq(&tconn->req_lock);
1472                         _tl_restart(tconn, RESEND);
1473                         _conn_request_state(tconn,
1474                                             (union drbd_state) { { .susp_fen = 1 } },
1475                                             (union drbd_state) { { .susp_fen = 0 } },
1476                                             CS_VERBOSE);
1477                         spin_unlock_irq(&tconn->req_lock);
1478                 }
1479         }
1480         kref_put(&tconn->kref, &conn_destroy);
1481         return 0;
1482 }
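
/* Both susp_fen branches above end by clearing the suspend flag with the
 * mask/val idiom used throughout this file: the mask selects which state
 * fields the request may touch, the val carries their new values.  E.g.
 * (same call as in case 1):
 *
 *	conn_request_state(tconn,
 *			   (union drbd_state) { { .susp_fen = 1 } },	<- mask
 *			   (union drbd_state) { { .susp_fen = 0 } },	<- val
 *			   CS_VERBOSE);
 */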
1483
1484 void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf)
1485 {
1486         enum chg_state_flags flags = ~0;
1487         union drbd_dev_state os, cs = {}; /* old_state, common_state */
1488         struct drbd_conf *mdev;
1489         int vnr, first_vol = 1;
1490
1491         rcu_read_lock();
1492         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1493                 os = mdev->state;
1494
1495                 if (first_vol) {
1496                         cs = os;
1497                         first_vol = 0;
1498                         continue;
1499                 }
1500
1501                 if (cs.role != os.role)
1502                         flags &= ~CS_DC_ROLE;
1503
1504                 if (cs.peer != os.peer)
1505                         flags &= ~CS_DC_PEER;
1506
1507                 if (cs.conn != os.conn)
1508                         flags &= ~CS_DC_CONN;
1509
1510                 if (cs.disk != os.disk)
1511                         flags &= ~CS_DC_DISK;
1512
1513                 if (cs.pdsk != os.pdsk)
1514                         flags &= ~CS_DC_PDSK;
1515         }
1516         rcu_read_unlock();
1517
1518         *pf |= CS_DC_MASK;
1519         *pf &= flags;
1520         (*pcs).i = cs.i;
1521 }
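
/* A two-volume illustration of the flag semantics above (hypothetical
 * states): with volume 0 Primary and volume 1 Secondary, cs.role differs
 * from os.role on the second iteration, so CS_DC_ROLE is cleared and the
 * caller knows there is no single "common old role" for this connection:
 *
 *	vol 0: { .role = R_PRIMARY,   ... }
 *	vol 1: { .role = R_SECONDARY, ... }
 *	=> flags &= ~CS_DC_ROLE; the CS_DC_* bits of equal fields survive.
 */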
1522
1523 static enum drbd_state_rv
1524 conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
1525                          enum chg_state_flags flags)
1526 {
1527         enum drbd_state_rv rv = SS_SUCCESS;
1528         union drbd_state ns, os;
1529         struct drbd_conf *mdev;
1530         int vnr;
1531
1532         rcu_read_lock();
1533         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1534                 os = drbd_read_state(mdev);
1535                 ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
1536
1537                 if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
1538                         ns.disk = os.disk;
1539
1540                 if (ns.i == os.i)
1541                         continue;
1542
1543                 rv = is_valid_transition(os, ns);
1544                 if (rv < SS_SUCCESS)
1545                         break;
1546
1547                 if (!(flags & CS_HARD)) {
1548                         rv = is_valid_state(mdev, ns);
1549                         if (rv < SS_SUCCESS) {
1550                                 if (is_valid_state(mdev, os) == rv)
1551                                         rv = is_valid_soft_transition(os, ns);
1552                         } else
1553                                 rv = is_valid_soft_transition(os, ns);
1554                 }
1555                 if (rv < SS_SUCCESS)
1556                         break;
1557         }
1558         rcu_read_unlock();
1559
1560         if (rv < SS_SUCCESS && flags & CS_VERBOSE)
1561                 print_st_err(mdev, os, ns, rv);
1562
1563         return rv;
1564 }
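
/* On the fallback in the loop above: if the new per-volume state is
 * invalid for the very same reason the old state already was, the change
 * does not make matters worse, so only the soft-transition rules decide.
 * The decision, restated as a sketch:
 *
 *	rv = is_valid_state(mdev, ns);
 *	if (rv < SS_SUCCESS && is_valid_state(mdev, os) == rv)
 *		rv = is_valid_soft_transition(os, ns);	// pre-existing issue
 */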
1565
1566 void
1567 conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
1568                union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
1569 {
1570         union drbd_state ns, os, ns_max = { };
1571         union drbd_state ns_min = {
1572                 { .role = R_MASK,
1573                   .peer = R_MASK,
1574                   .disk = D_MASK,
1575                   .pdsk = D_MASK
1576                 } };
1577         struct drbd_conf *mdev;
1578         enum drbd_state_rv rv;
1579         int vnr;
1580
1581         if (mask.conn == C_MASK)
1582                 tconn->cstate = val.conn;
1583
1584         rcu_read_lock();
1585         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1586                 os = drbd_read_state(mdev);
1587                 ns = apply_mask_val(os, mask, val);
1588                 ns = sanitize_state(mdev, ns, NULL);
1589
1590                 if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
1591                         ns.disk = os.disk;
1592
1593                 rv = __drbd_set_state(mdev, ns, flags, NULL);
1594                 if (rv < SS_SUCCESS)
1595                         BUG();
1596
1597                 ns.i = mdev->state.i;
1598                 ns_max.role = max_role(ns.role, ns_max.role);
1599                 ns_max.peer = max_role(ns.peer, ns_max.peer);
1600                 ns_max.conn = max_t(enum drbd_conns, ns.conn, ns_max.conn);
1601                 ns_max.disk = max_t(enum drbd_disk_state, ns.disk, ns_max.disk);
1602                 ns_max.pdsk = max_t(enum drbd_disk_state, ns.pdsk, ns_max.pdsk);
1603
1604                 ns_min.role = min_role(ns.role, ns_min.role);
1605                 ns_min.peer = min_role(ns.peer, ns_min.peer);
1606                 ns_min.conn = min_t(enum drbd_conns, ns.conn, ns_min.conn);
1607                 ns_min.disk = min_t(enum drbd_disk_state, ns.disk, ns_min.disk);
1608                 ns_min.pdsk = min_t(enum drbd_disk_state, ns.pdsk, ns_min.pdsk);
1609         }
1610         rcu_read_unlock();
1611
1612         ns_min.susp = ns_max.susp = tconn->susp;
1613         ns_min.susp_nod = ns_max.susp_nod = tconn->susp_nod;
1614         ns_min.susp_fen = ns_max.susp_fen = tconn->susp_fen;
1615
1616         *pns_min = ns_min;
1617         *pns_max = ns_max;
1618 }
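
/* A two-volume illustration of the aggregation above (hypothetical
 * values): with volume 0 at D_UP_TO_DATE and volume 1 at D_INCONSISTENT,
 *
 *	ns_max.disk == D_UP_TO_DATE;	best disk state over all volumes
 *	ns_min.disk == D_INCONSISTENT;	worst disk state over all volumes
 *
 * ns_min starts out at the per-field maxima (R_MASK, D_MASK) so that the
 * first min_t()/min_role() always selects a real volume state. */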
1619
1620 static enum drbd_state_rv
1621 _conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
1622 {
1623         enum drbd_state_rv rv;
1624
1625         if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags))
1626                 return SS_CW_SUCCESS;
1627
1628         if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
1629                 return SS_CW_FAILED_BY_PEER;
1630
1631         spin_lock_irq(&tconn->req_lock);
1632         rv = tconn->cstate != C_WF_REPORT_PARAMS ? SS_CW_NO_NEED : SS_UNKNOWN_ERROR;
1633
1634         if (rv == SS_UNKNOWN_ERROR)
1635                 rv = conn_is_valid_transition(tconn, mask, val, 0);
1636
1637         if (rv == SS_SUCCESS)
1638                 rv = SS_UNKNOWN_ERROR; /* continue waiting, otherwise fail. */
1639
1640         spin_unlock_irq(&tconn->req_lock);
1641
1642         return rv;
1643 }
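
/* _conn_rq_cond() doubles as the wait_event() condition in conn_cl_wide():
 * SS_UNKNOWN_ERROR evaluates to zero, so returning it keeps the waiter
 * asleep, while every decisive answer (SS_CW_SUCCESS, SS_CW_FAILED_BY_PEER,
 * SS_CW_NO_NEED, or a negative validation error) is nonzero and wakes it:
 *
 *	wait_event(tconn->ping_wait,
 *		   (rv = _conn_rq_cond(tconn, mask, val)));
 */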
1644
1645 static enum drbd_state_rv
1646 conn_cl_wide(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
1647              enum chg_state_flags f)
1648 {
1649         enum drbd_state_rv rv;
1650
1651         spin_unlock_irq(&tconn->req_lock);
1652         mutex_lock(&tconn->cstate_mutex);
1653
1654         if (conn_send_state_req(tconn, mask, val)) {
1655                 rv = SS_CW_FAILED_BY_PEER;
1656                 /* if (f & CS_VERBOSE)
1657                    print_st_err(mdev, os, ns, rv); */
1658                 goto abort;
1659         }
1660
1661         wait_event(tconn->ping_wait, (rv = _conn_rq_cond(tconn, mask, val)));
1662
1663 abort:
1664         mutex_unlock(&tconn->cstate_mutex);
1665         spin_lock_irq(&tconn->req_lock);
1666
1667         return rv;
1668 }
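
/* Note the lock juggling in conn_cl_wide(): waiting for the peer's answer
 * may sleep, which is not allowed under the req_lock spinlock, so the
 * function temporarily trades it for the (sleepable) cstate_mutex and
 * restores the locking context its caller expects before returning:
 *
 *	spin_unlock_irq(&tconn->req_lock);	about to sleep
 *	mutex_lock(&tconn->cstate_mutex);	serialize cluster-wide changes
 *	...
 *	mutex_unlock(&tconn->cstate_mutex);
 *	spin_lock_irq(&tconn->req_lock);	caller still holds it
 */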
1669
1670 enum drbd_state_rv
1671 _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
1672                     enum chg_state_flags flags)
1673 {
1674         enum drbd_state_rv rv = SS_SUCCESS;
1675         struct after_conn_state_chg_work *acscw;
1676         enum drbd_conns oc = tconn->cstate;
1677         union drbd_state ns_max, ns_min, os;
1678
1679         rv = is_valid_conn_transition(oc, val.conn);
1680         if (rv < SS_SUCCESS)
1681                 goto abort;
1682
1683         rv = conn_is_valid_transition(tconn, mask, val, flags);
1684         if (rv < SS_SUCCESS)
1685                 goto abort;
1686
1687         if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
1688             !(flags & (CS_LOCAL_ONLY | CS_HARD))) {
1689                 rv = conn_cl_wide(tconn, mask, val, flags);
1690                 if (rv < SS_SUCCESS)
1691                         goto abort;
1692         }
1693
1694         conn_old_common_state(tconn, &os, &flags);
1695         flags |= CS_DC_SUSP;
1696         conn_set_state(tconn, mask, val, &ns_min, &ns_max, flags);
1697         conn_pr_state_change(tconn, os, ns_max, flags);
1698
1699         acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
1700         if (acscw) {
1701                 acscw->oc = os.conn;
1702                 acscw->ns_min = ns_min;
1703                 acscw->ns_max = ns_max;
1704                 acscw->flags = flags;
1705                 acscw->w.cb = w_after_conn_state_ch;
1706                 kref_get(&tconn->kref);
1707                 acscw->w.tconn = tconn;
1708                 drbd_queue_work(&tconn->data.work, &acscw->w);
1709         } else {
1710                 conn_err(tconn, "Could not kmalloc an acscw\n");
1711         }
1712
1713 abort:
1714         return rv;
1715 }
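
/* _conn_request_state() thus runs in phases: validate the connection-level
 * transition, validate every volume, negotiate cluster-wide if needed
 * (only for C_WF_REPORT_PARAMS -> C_DISCONNECTING without CS_LOCAL_ONLY or
 * CS_HARD), record the common old state, commit the change per volume, and
 * queue the after-work.  Should the GFP_ATOMIC allocation fail, the state
 * change is already committed; only the asynchronous after-work is lost. */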
1716
1717 enum drbd_state_rv
1718 conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
1719                    enum chg_state_flags flags)
1720 {
1721         enum drbd_state_rv rv;
1722
1723         spin_lock_irq(&tconn->req_lock);
1724         rv = _conn_request_state(tconn, mask, val, flags);
1725         spin_unlock_irq(&tconn->req_lock);
1726
1727         return rv;
1728 }
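
/* A typical caller-side sketch, assuming the NS() mask/val helper from
 * drbd_int.h; the flags choice is illustrative:
 *
 *	enum drbd_state_rv rv;
 *
 *	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_VERBOSE);
 *	if (rv < SS_SUCCESS)
 *		conn_err(tconn, "failed to disconnect: %d\n", rv);
 *
 * which is equivalent to passing mask = { { .conn = C_MASK } } and
 * val = { { .conn = C_DISCONNECTING } } explicitly.
 */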