/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

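/* Result of decoding a packet header: command code, payload size in bytes,
 * volume number, and a pointer to the payload in the receive buffer. */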
struct packet_info {
        enum drbd_packet cmd;
        unsigned int size;
        unsigned int vnr;
        void *data;
};

enum finish_epoch {
        FE_STILL_LIVE,  /* epoch did not finish with this event */
        FE_DESTROYED,   /* epoch was finished and freed */
        FE_RECYCLED,    /* epoch was finished and reset for reuse */
};

static int drbd_do_features(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);

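/* Best-effort allocation: highmem is fine, and the callers handle
 * allocation failure themselves, so do not warn about it. */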
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */
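
/* A chain of three pages thus looks like
 *   head -> page A -> page B -> page C -> (nothing),
 * where each "->" is the page's ->private field interpreted as a
 * struct page *, and the tail's ->private is 0 as end-of-list marker. */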

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
        struct page *page;
        struct page *tmp;

        BUG_ON(!n);
        BUG_ON(!head);

        page = *head;

        if (!page)
                return NULL;

        while (page) {
                tmp = page_chain_next(page);
                if (--n == 0)
                        break; /* found sufficient pages */
                if (tmp == NULL)
                        /* insufficient pages, don't use any of them. */
                        return NULL;
                page = tmp;
        }

        /* add end of list marker for the returned list */
        set_page_private(page, 0);
        /* actual return value, and adjustment of head */
        page = *head;
        *head = tmp;
        return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
        struct page *tmp;
        int i = 1;
        while ((tmp = page_chain_next(page)))
                ++i, page = tmp;
        if (len)
                *len = i;
        return page;
}

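/* Return all pages of the chain to the system; returns the number of
 * pages that were freed. */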
static int page_chain_free(struct page *page)
{
        struct page *tmp;
        int i = 0;
        page_chain_for_each_safe(page, tmp) {
                put_page(page);
                ++i;
        }
        return i;
}

static void page_chain_add(struct page **head,
                struct page *chain_first, struct page *chain_last)
{
#if 1
        /* paranoia: verify that chain_last really is the tail of chain_first */
        struct page *tmp;
        tmp = page_chain_tail(chain_first, NULL);
        BUG_ON(tmp != chain_last);
#endif

        /* add chain to head */
        set_page_private(chain_last, (unsigned long)*head);
        *head = chain_first;
}

static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
                                       unsigned int number)
{
        struct page *page = NULL;
        struct page *tmp = NULL;
        unsigned int i = 0;

        /* Yes, testing drbd_pp_vacant outside the lock is racy.
         * So what. It saves a spin_lock. */
        if (drbd_pp_vacant >= number) {
                spin_lock(&drbd_pp_lock);
                page = page_chain_del(&drbd_pp_pool, number);
                if (page)
                        drbd_pp_vacant -= number;
                spin_unlock(&drbd_pp_lock);
                if (page)
                        return page;
        }

        /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
         * "criss-cross" setup, that might cause write-out on some other DRBD,
         * which in turn might block on the other node at this very place.  */
        for (i = 0; i < number; i++) {
                tmp = alloc_page(GFP_TRY);
                if (!tmp)
                        break;
                set_page_private(tmp, (unsigned long)page);
                page = tmp;
        }

        if (i == number)
                return page;

        /* Not enough pages immediately available this time.
         * No need to jump around here, drbd_alloc_pages will retry this
         * function "soon". */
        if (page) {
                tmp = page_chain_tail(page, NULL);
                spin_lock(&drbd_pp_lock);
                page_chain_add(&drbd_pp_pool, page, tmp);
                drbd_pp_vacant += i;
                spin_unlock(&drbd_pp_lock);
        }
        return NULL;
}

static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
                                           struct list_head *to_be_freed)
{
        struct drbd_peer_request *peer_req;
        struct list_head *le, *tle;

        /* The EEs are always appended to the end of the list. Since
           they are sent in order over the wire, they have to finish
           in order. As soon as we see the first one that has not
           finished, we can stop examining the list... */

        list_for_each_safe(le, tle, &mdev->net_ee) {
                peer_req = list_entry(le, struct drbd_peer_request, w.list);
                if (drbd_peer_req_has_active_page(peer_req))
                        break;
                list_move(le, to_be_freed);
        }
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
        LIST_HEAD(reclaimed);
        struct drbd_peer_request *peer_req, *t;

        spin_lock_irq(&mdev->tconn->req_lock);
        reclaim_finished_net_peer_reqs(mdev, &reclaimed);
        spin_unlock_irq(&mdev->tconn->req_lock);

        list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
                drbd_free_net_peer_req(mdev, peer_req);
}

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @mdev:       DRBD device.
 * @number:     number of pages requested
 * @retry:      whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
                              bool retry)
{
        struct page *page = NULL;
        struct net_conf *nc;
        DEFINE_WAIT(wait);
        int mxb;

        /* Yes, we may run up to @number over max_buffers. If we
         * follow it strictly, the admin will get it wrong anyways. */
        rcu_read_lock();
        nc = rcu_dereference(mdev->tconn->net_conf);
        mxb = nc ? nc->max_buffers : 1000000;
        rcu_read_unlock();

        if (atomic_read(&mdev->pp_in_use) < mxb)
                page = __drbd_alloc_pages(mdev, number);

        while (page == NULL) {
                prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

                drbd_kick_lo_and_reclaim_net(mdev);

                if (atomic_read(&mdev->pp_in_use) < mxb) {
                        page = __drbd_alloc_pages(mdev, number);
                        if (page)
                                break;
                }

                if (!retry)
                        break;

                if (signal_pending(current)) {
                        dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
                        break;
                }

                schedule();
        }
        finish_wait(&drbd_pp_wait, &wait);

        if (page)
                atomic_add(number, &mdev->pp_in_use);
        return page;
}
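
/* A minimal sketch of the intended pairing (error handling omitted):
 *
 *      page = drbd_alloc_pages(mdev, nr_pages, true);
 *      ...
 *      drbd_free_pages(mdev, page, 0);
 *
 * pp_in_use is incremented here and decremented again in drbd_free_pages().
 */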

/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * It is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
{
        atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
        int i;

        if (page == NULL)
                return;

        if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
                i = page_chain_free(page);
        else {
                struct page *tmp;
                tmp = page_chain_tail(page, &i);
                spin_lock(&drbd_pp_lock);
                page_chain_add(&drbd_pp_pool, page, tmp);
                drbd_pp_vacant += i;
                spin_unlock(&drbd_pp_lock);
        }
        i = atomic_sub_return(i, a);
        if (i < 0)
                dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
                        is_net ? "pp_in_use_by_net" : "pp_in_use", i);
        wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_peer_req()
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_ee_fix_bhs()
 drbd_finish_peer_reqs()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
                    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
        struct drbd_peer_request *peer_req;
        struct page *page = NULL;
        unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
                return NULL;

        peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
        if (!peer_req) {
                if (!(gfp_mask & __GFP_NOWARN))
                        dev_err(DEV, "%s: allocation failed\n", __func__);
                return NULL;
        }

        if (data_size) {
                page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
                if (!page)
                        goto fail;
        }

        drbd_clear_interval(&peer_req->i);
        peer_req->i.size = data_size;
        peer_req->i.sector = sector;
        peer_req->i.local = false;
        peer_req->i.waiting = false;

        peer_req->epoch = NULL;
        peer_req->w.mdev = mdev;
        peer_req->pages = page;
        atomic_set(&peer_req->pending_bios, 0);
        peer_req->flags = 0;
        /*
         * The block_id is opaque to the receiver.  It is not endianness
         * converted, and sent back to the sender unchanged.
         */
        peer_req->block_id = id;

        return peer_req;

 fail:
        mempool_free(peer_req, drbd_ee_mempool);
        return NULL;
}

void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
                       int is_net)
{
        if (peer_req->flags & EE_HAS_DIGEST)
                kfree(peer_req->digest);
        drbd_free_pages(mdev, peer_req->pages, is_net);
        D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
        D_ASSERT(drbd_interval_empty(&peer_req->i));
        mempool_free(peer_req, drbd_ee_mempool);
}

int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
{
        LIST_HEAD(work_list);
        struct drbd_peer_request *peer_req, *t;
        int count = 0;
        int is_net = list == &mdev->net_ee;

        spin_lock_irq(&mdev->tconn->req_lock);
        list_splice_init(list, &work_list);
        spin_unlock_irq(&mdev->tconn->req_lock);

        list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
                __drbd_free_peer_req(mdev, peer_req, is_net);
                count++;
        }
        return count;
}

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
{
        LIST_HEAD(work_list);
        LIST_HEAD(reclaimed);
        struct drbd_peer_request *peer_req, *t;
        int err = 0;

        spin_lock_irq(&mdev->tconn->req_lock);
        reclaim_finished_net_peer_reqs(mdev, &reclaimed);
        list_splice_init(&mdev->done_ee, &work_list);
        spin_unlock_irq(&mdev->tconn->req_lock);

        list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
                drbd_free_net_peer_req(mdev, peer_req);

        /* possible callbacks here:
         * e_end_block, and e_end_resync_block, e_send_superseded.
         * all ignore the last argument.
         */
        list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
                int err2;

                /* list_del not necessary, next/prev members not touched */
                err2 = peer_req->w.cb(&peer_req->w, !!err);
                if (!err)
                        err = err2;
                drbd_free_peer_req(mdev, peer_req);
        }
        wake_up(&mdev->ee_wait);

        return err;
}

static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
                                     struct list_head *head)
{
        DEFINE_WAIT(wait);

        /* avoids spin_lock/unlock
         * and calling prepare_to_wait in the fast path */
        while (!list_empty(head)) {
                prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&mdev->tconn->req_lock);
                io_schedule();
                finish_wait(&mdev->ee_wait, &wait);
                spin_lock_irq(&mdev->tconn->req_lock);
        }
}

static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
                                    struct list_head *head)
{
        spin_lock_irq(&mdev->tconn->req_lock);
        _drbd_wait_ee_list_empty(mdev, head);
        spin_unlock_irq(&mdev->tconn->req_lock);
}

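/* Receive into a kernel buffer via sock_recvmsg(). With the default
 * MSG_WAITALL this blocks until size bytes have arrived, the peer has
 * closed the connection, or a signal interrupts us; no retries here. */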
static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
        mm_segment_t oldfs;
        struct kvec iov = {
                .iov_base = buf,
                .iov_len = size,
        };
        struct msghdr msg = {
                .msg_iovlen = 1,
                .msg_iov = (struct iovec *)&iov,
                .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
        };
        int rv;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
        set_fs(oldfs);

        return rv;
}

static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
{
        int rv;

        rv = drbd_recv_short(tconn->data.socket, buf, size, 0);

        if (rv < 0) {
                if (rv == -ECONNRESET)
                        conn_info(tconn, "sock was reset by peer\n");
                else if (rv != -ERESTARTSYS)
                        conn_err(tconn, "sock_recvmsg returned %d\n", rv);
        } else if (rv == 0) {
                if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
                        long t;
                        rcu_read_lock();
                        t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
                        rcu_read_unlock();

                        t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);

                        if (t)
                                goto out;
                }
                conn_info(tconn, "sock was shut down by peer\n");
        }

        if (rv != size)
                conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);

out:
        return rv;
}

static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
{
        int err;

        err = drbd_recv(tconn, buf, size);
        if (err != size) {
                if (err >= 0)
                        err = -EIO;
        } else
                err = 0;
        return err;
}

static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
{
        int err;

        err = drbd_recv_all(tconn, buf, size);
        if (err && !signal_pending(current))
                conn_warn(tconn, "short read (expected size %d)\n", (int)size);
        return err;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
                unsigned int rcv)
{
        /* open coded SO_SNDBUF, SO_RCVBUF */
        if (snd) {
                sock->sk->sk_sndbuf = snd;
                sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        }
        if (rcv) {
                sock->sk->sk_rcvbuf = rcv;
                sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
}

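/* Actively try to connect to the peer; returns the connected socket or
 * NULL. Errors that merely mean "the peer is not reachable (yet)" do not
 * cause a state change to C_DISCONNECTING. */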
static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
        const char *what;
        struct socket *sock;
        struct sockaddr_in6 src_in6;
        struct sockaddr_in6 peer_in6;
        struct net_conf *nc;
        int err, peer_addr_len, my_addr_len;
        int sndbuf_size, rcvbuf_size, connect_int;
        int disconnect_on_error = 1;

        rcu_read_lock();
        nc = rcu_dereference(tconn->net_conf);
        if (!nc) {
                rcu_read_unlock();
                return NULL;
        }
        sndbuf_size = nc->sndbuf_size;
        rcvbuf_size = nc->rcvbuf_size;
        connect_int = nc->connect_int;
        rcu_read_unlock();

        my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
        memcpy(&src_in6, &tconn->my_addr, my_addr_len);

        if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
                src_in6.sin6_port = 0;
        else
                ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

        peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
        memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);

        what = "sock_create_kern";
        err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
                               SOCK_STREAM, IPPROTO_TCP, &sock);
        if (err < 0) {
                sock = NULL;
                goto out;
        }

        sock->sk->sk_rcvtimeo =
        sock->sk->sk_sndtimeo = connect_int * HZ;
        drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

        /* explicitly bind to the configured IP as source IP
         * for the outgoing connections.
         * This is needed for multihomed hosts and to be
         * able to use lo: interfaces for drbd.
         * Make sure to use 0 as port number, so linux selects
         * a free one dynamically.
         */
        what = "bind before connect";
        err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
        if (err < 0)
                goto out;

        /* connect may fail, peer not yet available.
         * stay C_WF_CONNECTION, don't go Disconnecting! */
        disconnect_on_error = 0;
        what = "connect";
        err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

out:
        if (err < 0) {
                if (sock) {
                        sock_release(sock);
                        sock = NULL;
                }
                switch (-err) {
                        /* timeout, busy, signal pending */
                case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
                case EINTR: case ERESTARTSYS:
                        /* peer not (yet) available, network problem */
                case ECONNREFUSED: case ENETUNREACH:
                case EHOSTDOWN:    case EHOSTUNREACH:
                        disconnect_on_error = 0;
                        break;
                default:
                        conn_err(tconn, "%s failed, err = %d\n", what, err);
                }
                if (disconnect_on_error)
                        conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
        }

        return sock;
}

struct accept_wait_data {
        struct drbd_tconn *tconn;
        struct socket *s_listen;
        struct completion door_bell;
        void (*original_sk_state_change)(struct sock *sk);
};

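/* Replacement sk_state_change callback for the listen socket: complete the
 * door_bell once a connection is established, so that the waiter in
 * drbd_wait_for_connect() wakes up, then chain to the original callback. */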
static void drbd_incoming_connection(struct sock *sk)
{
        struct accept_wait_data *ad = sk->sk_user_data;
        void (*state_change)(struct sock *sk);

        state_change = ad->original_sk_state_change;
        if (sk->sk_state == TCP_ESTABLISHED)
                complete(&ad->door_bell);
        state_change(sk);
}

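/* Create, bind and activate the listen socket, and install
 * drbd_incoming_connection() as its state change callback so we are
 * notified as soon as a peer connects. */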
static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
{
        int err, sndbuf_size, rcvbuf_size, my_addr_len;
        struct sockaddr_in6 my_addr;
        struct socket *s_listen;
        struct net_conf *nc;
        const char *what;

        rcu_read_lock();
        nc = rcu_dereference(tconn->net_conf);
        if (!nc) {
                rcu_read_unlock();
                return -EIO;
        }
        sndbuf_size = nc->sndbuf_size;
        rcvbuf_size = nc->rcvbuf_size;
        rcu_read_unlock();

        my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
        memcpy(&my_addr, &tconn->my_addr, my_addr_len);

        what = "sock_create_kern";
        err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
                               SOCK_STREAM, IPPROTO_TCP, &s_listen);
        if (err) {
                s_listen = NULL;
                goto out;
        }

        s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
        drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

        what = "bind before listen";
        err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
        if (err < 0)
                goto out;

        ad->s_listen = s_listen;
        write_lock_bh(&s_listen->sk->sk_callback_lock);
        ad->original_sk_state_change = s_listen->sk->sk_state_change;
        s_listen->sk->sk_state_change = drbd_incoming_connection;
        s_listen->sk->sk_user_data = ad;
        write_unlock_bh(&s_listen->sk->sk_callback_lock);

        what = "listen";
        err = s_listen->ops->listen(s_listen, 5);
        if (err < 0)
                goto out;

        return 0;
out:
        if (s_listen)
                sock_release(s_listen);
        if (err < 0) {
                if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
                        conn_err(tconn, "%s failed, err = %d\n", what, err);
                        conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
                }
        }

        return -EIO;
}

static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
{
        write_lock_bh(&sk->sk_callback_lock);
        sk->sk_state_change = ad->original_sk_state_change;
        sk->sk_user_data = NULL;
        write_unlock_bh(&sk->sk_callback_lock);
}

static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
{
        int timeo, connect_int, err = 0;
        struct socket *s_estab = NULL;
        struct net_conf *nc;

        rcu_read_lock();
        nc = rcu_dereference(tconn->net_conf);
        if (!nc) {
                rcu_read_unlock();
                return NULL;
        }
        connect_int = nc->connect_int;
        rcu_read_unlock();

        timeo = connect_int * HZ;
        /* 28.5% random jitter */
        timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;

        err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
        if (err <= 0)
                return NULL;

        err = kernel_accept(ad->s_listen, &s_estab, 0);
        if (err < 0) {
                if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
                        conn_err(tconn, "accept failed, err = %d\n", err);
                        conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
                }
        }

        if (s_estab)
                unregister_state_change(s_estab->sk, ad);

        return s_estab;
}

static int decode_header(struct drbd_tconn *, void *, struct packet_info *);

static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
                             enum drbd_packet cmd)
{
        if (!conn_prepare_command(tconn, sock))
                return -EIO;
        return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
}

static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
{
        unsigned int header_size = drbd_header_size(tconn);
        struct packet_info pi;
        int err;

        err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
        if (err != header_size) {
                if (err >= 0)
                        err = -EIO;
                return err;
        }
        err = decode_header(tconn, tconn->data.rbuf, &pi);
        if (err)
                return err;
        return pi.cmd;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:       pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct socket **sock)
{
        int rr;
        char tb[4];

        if (!*sock)
                return false;

        rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

        if (rr > 0 || rr == -EAGAIN) {
                return true;
        } else {
                sock_release(*sock);
                *sock = NULL;
                return false;
        }
}

/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(struct drbd_conf *mdev)
{
        int err;

        atomic_set(&mdev->packet_seq, 0);
        mdev->peer_seq = 0;

        mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
                &mdev->tconn->cstate_mutex :
                &mdev->own_state_mutex;

        err = drbd_send_sync_param(mdev);
        if (!err)
                err = drbd_send_sizes(mdev, 0, 0);
        if (!err)
                err = drbd_send_uuids(mdev);
        if (!err)
                err = drbd_send_current_state(mdev);
        clear_bit(USE_DEGR_WFC_T, &mdev->flags);
        clear_bit(RESIZE_PENDING, &mdev->flags);
        mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
        return err;
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int conn_connect(struct drbd_tconn *tconn)
{
        struct drbd_socket sock, msock;
        struct drbd_conf *mdev;
        struct net_conf *nc;
        int vnr, timeout, h, ok;
        bool discard_my_data;
        enum drbd_state_rv rv;
        struct accept_wait_data ad = {
                .tconn = tconn,
                .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
        };

        clear_bit(DISCONNECT_SENT, &tconn->flags);
        if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
                return -2;

        mutex_init(&sock.mutex);
        sock.sbuf = tconn->data.sbuf;
        sock.rbuf = tconn->data.rbuf;
        sock.socket = NULL;
        mutex_init(&msock.mutex);
        msock.sbuf = tconn->meta.sbuf;
        msock.rbuf = tconn->meta.rbuf;
        msock.socket = NULL;

        /* Assume that the peer only understands protocol 80 until we know better.  */
        tconn->agreed_pro_version = 80;

        if (prepare_listen_socket(tconn, &ad))
                return 0;

        do {
                struct socket *s;

                s = drbd_try_connect(tconn);
                if (s) {
                        if (!sock.socket) {
                                sock.socket = s;
                                send_first_packet(tconn, &sock, P_INITIAL_DATA);
                        } else if (!msock.socket) {
                                clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
                                msock.socket = s;
                                send_first_packet(tconn, &msock, P_INITIAL_META);
                        } else {
                                conn_err(tconn, "Logic error in conn_connect()\n");
                                goto out_release_sockets;
                        }
                }

                if (sock.socket && msock.socket) {
                        rcu_read_lock();
                        nc = rcu_dereference(tconn->net_conf);
                        timeout = nc->ping_timeo * HZ / 10;
                        rcu_read_unlock();
                        schedule_timeout_interruptible(timeout);
                        ok = drbd_socket_okay(&sock.socket);
                        ok = drbd_socket_okay(&msock.socket) && ok;
                        if (ok)
                                break;
                }

retry:
                s = drbd_wait_for_connect(tconn, &ad);
                if (s) {
                        int fp = receive_first_packet(tconn, s);
                        drbd_socket_okay(&sock.socket);
                        drbd_socket_okay(&msock.socket);
                        switch (fp) {
                        case P_INITIAL_DATA:
                                if (sock.socket) {
                                        conn_warn(tconn, "initial packet S crossed\n");
                                        sock_release(sock.socket);
                                        sock.socket = s;
                                        goto randomize;
                                }
                                sock.socket = s;
                                break;
                        case P_INITIAL_META:
                                set_bit(RESOLVE_CONFLICTS, &tconn->flags);
                                if (msock.socket) {
                                        conn_warn(tconn, "initial packet M crossed\n");
                                        sock_release(msock.socket);
                                        msock.socket = s;
                                        goto randomize;
                                }
                                msock.socket = s;
                                break;
                        default:
                                conn_warn(tconn, "Error receiving initial packet\n");
                                sock_release(s);
randomize:
                                if (prandom_u32() & 1)
                                        goto retry;
                        }
                }

                if (tconn->cstate <= C_DISCONNECTING)
                        goto out_release_sockets;
                if (signal_pending(current)) {
                        flush_signals(current);
                        smp_rmb();
                        if (get_t_state(&tconn->receiver) == EXITING)
                                goto out_release_sockets;
                }

                ok = drbd_socket_okay(&sock.socket);
                ok = drbd_socket_okay(&msock.socket) && ok;
        } while (!ok);

        if (ad.s_listen)
                sock_release(ad.s_listen);

        sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
        msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */

        sock.socket->sk->sk_allocation = GFP_NOIO;
        msock.socket->sk->sk_allocation = GFP_NOIO;

        sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
        msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

        /* NOT YET ...
         * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
         * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
         * first set it to the P_CONNECTION_FEATURES timeout,
         * which we set to 4x the configured ping_timeout. */
        rcu_read_lock();
        nc = rcu_dereference(tconn->net_conf);

        sock.socket->sk->sk_sndtimeo =
        sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

        msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
        timeout = nc->timeout * HZ / 10;
        discard_my_data = nc->discard_my_data;
        rcu_read_unlock();

        msock.socket->sk->sk_sndtimeo = timeout;

        /* we don't want delays.
         * we use TCP_CORK where appropriate, though */
        drbd_tcp_nodelay(sock.socket);
        drbd_tcp_nodelay(msock.socket);

        tconn->data.socket = sock.socket;
        tconn->meta.socket = msock.socket;
        tconn->last_received = jiffies;

        h = drbd_do_features(tconn);
        if (h <= 0)
                return h;

        if (tconn->cram_hmac_tfm) {
                /* drbd_request_state(mdev, NS(conn, WFAuth)); */
                switch (drbd_do_auth(tconn)) {
                case -1:
                        conn_err(tconn, "Authentication of peer failed\n");
                        return -1;
                case 0:
                        conn_err(tconn, "Authentication of peer failed, trying again.\n");
                        return 0;
                }
        }

        tconn->data.socket->sk->sk_sndtimeo = timeout;
        tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

        if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
                return -1;

        set_bit(STATE_SENT, &tconn->flags);

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                kref_get(&mdev->kref);
                /* Prevent a race between resync-handshake and
                 * being promoted to Primary.
                 *
                 * Grab and release the state mutex, so we know that any current
                 * drbd_set_role() is finished, and any incoming drbd_set_role
                 * will see the STATE_SENT flag, and wait for it to be cleared.
                 */
                mutex_lock(mdev->state_mutex);
                mutex_unlock(mdev->state_mutex);

                rcu_read_unlock();

                if (discard_my_data)
                        set_bit(DISCARD_MY_DATA, &mdev->flags);
                else
                        clear_bit(DISCARD_MY_DATA, &mdev->flags);

                drbd_connected(mdev);
                kref_put(&mdev->kref, &drbd_minor_destroy);
                rcu_read_lock();
        }
        rcu_read_unlock();

        rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
        if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
                clear_bit(STATE_SENT, &tconn->flags);
                return 0;
        }

        drbd_thread_start(&tconn->asender);

        mutex_lock(&tconn->conf_update);
        /* The discard_my_data flag is a single-shot modifier to the next
         * connection attempt, the handshake of which is now well underway.
         * No need for rcu style copying of the whole struct
         * just to clear a single value. */
        tconn->net_conf->discard_my_data = 0;
        mutex_unlock(&tconn->conf_update);

        return h;

out_release_sockets:
        if (ad.s_listen)
                sock_release(ad.s_listen);
        if (sock.socket)
                sock_release(sock.socket);
        if (msock.socket)
                sock_release(msock.socket);
        return -1;
}

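/* The packet header format on the wire depends on the agreed protocol
 * version: protocol 100 peers use struct p_header100 (which carries a
 * 16-bit volume number), protocol 95..99 peers use struct p_header95,
 * and older peers use struct p_header80. Each format is identified by
 * its size (drbd_header_size()) and its magic value. */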
static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
{
        unsigned int header_size = drbd_header_size(tconn);

        if (header_size == sizeof(struct p_header100) &&
            *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
                struct p_header100 *h = header;
                if (h->pad != 0) {
                        conn_err(tconn, "Header padding is not zero\n");
                        return -EINVAL;
                }
                pi->vnr = be16_to_cpu(h->volume);
                pi->cmd = be16_to_cpu(h->command);
                pi->size = be32_to_cpu(h->length);
        } else if (header_size == sizeof(struct p_header95) &&
                   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
                struct p_header95 *h = header;
                pi->cmd = be16_to_cpu(h->command);
                pi->size = be32_to_cpu(h->length);
                pi->vnr = 0;
        } else if (header_size == sizeof(struct p_header80) &&
                   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
                struct p_header80 *h = header;
                pi->cmd = be16_to_cpu(h->command);
                pi->size = be16_to_cpu(h->length);
                pi->vnr = 0;
        } else {
                conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
                         be32_to_cpu(*(__be32 *)header),
                         tconn->agreed_pro_version);
                return -EINVAL;
        }
        pi->data = header + header_size;
        return 0;
}

static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
{
        void *buffer = tconn->data.rbuf;
        int err;

        err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
        if (err)
                return err;

        err = decode_header(tconn, buffer, pi);
        tconn->last_received = jiffies;

        return err;
}

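/* Flush the backing devices of all volumes of this connection. If a flush
 * fails, fall back to draining, see drbd_bump_write_ordering(). */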
static void drbd_flush(struct drbd_tconn *tconn)
{
        int rv;
        struct drbd_conf *mdev;
        int vnr;

        if (tconn->write_ordering >= WO_bdev_flush) {
                rcu_read_lock();
                idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                        if (!get_ldev(mdev))
                                continue;
                        kref_get(&mdev->kref);
                        rcu_read_unlock();

                        rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
                                        GFP_NOIO, NULL);
                        if (rv) {
                                dev_info(DEV, "local disk flush failed with status %d\n", rv);
                                /* would rather check on EOPNOTSUPP, but that is not reliable.
                                 * don't try again for ANY return value != 0
                                 * if (rv == -EOPNOTSUPP) */
                                drbd_bump_write_ordering(tconn, WO_drain_io);
                        }
                        put_ldev(mdev);
                        kref_put(&mdev->kref, &drbd_minor_destroy);

                        rcu_read_lock();
                        if (rv)
                                break;
                }
                rcu_read_unlock();
        }
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishes it.
 * @tconn:      DRBD connection.
 * @epoch:      Epoch object.
 * @ev:         Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
                                               struct drbd_epoch *epoch,
                                               enum epoch_event ev)
{
        int epoch_size;
        struct drbd_epoch *next_epoch;
        enum finish_epoch rv = FE_STILL_LIVE;

        spin_lock(&tconn->epoch_lock);
        do {
                next_epoch = NULL;

                epoch_size = atomic_read(&epoch->epoch_size);

                switch (ev & ~EV_CLEANUP) {
                case EV_PUT:
                        atomic_dec(&epoch->active);
                        break;
                case EV_GOT_BARRIER_NR:
                        set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
                        break;
                case EV_BECAME_LAST:
                        /* nothing to do */
                        break;
                }

                if (epoch_size != 0 &&
                    atomic_read(&epoch->active) == 0 &&
                    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
                        if (!(ev & EV_CLEANUP)) {
                                spin_unlock(&tconn->epoch_lock);
                                drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
                                spin_lock(&tconn->epoch_lock);
                        }
#if 0
                        /* FIXME: dec unacked on connection, once we have
                         * something to count pending connection packets in. */
                        if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
                                dec_unacked(epoch->tconn);
#endif

                        if (tconn->current_epoch != epoch) {
                                next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
                                list_del(&epoch->list);
                                ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
                                tconn->epochs--;
                                kfree(epoch);

                                if (rv == FE_STILL_LIVE)
                                        rv = FE_DESTROYED;
                        } else {
                                epoch->flags = 0;
                                atomic_set(&epoch->epoch_size, 0);
                                /* atomic_set(&epoch->active, 0); is already zero */
                                if (rv == FE_STILL_LIVE)
                                        rv = FE_RECYCLED;
                        }
                }

                if (!next_epoch)
                        break;

                epoch = next_epoch;
        } while (1);

        spin_unlock(&tconn->epoch_lock);

        return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @tconn:      DRBD connection.
 * @wo:         Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
{
        struct disk_conf *dc;
        struct drbd_conf *mdev;
        enum write_ordering_e pwo;
        int vnr;
        static char *write_ordering_str[] = {
                [WO_none] = "none",
                [WO_drain_io] = "drain",
                [WO_bdev_flush] = "flush",
        };

        pwo = tconn->write_ordering;
        wo = min(pwo, wo);
        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                if (!get_ldev_if_state(mdev, D_ATTACHING))
                        continue;
                dc = rcu_dereference(mdev->ldev->disk_conf);

                if (wo == WO_bdev_flush && !dc->disk_flushes)
                        wo = WO_drain_io;
                if (wo == WO_drain_io && !dc->disk_drain)
                        wo = WO_none;
                put_ldev(mdev);
        }
        rcu_read_unlock();
        tconn->write_ordering = wo;
        if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
                conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
}

/**
 * drbd_submit_peer_request() - Submits the pages of a peer request as one or more bios
 * @mdev:       DRBD device.
 * @peer_req:   peer request
 * @rw:         flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_conf *mdev,
                             struct drbd_peer_request *peer_req,
                             const unsigned rw, const int fault_type)
{
        struct bio *bios = NULL;
        struct bio *bio;
        struct page *page = peer_req->pages;
        sector_t sector = peer_req->i.sector;
        unsigned ds = peer_req->i.size;
        unsigned n_bios = 0;
        unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int err = -ENOMEM;

        /* In most cases, we will only need one bio.  But in case the lower
         * level restrictions happen to be different at this offset on this
         * side than those of the sending peer, we may need to submit the
         * request in more than one bio.
         *
         * Plain bio_alloc is good enough here, this is no DRBD internally
         * generated bio, but a bio allocated on behalf of the peer.
         */
next_bio:
        bio = bio_alloc(GFP_NOIO, nr_pages);
        if (!bio) {
                dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
                goto fail;
        }
        /* > peer_req->i.sector, unless this is the first bio */
        bio->bi_sector = sector;
        bio->bi_bdev = mdev->ldev->backing_bdev;
        bio->bi_rw = rw;
        bio->bi_private = peer_req;
        bio->bi_end_io = drbd_peer_request_endio;

        bio->bi_next = bios;
        bios = bio;
        ++n_bios;

        page_chain_for_each(page) {
                unsigned len = min_t(unsigned, ds, PAGE_SIZE);
                if (!bio_add_page(bio, page, len, 0)) {
                        /* A single page must always be possible!
                         * But in case it fails anyways,
                         * we deal with it, and complain (below). */
                        if (bio->bi_vcnt == 0) {
                                dev_err(DEV,
                                        "bio_add_page failed for len=%u, "
                                        "bi_vcnt=0 (bi_sector=%llu)\n",
                                        len, (unsigned long long)bio->bi_sector);
                                err = -ENOSPC;
                                goto fail;
                        }
                        goto next_bio;
                }
                ds -= len;
                sector += len >> 9;
                --nr_pages;
        }
        D_ASSERT(page == NULL);
        D_ASSERT(ds == 0);

        atomic_set(&peer_req->pending_bios, n_bios);
        do {
                bio = bios;
                bios = bios->bi_next;
                bio->bi_next = NULL;

                drbd_generic_make_request(mdev, fault_type, bio);
        } while (bios);
        return 0;

fail:
        while (bios) {
                bio = bios;
                bios = bios->bi_next;
                bio_put(bio);
        }
        return err;
}

static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
                                             struct drbd_peer_request *peer_req)
{
        struct drbd_interval *i = &peer_req->i;

        drbd_remove_interval(&mdev->write_requests, i);
        drbd_clear_interval(i);

        /* Wake up any processes waiting for this peer request to complete.  */
        if (i->waiting)
                wake_up(&mdev->misc_wait);
}

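/* Wait until the active_ee lists of all volumes of this connection have
 * drained. */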
void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
{
        struct drbd_conf *mdev;
        int vnr;

        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                kref_get(&mdev->kref);
                rcu_read_unlock();
                drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
                kref_put(&mdev->kref, &drbd_minor_destroy);
                rcu_read_lock();
        }
        rcu_read_unlock();
}

static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
{
        int rv;
        struct p_barrier *p = pi->data;
        struct drbd_epoch *epoch;

        /* FIXME these are unacked on connection,
         * not a specific (peer)device.
         */
        tconn->current_epoch->barrier_nr = p->barrier;
        tconn->current_epoch->tconn = tconn;
        rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);

        /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
         * the activity log, which means it would not be resynced in case the
         * R_PRIMARY crashes now.
         * Therefore we must send the barrier_ack after the barrier request was
         * completed. */
        switch (tconn->write_ordering) {
        case WO_none:
                if (rv == FE_RECYCLED)
                        return 0;

                /* receiver context, in the writeout path of the other node.
                 * avoid potential distributed deadlock */
                epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
                if (epoch)
                        break;
                else
                        conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
                        /* Fall through */

        case WO_bdev_flush:
        case WO_drain_io:
                conn_wait_active_ee_empty(tconn);
                drbd_flush(tconn);

                if (atomic_read(&tconn->current_epoch->epoch_size)) {
                        epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
                        if (epoch)
                                break;
                }

                return 0;
        default:
                conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
                return -EIO;
        }

        epoch->flags = 0;
        atomic_set(&epoch->epoch_size, 0);
        atomic_set(&epoch->active, 0);

        spin_lock(&tconn->epoch_lock);
        if (atomic_read(&tconn->current_epoch->epoch_size)) {
                list_add(&epoch->list, &tconn->current_epoch->list);
                tconn->current_epoch = epoch;
                tconn->epochs++;
        } else {
                /* The current_epoch got recycled while we allocated this one... */
                kfree(epoch);
        }
        spin_unlock(&tconn->epoch_lock);

        return 0;
}
1482
1483 /* used from receive_RSDataReply (recv_resync_read)
1484  * and from receive_Data */
1485 static struct drbd_peer_request *
1486 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
1487               int data_size) __must_hold(local)
1488 {
1489         const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1490         struct drbd_peer_request *peer_req;
1491         struct page *page;
1492         int dgs, ds, err;
1493         void *dig_in = mdev->tconn->int_dig_in;
1494         void *dig_vv = mdev->tconn->int_dig_vv;
1495         unsigned long *data;
1496
1497         dgs = 0;
1498         if (mdev->tconn->peer_integrity_tfm) {
1499                 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
1500                 /*
1501                  * FIXME: Receive the incoming digest into the receive buffer
1502                  *        here, together with its struct p_data?
1503                  */
1504                 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1505                 if (err)
1506                         return NULL;
1507                 data_size -= dgs;
1508         }
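        /* Illustrative example: with crc32c as peer integrity algorithm,
         * dgs == 4, so a 4-byte digest is received ahead of the payload
         * and data_size shrinks by those 4 bytes. */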
1509
1510         if (!expect(IS_ALIGNED(data_size, 512)))
1511                 return NULL;
1512         if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1513                 return NULL;
1514
1515         /* even though we trust our peer,
1516          * we sometimes have to double check. */
1517         if (sector + (data_size>>9) > capacity) {
1518                 dev_err(DEV, "request from peer beyond end of local disk: "
1519                         "capacity: %llus < sector: %llus + size: %u\n",
1520                         (unsigned long long)capacity,
1521                         (unsigned long long)sector, data_size);
1522                 return NULL;
1523         }
1524
1525         /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1526          * "criss-cross" setup, that might cause write-out on some other DRBD,
1527          * which in turn might block on the other node at this very place.  */
1528         peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
1529         if (!peer_req)
1530                 return NULL;
1531
1532         if (!data_size)
1533                 return peer_req;
1534
1535         ds = data_size;
1536         page = peer_req->pages;
1537         page_chain_for_each(page) {
1538                 unsigned len = min_t(int, ds, PAGE_SIZE);
1539                 data = kmap(page);
1540                 err = drbd_recv_all_warn(mdev->tconn, data, len);
1541                 if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
1542                         dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1543                         data[0] = data[0] ^ (unsigned long)-1;
1544                 }
1545                 kunmap(page);
1546                 if (err) {
1547                         drbd_free_peer_req(mdev, peer_req);
1548                         return NULL;
1549                 }
1550                 ds -= len;
1551         }
1552
1553         if (dgs) {
1554                 drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
1555                 if (memcmp(dig_in, dig_vv, dgs)) {
1556                         dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1557                                 (unsigned long long)sector, data_size);
1558                         drbd_free_peer_req(mdev, peer_req);
1559                         return NULL;
1560                 }
1561         }
1562         mdev->recv_cnt += data_size>>9;
1563         return peer_req;
1564 }
1565
1566 /* drbd_drain_block() just takes a data block
1567  * out of the socket input buffer, and discards it.
1568  */
1569 static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1570 {
1571         struct page *page;
1572         int err = 0;
1573         void *data;
1574
1575         if (!data_size)
1576                 return 0;
1577
1578         page = drbd_alloc_pages(mdev, 1, 1);
1579
1580         data = kmap(page);
1581         while (data_size) {
1582                 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1583
1584                 err = drbd_recv_all_warn(mdev->tconn, data, len);
1585                 if (err)
1586                         break;
1587                 data_size -= len;
1588         }
1589         kunmap(page);
1590         drbd_free_pages(mdev, page, 0);
1591         return err;
1592 }
1593
1594 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1595                            sector_t sector, int data_size)
1596 {
1597         struct bio_vec *bvec;
1598         struct bio *bio;
1599         int dgs, err, i, expect;
1600         void *dig_in = mdev->tconn->int_dig_in;
1601         void *dig_vv = mdev->tconn->int_dig_vv;
1602
1603         dgs = 0;
1604         if (mdev->tconn->peer_integrity_tfm) {
1605                 dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
1606                 err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1607                 if (err)
1608                         return err;
1609                 data_size -= dgs;
1610         }
1611
1612         /* optimistically update recv_cnt.  if receiving fails below,
1613          * we disconnect anyways, and counters will be reset. */
1614         mdev->recv_cnt += data_size>>9;
1615
1616         bio = req->master_bio;
1617         D_ASSERT(sector == bio->bi_sector);
1618
1619         bio_for_each_segment(bvec, bio, i) {
1620                 void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
1621                 expect = min_t(int, data_size, bvec->bv_len);
1622                 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
1623                 kunmap(bvec->bv_page);
1624                 if (err)
1625                         return err;
1626                 data_size -= expect;
1627         }
1628
1629         if (dgs) {
1630                 drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
1631                 if (memcmp(dig_in, dig_vv, dgs)) {
1632                         dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1633                         return -EINVAL;
1634                 }
1635         }
1636
1637         D_ASSERT(data_size == 0);
1638         return 0;
1639 }
1640
1641 /*
1642  * e_end_resync_block() is called in asender context via
1643  * drbd_finish_peer_reqs().
1644  */
1645 static int e_end_resync_block(struct drbd_work *w, int unused)
1646 {
1647         struct drbd_peer_request *peer_req =
1648                 container_of(w, struct drbd_peer_request, w);
1649         struct drbd_conf *mdev = w->mdev;
1650         sector_t sector = peer_req->i.sector;
1651         int err;
1652
1653         D_ASSERT(drbd_interval_empty(&peer_req->i));
1654
1655         if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1656                 drbd_set_in_sync(mdev, sector, peer_req->i.size);
1657                 err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
1658         } else {
1659                 /* Record failure to sync */
1660                 drbd_rs_failed_io(mdev, sector, peer_req->i.size);
1661
1662                 err  = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
1663         }
1664         dec_unacked(mdev);
1665
1666         return err;
1667 }
1668
1669 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1670 {
1671         struct drbd_peer_request *peer_req;
1672
1673         peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
1674         if (!peer_req)
1675                 goto fail;
1676
1677         dec_rs_pending(mdev);
1678
1679         inc_unacked(mdev);
1680         /* corresponding dec_unacked() in e_end_resync_block()
1681          * respective _drbd_clear_done_ee */
1682
1683         peer_req->w.cb = e_end_resync_block;
1684
1685         spin_lock_irq(&mdev->tconn->req_lock);
1686         list_add(&peer_req->w.list, &mdev->sync_ee);
1687         spin_unlock_irq(&mdev->tconn->req_lock);
1688
1689         atomic_add(data_size >> 9, &mdev->rs_sect_ev);
1690         if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
1691                 return 0;
1692
1693         /* don't care for the reason here */
1694         dev_err(DEV, "submit failed, triggering re-connect\n");
1695         spin_lock_irq(&mdev->tconn->req_lock);
1696         list_del(&peer_req->w.list);
1697         spin_unlock_irq(&mdev->tconn->req_lock);
1698
1699         drbd_free_peer_req(mdev, peer_req);
1700 fail:
1701         put_ldev(mdev);
1702         return -EIO;
1703 }
1704
1705 static struct drbd_request *
1706 find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
1707              sector_t sector, bool missing_ok, const char *func)
1708 {
1709         struct drbd_request *req;
1710
1711         /* Request object according to our peer */
1712         req = (struct drbd_request *)(unsigned long)id;
1713         if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
1714                 return req;
1715         if (!missing_ok) {
1716                 dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
1717                         (unsigned long)id, (unsigned long long)sector);
1718         }
1719         return NULL;
1720 }
1721
1722 static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
1723 {
1724         struct drbd_conf *mdev;
1725         struct drbd_request *req;
1726         sector_t sector;
1727         int err;
1728         struct p_data *p = pi->data;
1729
1730         mdev = vnr_to_mdev(tconn, pi->vnr);
1731         if (!mdev)
1732                 return -EIO;
1733
1734         sector = be64_to_cpu(p->sector);
1735
1736         spin_lock_irq(&mdev->tconn->req_lock);
1737         req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
1738         spin_unlock_irq(&mdev->tconn->req_lock);
1739         if (unlikely(!req))
1740                 return -EIO;
1741
1742         /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
1743          * special casing it there for the various failure cases.
1744          * still no race with drbd_fail_pending_reads */
1745         err = recv_dless_read(mdev, req, sector, pi->size);
1746         if (!err)
1747                 req_mod(req, DATA_RECEIVED);
1748         /* else: nothing. handled from drbd_disconnect...
1749          * I don't think we may complete this just yet
1750          * in case we are "on-disconnect: freeze" */
1751
1752         return err;
1753 }
1754
1755 static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
1756 {
1757         struct drbd_conf *mdev;
1758         sector_t sector;
1759         int err;
1760         struct p_data *p = pi->data;
1761
1762         mdev = vnr_to_mdev(tconn, pi->vnr);
1763         if (!mdev)
1764                 return -EIO;
1765
1766         sector = be64_to_cpu(p->sector);
1767         D_ASSERT(p->block_id == ID_SYNCER);
1768
1769         if (get_ldev(mdev)) {
1770                 /* data is submitted to disk within recv_resync_read.
1771                  * corresponding put_ldev done below on error,
1772                  * or in drbd_peer_request_endio. */
1773                 err = recv_resync_read(mdev, sector, pi->size);
1774         } else {
1775                 if (__ratelimit(&drbd_ratelimit_state))
1776                         dev_err(DEV, "Can not write resync data to local disk.\n");
1777
1778                 err = drbd_drain_block(mdev, pi->size);
1779
1780                 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
1781         }
1782
1783         atomic_add(pi->size >> 9, &mdev->rs_sect_in);
1784
1785         return err;
1786 }
1787
1788 static void restart_conflicting_writes(struct drbd_conf *mdev,
1789                                        sector_t sector, int size)
1790 {
1791         struct drbd_interval *i;
1792         struct drbd_request *req;
1793
1794         drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1795                 if (!i->local)
1796                         continue;
1797                 req = container_of(i, struct drbd_request, i);
1798                 if (req->rq_state & RQ_LOCAL_PENDING ||
1799                     !(req->rq_state & RQ_POSTPONED))
1800                         continue;
1801                 /* as it is RQ_POSTPONED, this will cause it to
1802                  * be queued on the retry workqueue. */
1803                 __req_mod(req, CONFLICT_RESOLVED, NULL);
1804         }
1805 }
1806
1807 /*
1808  * e_end_block() is called in asender context via drbd_finish_peer_reqs().
1809  */
1810 static int e_end_block(struct drbd_work *w, int cancel)
1811 {
1812         struct drbd_peer_request *peer_req =
1813                 container_of(w, struct drbd_peer_request, w);
1814         struct drbd_conf *mdev = w->mdev;
1815         sector_t sector = peer_req->i.sector;
1816         int err = 0, pcmd;
1817
1818         if (peer_req->flags & EE_SEND_WRITE_ACK) {
1819                 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1820                         pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1821                                 mdev->state.conn <= C_PAUSED_SYNC_T &&
1822                                 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
1823                                 P_RS_WRITE_ACK : P_WRITE_ACK;
1824                         err = drbd_send_ack(mdev, pcmd, peer_req);
1825                         if (pcmd == P_RS_WRITE_ACK)
1826                                 drbd_set_in_sync(mdev, sector, peer_req->i.size);
1827                 } else {
1828                         err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
1829                         /* we expect it to be marked out of sync anyways...
1830                          * maybe assert this?  */
1831                 }
1832                 dec_unacked(mdev);
1833         }
1834         /* we delete from the conflict detection hash _after_ we sent out the
1835          * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right.  */
1836         if (peer_req->flags & EE_IN_INTERVAL_TREE) {
1837                 spin_lock_irq(&mdev->tconn->req_lock);
1838                 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1839                 drbd_remove_epoch_entry_interval(mdev, peer_req);
1840                 if (peer_req->flags & EE_RESTART_REQUESTS)
1841                         restart_conflicting_writes(mdev, sector, peer_req->i.size);
1842                 spin_unlock_irq(&mdev->tconn->req_lock);
1843         } else
1844                 D_ASSERT(drbd_interval_empty(&peer_req->i));
1845
1846         drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1847
1848         return err;
1849 }
1850
1851 static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
1852 {
1853         struct drbd_conf *mdev = w->mdev;
1854         struct drbd_peer_request *peer_req =
1855                 container_of(w, struct drbd_peer_request, w);
1856         int err;
1857
1858         err = drbd_send_ack(mdev, ack, peer_req);
1859         dec_unacked(mdev);
1860
1861         return err;
1862 }
1863
1864 static int e_send_superseded(struct drbd_work *w, int unused)
1865 {
1866         return e_send_ack(w, P_SUPERSEDED);
1867 }
1868
1869 static int e_send_retry_write(struct drbd_work *w, int unused)
1870 {
1871         struct drbd_tconn *tconn = w->mdev->tconn;
1872
1873         return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
1874                              P_RETRY_WRITE : P_SUPERSEDED);
1875 }
1876
1877 static bool seq_greater(u32 a, u32 b)
1878 {
1879         /*
1880          * We assume 32-bit wrap-around here.
1881          * For 24-bit wrap-around, we would have to shift:
1882          *  a <<= 8; b <<= 8;
1883          */
1884         return (s32)a - (s32)b > 0;
1885 }
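/* Illustrative examples of the wrap-around comparison above:
 *
 *   seq_greater(0x00000001, 0xffffffff)
 *     -> (s32)1 - (s32)-1 == 2 > 0  -> true
 *   seq_greater(0xffffffff, 0x00000001)
 *     -> (s32)-1 - (s32)1 == -2 > 0 -> false
 *
 * i.e. 1 counts as "after" 0xffffffff, because it is only two steps
 * ahead in the forward direction. */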
1886
1887 static u32 seq_max(u32 a, u32 b)
1888 {
1889         return seq_greater(a, b) ? a : b;
1890 }
1891
1892 static bool need_peer_seq(struct drbd_conf *mdev)
1893 {
1894         struct drbd_tconn *tconn = mdev->tconn;
1895         int tp;
1896
1897         /*
1898          * We only need to keep track of the last packet_seq number of our peer
1899          * if we are in dual-primary mode and we have the resolve-conflicts flag set; see
1900          * handle_write_conflicts().
1901          */
1902
1903         rcu_read_lock();
1904         tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
1905         rcu_read_unlock();
1906
1907         return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags);
1908 }
1909
1910 static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
1911 {
1912         unsigned int newest_peer_seq;
1913
1914         if (need_peer_seq(mdev)) {
1915                 spin_lock(&mdev->peer_seq_lock);
1916                 newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
1917                 mdev->peer_seq = newest_peer_seq;
1918                 spin_unlock(&mdev->peer_seq_lock);
1919                 /* wake up only if we actually changed mdev->peer_seq */
1920                 if (peer_seq == newest_peer_seq)
1921                         wake_up(&mdev->seq_wait);
1922         }
1923 }
1924
1925 static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
1926 {
1927         return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
1928 }
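/* Example (illustrative; l1/l2 are byte counts, s1/s2 are sector numbers):
 *
 *   overlaps(8, 4096, 12, 4096) -> sectors [8,16) vs [12,20) -> true
 *   overlaps(8, 4096, 16, 4096) -> sectors [8,16) vs [16,24) -> false */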
1929
1930 /* maybe change sync_ee into interval trees as well? */
1931 static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
1932 {
1933         struct drbd_peer_request *rs_req;
1934         bool rv = false;
1935
1936         spin_lock_irq(&mdev->tconn->req_lock);
1937         list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
1938                 if (overlaps(peer_req->i.sector, peer_req->i.size,
1939                              rs_req->i.sector, rs_req->i.size)) {
1940                         rv = true;
1941                         break;
1942                 }
1943         }
1944         spin_unlock_irq(&mdev->tconn->req_lock);
1945
1946         return rv;
1947 }
1948
1949 /* Called from receive_Data.
1950  * Synchronize packets on sock with packets on msock.
1951  *
1952  * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1953  * packet traveling on msock, they are still processed in the order they have
1954  * been sent.
1955  *
1956  * Note: we don't care for Ack packets overtaking P_DATA packets.
1957  *
1958  * In case packet_seq is larger than mdev->peer_seq number, there are
1959  * outstanding packets on the msock. We wait for them to arrive.
1960  * In case we are the logically next packet, we update mdev->peer_seq
1961  * ourselves. Correctly handles 32bit wrap around.
1962  *
1963  * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1964  * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1965  * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1966  * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1967  *
1968  * returns 0 if we may process the packet,
1969  * -ERESTARTSYS if we were interrupted (by disconnect signal). */
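/* Worked example (illustrative): assume mdev->peer_seq == 5.  A P_DATA
 * packet with seq_num 6 is the logically next one: seq_greater(6 - 1, 5)
 * is false, so it is processed right away and peer_seq becomes 6.  A
 * packet with seq_num 8 implies the packets carrying 6 and 7 are still
 * in flight on the msock, so we sleep on seq_wait until peer_seq has
 * caught up, we get signalled, or we time out. */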
1970 static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
1971 {
1972         DEFINE_WAIT(wait);
1973         long timeout;
1974         int ret;
1975
1976         if (!need_peer_seq(mdev))
1977                 return 0;
1978
1979         spin_lock(&mdev->peer_seq_lock);
1980         for (;;) {
1981                 if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
1982                         mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
1983                         ret = 0;
1984                         break;
1985                 }
1986                 if (signal_pending(current)) {
1987                         ret = -ERESTARTSYS;
1988                         break;
1989                 }
1990                 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1991                 spin_unlock(&mdev->peer_seq_lock);
1992                 rcu_read_lock();
1993                 timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
1994                 rcu_read_unlock();
1995                 timeout = schedule_timeout(timeout);
1996                 spin_lock(&mdev->peer_seq_lock);
1997                 if (!timeout) {
1998                         ret = -ETIMEDOUT;
1999                         dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
2000                         break;
2001                 }
2002         }
2003         spin_unlock(&mdev->peer_seq_lock);
2004         finish_wait(&mdev->seq_wait, &wait);
2005         return ret;
2006 }
2007
2008 /* see also bio_flags_to_wire()
2009  * DRBD_REQ_*, because we need to semantically map the flags to data packet
2010  * flags and back. We may replicate to other kernel versions. */
2011 static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
2012 {
2013         return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2014                 (dpf & DP_FUA ? REQ_FUA : 0) |
2015                 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
2016                 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
2017 }
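/* Example (illustrative): a peer write sent with DP_FUA | DP_FLUSH maps to
 *
 *   wire_flags_to_bio(mdev, DP_FUA | DP_FLUSH) == (REQ_FUA | REQ_FLUSH)
 *
 * while bio_flags_to_wire() performs the inverse mapping on the sender. */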
2018
2019 static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
2020                                     unsigned int size)
2021 {
2022         struct drbd_interval *i;
2023
2024     repeat:
2025         drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2026                 struct drbd_request *req;
2027                 struct bio_and_error m;
2028
2029                 if (!i->local)
2030                         continue;
2031                 req = container_of(i, struct drbd_request, i);
2032                 if (!(req->rq_state & RQ_POSTPONED))
2033                         continue;
2034                 req->rq_state &= ~RQ_POSTPONED;
2035                 __req_mod(req, NEG_ACKED, &m);
2036                 spin_unlock_irq(&mdev->tconn->req_lock);
2037                 if (m.bio)
2038                         complete_master_bio(mdev, &m);
2039                 spin_lock_irq(&mdev->tconn->req_lock);
2040                 goto repeat;
2041         }
2042 }
2043
2044 static int handle_write_conflicts(struct drbd_conf *mdev,
2045                                   struct drbd_peer_request *peer_req)
2046 {
2047         struct drbd_tconn *tconn = mdev->tconn;
2048         bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
2049         sector_t sector = peer_req->i.sector;
2050         const unsigned int size = peer_req->i.size;
2051         struct drbd_interval *i;
2052         bool equal;
2053         int err;
2054
2055         /*
2056          * Inserting the peer request into the write_requests tree will prevent
2057          * new conflicting local requests from being added.
2058          */
2059         drbd_insert_interval(&mdev->write_requests, &peer_req->i);
2060
2061     repeat:
2062         drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2063                 if (i == &peer_req->i)
2064                         continue;
2065
2066                 if (!i->local) {
2067                         /*
2068                          * Our peer has sent a conflicting remote request; this
2069                          * should not happen in a two-node setup.  Wait for the
2070                          * earlier peer request to complete.
2071                          */
2072                         err = drbd_wait_misc(mdev, i);
2073                         if (err)
2074                                 goto out;
2075                         goto repeat;
2076                 }
2077
2078                 equal = i->sector == sector && i->size == size;
2079                 if (resolve_conflicts) {
2080                         /*
2081                          * If the peer request is fully contained within the
2082                          * overlapping request, it can be considered overwritten
2083                          * and thus superseded; otherwise, it will be retried
2084                          * once all overlapping requests have completed.
2085                          */
2086                         bool superseded = i->sector <= sector && i->sector +
2087                                        (i->size >> 9) >= sector + (size >> 9);
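                        /* Containment example (illustrative): a local write
                         * covering sectors [8,24) fully contains a peer write
                         * covering [12,20), so the latter is superseded; a
                         * peer write covering [20,28) only partially overlaps
                         * and is retried instead. */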
2088
2089                         if (!equal)
2090                                 dev_alert(DEV, "Concurrent writes detected: "
2091                                                "local=%llus +%u, remote=%llus +%u, "
2092                                                "assuming %s came first\n",
2093                                           (unsigned long long)i->sector, i->size,
2094                                           (unsigned long long)sector, size,
2095                                           superseded ? "local" : "remote");
2096
2097                         inc_unacked(mdev);
2098                         peer_req->w.cb = superseded ? e_send_superseded :
2099                                                    e_send_retry_write;
2100                         list_add_tail(&peer_req->w.list, &mdev->done_ee);
2101                         wake_asender(mdev->tconn);
2102
2103                         err = -ENOENT;
2104                         goto out;
2105                 } else {
2106                         struct drbd_request *req =
2107                                 container_of(i, struct drbd_request, i);
2108
2109                         if (!equal)
2110                                 dev_alert(DEV, "Concurrent writes detected: "
2111                                                "local=%llus +%u, remote=%llus +%u\n",
2112                                           (unsigned long long)i->sector, i->size,
2113                                           (unsigned long long)sector, size);
2114
2115                         if (req->rq_state & RQ_LOCAL_PENDING ||
2116                             !(req->rq_state & RQ_POSTPONED)) {
2117                                 /*
2118                                  * Wait for the node with the discard flag to
2119                                  * decide if this request has been superseded
2120                                  * or needs to be retried.
2121                                  * Requests that have been superseded will
2122                                  * disappear from the write_requests tree.
2123                                  *
2124                                  * In addition, wait for the conflicting
2125                                  * request to finish locally before submitting
2126                                  * the conflicting peer request.
2127                                  */
2128                                 err = drbd_wait_misc(mdev, &req->i);
2129                                 if (err) {
2130                                         _conn_request_state(mdev->tconn,
2131                                                             NS(conn, C_TIMEOUT),
2132                                                             CS_HARD);
2133                                         fail_postponed_requests(mdev, sector, size);
2134                                         goto out;
2135                                 }
2136                                 goto repeat;
2137                         }
2138                         /*
2139                          * Remember to restart the conflicting requests after
2140                          * the new peer request has completed.
2141                          */
2142                         peer_req->flags |= EE_RESTART_REQUESTS;
2143                 }
2144         }
2145         err = 0;
2146
2147     out:
2148         if (err)
2149                 drbd_remove_epoch_entry_interval(mdev, peer_req);
2150         return err;
2151 }
2152
2153 /* mirrored write */
2154 static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
2155 {
2156         struct drbd_conf *mdev;
2157         sector_t sector;
2158         struct drbd_peer_request *peer_req;
2159         struct p_data *p = pi->data;
2160         u32 peer_seq = be32_to_cpu(p->seq_num);
2161         int rw = WRITE;
2162         u32 dp_flags;
2163         int err, tp;
2164
2165         mdev = vnr_to_mdev(tconn, pi->vnr);
2166         if (!mdev)
2167                 return -EIO;
2168
2169         if (!get_ldev(mdev)) {
2170                 int err2;
2171
2172                 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2173                 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
2174                 atomic_inc(&tconn->current_epoch->epoch_size);
2175                 err2 = drbd_drain_block(mdev, pi->size);
2176                 if (!err)
2177                         err = err2;
2178                 return err;
2179         }
2180
2181         /*
2182          * Corresponding put_ldev done either below (on various errors), or in
2183          * drbd_peer_request_endio, if we successfully submit the data at the
2184          * end of this function.
2185          */
2186
2187         sector = be64_to_cpu(p->sector);
2188         peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
2189         if (!peer_req) {
2190                 put_ldev(mdev);
2191                 return -EIO;
2192         }
2193
2194         peer_req->w.cb = e_end_block;
2195
2196         dp_flags = be32_to_cpu(p->dp_flags);
2197         rw |= wire_flags_to_bio(mdev, dp_flags);
2198         if (peer_req->pages == NULL) {
2199                 D_ASSERT(peer_req->i.size == 0);
2200                 D_ASSERT(dp_flags & DP_FLUSH);
2201         }
2202
2203         if (dp_flags & DP_MAY_SET_IN_SYNC)
2204                 peer_req->flags |= EE_MAY_SET_IN_SYNC;
2205
2206         spin_lock(&tconn->epoch_lock);
2207         peer_req->epoch = tconn->current_epoch;
2208         atomic_inc(&peer_req->epoch->epoch_size);
2209         atomic_inc(&peer_req->epoch->active);
2210         spin_unlock(&tconn->epoch_lock);
2211
2212         rcu_read_lock();
2213         tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
2214         rcu_read_unlock();
2215         if (tp) {
2216                 peer_req->flags |= EE_IN_INTERVAL_TREE;
2217                 err = wait_for_and_update_peer_seq(mdev, peer_seq);
2218                 if (err)
2219                         goto out_interrupted;
2220                 spin_lock_irq(&mdev->tconn->req_lock);
2221                 err = handle_write_conflicts(mdev, peer_req);
2222                 if (err) {
2223                         spin_unlock_irq(&mdev->tconn->req_lock);
2224                         if (err == -ENOENT) {
2225                                 put_ldev(mdev);
2226                                 return 0;
2227                         }
2228                         goto out_interrupted;
2229                 }
2230         } else
2231                 spin_lock_irq(&mdev->tconn->req_lock);
2232         list_add(&peer_req->w.list, &mdev->active_ee);
2233         spin_unlock_irq(&mdev->tconn->req_lock);
2234
2235         if (mdev->state.conn == C_SYNC_TARGET)
2236                 wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
2237
2238         if (mdev->tconn->agreed_pro_version < 100) {
2239                 rcu_read_lock();
2240                 switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
2241                 case DRBD_PROT_C:
2242                         dp_flags |= DP_SEND_WRITE_ACK;
2243                         break;
2244                 case DRBD_PROT_B:
2245                         dp_flags |= DP_SEND_RECEIVE_ACK;
2246                         break;
2247                 }
2248                 rcu_read_unlock();
2249         }
2250
2251         if (dp_flags & DP_SEND_WRITE_ACK) {
2252                 peer_req->flags |= EE_SEND_WRITE_ACK;
2253                 inc_unacked(mdev);
2254                 /* corresponding dec_unacked() in e_end_block()
2255                  * respective _drbd_clear_done_ee */
2256         }
2257
2258         if (dp_flags & DP_SEND_RECEIVE_ACK) {
2259                 /* I really don't like it that the receiver thread
2260                  * sends on the msock, but anyways */
2261                 drbd_send_ack(mdev, P_RECV_ACK, peer_req);
2262         }
2263
2264         if (mdev->state.pdsk < D_INCONSISTENT) {
2265                 /* In case we have the only disk of the cluster: mark the block
2265                  * out of sync for the peer and cover it by the activity log */
2266                 drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
2267                 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2268                 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
2269                 drbd_al_begin_io(mdev, &peer_req->i);
2270         }
2271
2272         err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
2273         if (!err)
2274                 return 0;
2275
2276         /* don't care for the reason here */
2277         dev_err(DEV, "submit failed, triggering re-connect\n");
2278         spin_lock_irq(&mdev->tconn->req_lock);
2279         list_del(&peer_req->w.list);
2280         drbd_remove_epoch_entry_interval(mdev, peer_req);
2281         spin_unlock_irq(&mdev->tconn->req_lock);
2282         if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
2283                 drbd_al_complete_io(mdev, &peer_req->i);
2284
2285 out_interrupted:
2286         drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
2287         put_ldev(mdev);
2288         drbd_free_peer_req(mdev, peer_req);
2289         return err;
2290 }
2291
2292 /* We may throttle resync, if the lower device seems to be busy,
2293  * and current sync rate is above c_min_rate.
2294  *
2295  * To decide whether or not the lower device is busy, we use a scheme similar
2296  * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
2297  * (more than 64 sectors) of activity we cannot account for with our own resync
2298  * activity, it obviously is "busy".
2299  *
2300  * The current sync rate used here uses only the most recent two step marks,
2301  * to have a short time average so we can react faster.
2302  */
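/* Worked example (illustrative numbers): if the relevant sync mark is
 * dt = 4 seconds old and db = 1000 bitmap bits (4 KiB each) were cleared
 * since then, the short-term rate is
 *
 *   dbdt = Bit2KB(1000 / 4) = 250 * 4 = 1000 KiB/s
 *
 * so resync is throttled whenever c_min_rate is configured below 1000
 * (provided the backing device also shows significant foreign activity). */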
2303 int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
2304 {
2305         struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2306         unsigned long db, dt, dbdt;
2307         struct lc_element *tmp;
2308         int curr_events;
2309         int throttle = 0;
2310         unsigned int c_min_rate;
2311
2312         rcu_read_lock();
2313         c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
2314         rcu_read_unlock();
2315
2316         /* feature disabled? */
2317         if (c_min_rate == 0)
2318                 return 0;
2319
2320         spin_lock_irq(&mdev->al_lock);
2321         tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
2322         if (tmp) {
2323                 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2324                 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
2325                         spin_unlock_irq(&mdev->al_lock);
2326                         return 0;
2327                 }
2328                 /* Do not slow down if app IO is already waiting for this extent */
2329         }
2330         spin_unlock_irq(&mdev->al_lock);
2331
2332         curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2333                       (int)part_stat_read(&disk->part0, sectors[1]) -
2334                         atomic_read(&mdev->rs_sect_ev);
2335
2336         if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2337                 unsigned long rs_left;
2338                 int i;
2339
2340                 mdev->rs_last_events = curr_events;
2341
2342                 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2343                  * approx. */
2344                 i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2345
2346                 if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
2347                         rs_left = mdev->ov_left;
2348                 else
2349                         rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
2350
2351                 dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2352                 if (!dt)
2353                         dt++;
2354                 db = mdev->rs_mark_left[i] - rs_left;
2355                 dbdt = Bit2KB(db/dt);
2356
2357                 if (dbdt > c_min_rate)
2358                         throttle = 1;
2359         }
2360         return throttle;
2361 }
2362
2363
2364 static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
2365 {
2366         struct drbd_conf *mdev;
2367         sector_t sector;
2368         sector_t capacity;
2369         struct drbd_peer_request *peer_req;
2370         struct digest_info *di = NULL;
2371         int size, verb;
2372         unsigned int fault_type;
2373         struct p_block_req *p = pi->data;
2374
2375         mdev = vnr_to_mdev(tconn, pi->vnr);
2376         if (!mdev)
2377                 return -EIO;
2378         capacity = drbd_get_capacity(mdev->this_bdev);
2379
2380         sector = be64_to_cpu(p->sector);
2381         size   = be32_to_cpu(p->blksize);
2382
2383         if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
2384                 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2385                                 (unsigned long long)sector, size);
2386                 return -EINVAL;
2387         }
2388         if (sector + (size>>9) > capacity) {
2389                 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2390                                 (unsigned long long)sector, size);
2391                 return -EINVAL;
2392         }
2393
2394         if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
2395                 verb = 1;
2396                 switch (pi->cmd) {
2397                 case P_DATA_REQUEST:
2398                         drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2399                         break;
2400                 case P_RS_DATA_REQUEST:
2401                 case P_CSUM_RS_REQUEST:
2402                 case P_OV_REQUEST:
2403                         drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
2404                         break;
2405                 case P_OV_REPLY:
2406                         verb = 0;
2407                         dec_rs_pending(mdev);
2408                         drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2409                         break;
2410                 default:
2411                         BUG();
2412                 }
2413                 if (verb && __ratelimit(&drbd_ratelimit_state))
2414                         dev_err(DEV, "Can not satisfy peer's read request, "
2415                             "no local data.\n");
2416
2417                 /* drain the possibly present payload */
2418                 return drbd_drain_block(mdev, pi->size);
2419         }
2420
2421         /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2422          * "criss-cross" setup, that might cause write-out on some other DRBD,
2423          * which in turn might block on the other node at this very place.  */
2424         peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
2425         if (!peer_req) {
2426                 put_ldev(mdev);
2427                 return -ENOMEM;
2428         }
2429
2430         switch (pi->cmd) {
2431         case P_DATA_REQUEST:
2432                 peer_req->w.cb = w_e_end_data_req;
2433                 fault_type = DRBD_FAULT_DT_RD;
2434                 /* application IO, don't drbd_rs_begin_io */
2435                 goto submit;
2436
2437         case P_RS_DATA_REQUEST:
2438                 peer_req->w.cb = w_e_end_rsdata_req;
2439                 fault_type = DRBD_FAULT_RS_RD;
2440                 /* used in the sector offset progress display */
2441                 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2442                 break;
2443
2444         case P_OV_REPLY:
2445         case P_CSUM_RS_REQUEST:
2446                 fault_type = DRBD_FAULT_RS_RD;
2447                 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
2448                 if (!di)
2449                         goto out_free_e;
2450
2451                 di->digest_size = pi->size;
2452                 di->digest = (((char *)di)+sizeof(struct digest_info));
2453
2454                 peer_req->digest = di;
2455                 peer_req->flags |= EE_HAS_DIGEST;
2456
2457                 if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
2458                         goto out_free_e;
2459
2460                 if (pi->cmd == P_CSUM_RS_REQUEST) {
2461                         D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
2462                         peer_req->w.cb = w_e_end_csum_rs_req;
2463                         /* used in the sector offset progress display */
2464                         mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2465                 } else if (pi->cmd == P_OV_REPLY) {
2466                         /* track progress, we may need to throttle */
2467                         atomic_add(size >> 9, &mdev->rs_sect_in);
2468                         peer_req->w.cb = w_e_end_ov_reply;
2469                         dec_rs_pending(mdev);
2470                         /* drbd_rs_begin_io done when we sent this request,
2471                          * but accounting still needs to be done. */
2472                         goto submit_for_resync;
2473                 }
2474                 break;
2475
2476         case P_OV_REQUEST:
2477                 if (mdev->ov_start_sector == ~(sector_t)0 &&
2478                     mdev->tconn->agreed_pro_version >= 90) {
2479                         unsigned long now = jiffies;
2480                         int i;
2481                         mdev->ov_start_sector = sector;
2482                         mdev->ov_position = sector;
2483                         mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2484                         mdev->rs_total = mdev->ov_left;
2485                         for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2486                                 mdev->rs_mark_left[i] = mdev->ov_left;
2487                                 mdev->rs_mark_time[i] = now;
2488                         }
2489                         dev_info(DEV, "Online Verify start sector: %llu\n",
2490                                         (unsigned long long)sector);
2491                 }
2492                 peer_req->w.cb = w_e_end_ov_req;
2493                 fault_type = DRBD_FAULT_RS_RD;
2494                 break;
2495
2496         default:
2497                 BUG();
2498         }
2499
2500         /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2501          * wrt the receiver, but it is not as straightforward as it may seem.
2502          * Various places in the resync start and stop logic assume resync
2503          * requests are processed in order, requeuing this on the worker thread
2504          * introduces a bunch of new code for synchronization between threads.
2505          *
2506          * Unlimited throttling before drbd_rs_begin_io may stall the resync
2507          * "forever", throttling after drbd_rs_begin_io will lock that extent
2508          * for application writes for the same time.  For now, just throttle
2509          * here, where the rest of the code expects the receiver to sleep for
2510          * a while, anyways.
2511          */
2512
2513         /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2514          * this defers syncer requests for some time, before letting at least
2515          * on request through.  The resync controller on the receiving side
2516          * will adapt to the incoming rate accordingly.
2517          *
2518          * We cannot throttle here if remote is Primary/SyncTarget:
2519          * we would also throttle its application reads.
2520          * In that case, throttling is done on the SyncTarget only.
2521          */
2522         if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2523                 schedule_timeout_uninterruptible(HZ/10);
2524         if (drbd_rs_begin_io(mdev, sector))
2525                 goto out_free_e;
2526
2527 submit_for_resync:
2528         atomic_add(size >> 9, &mdev->rs_sect_ev);
2529
2530 submit:
2531         inc_unacked(mdev);
2532         spin_lock_irq(&mdev->tconn->req_lock);
2533         list_add_tail(&peer_req->w.list, &mdev->read_ee);
2534         spin_unlock_irq(&mdev->tconn->req_lock);
2535
2536         if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
2537                 return 0;
2538
2539         /* don't care for the reason here */
2540         dev_err(DEV, "submit failed, triggering re-connect\n");
2541         spin_lock_irq(&mdev->tconn->req_lock);
2542         list_del(&peer_req->w.list);
2543         spin_unlock_irq(&mdev->tconn->req_lock);
2544         /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2545
2546 out_free_e:
2547         put_ldev(mdev);
2548         drbd_free_peer_req(mdev, peer_req);
2549         return -EIO;
2550 }
2551
2552 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2553 {
2554         int self, peer, rv = -100;
2555         unsigned long ch_self, ch_peer;
2556         enum drbd_after_sb_p after_sb_0p;
2557
2558         self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2559         peer = mdev->p_uuid[UI_BITMAP] & 1;
2560
2561         ch_peer = mdev->p_uuid[UI_SIZE];
2562         ch_self = mdev->comm_bm_set;
2563
2564         rcu_read_lock();
2565         after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
2566         rcu_read_unlock();
2567         switch (after_sb_0p) {
2568         case ASB_CONSENSUS:
2569         case ASB_DISCARD_SECONDARY:
2570         case ASB_CALL_HELPER:
2571         case ASB_VIOLENTLY:
2572                 dev_err(DEV, "Configuration error.\n");
2573                 break;
2574         case ASB_DISCONNECT:
2575                 break;
2576         case ASB_DISCARD_YOUNGER_PRI:
2577                 if (self == 0 && peer == 1) {
2578                         rv = -1;
2579                         break;
2580                 }
2581                 if (self == 1 && peer == 0) {
2582                         rv =  1;
2583                         break;
2584                 }
2585                 /* Else fall through to one of the other strategies... */
2586         case ASB_DISCARD_OLDER_PRI:
2587                 if (self == 0 && peer == 1) {
2588                         rv = 1;
2589                         break;
2590                 }
2591                 if (self == 1 && peer == 0) {
2592                         rv = -1;
2593                         break;
2594                 }
2595                 /* Else fall through to one of the other strategies... */
2596                 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2597                      "Using discard-least-changes instead\n");
2598         case ASB_DISCARD_ZERO_CHG:
2599                 if (ch_peer == 0 && ch_self == 0) {
2600                         rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
2601                                 ? -1 : 1;
2602                         break;
2603                 } else {
2604                         if (ch_peer == 0) { rv =  1; break; }
2605                         if (ch_self == 0) { rv = -1; break; }
2606                 }
2607                 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
2608                         break;
2609         case ASB_DISCARD_LEAST_CHG:
2610                 if      (ch_self < ch_peer)
2611                         rv = -1;
2612                 else if (ch_self > ch_peer)
2613                         rv =  1;
2614                 else /* ( ch_self == ch_peer ) */
2615                      /* Well, then use something else. */
2616                         rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
2617                                 ? -1 : 1;
2618                 break;
2619         case ASB_DISCARD_LOCAL:
2620                 rv = -1;
2621                 break;
2622         case ASB_DISCARD_REMOTE:
2623                 rv =  1;
2624         }
2625
2626         return rv;
2627 }
2628
2629 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2630 {
2631         int hg, rv = -100;
2632         enum drbd_after_sb_p after_sb_1p;
2633
2634         rcu_read_lock();
2635         after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
2636         rcu_read_unlock();
2637         switch (after_sb_1p) {
2638         case ASB_DISCARD_YOUNGER_PRI:
2639         case ASB_DISCARD_OLDER_PRI:
2640         case ASB_DISCARD_LEAST_CHG:
2641         case ASB_DISCARD_LOCAL:
2642         case ASB_DISCARD_REMOTE:
2643         case ASB_DISCARD_ZERO_CHG:
2644                 dev_err(DEV, "Configuration error.\n");
2645                 break;
2646         case ASB_DISCONNECT:
2647                 break;
2648         case ASB_CONSENSUS:
2649                 hg = drbd_asb_recover_0p(mdev);
2650                 if (hg == -1 && mdev->state.role == R_SECONDARY)
2651                         rv = hg;
2652                 if (hg == 1  && mdev->state.role == R_PRIMARY)
2653                         rv = hg;
2654                 break;
2655         case ASB_VIOLENTLY:
2656                 rv = drbd_asb_recover_0p(mdev);
2657                 break;
2658         case ASB_DISCARD_SECONDARY:
2659                 return mdev->state.role == R_PRIMARY ? 1 : -1;
2660         case ASB_CALL_HELPER:
2661                 hg = drbd_asb_recover_0p(mdev);
2662                 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2663                         enum drbd_state_rv rv2;
2664
2665                         drbd_set_role(mdev, R_SECONDARY, 0);
2666                          /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2667                           * we might be here in C_WF_REPORT_PARAMS which is transient.
2668                           * we do not need to wait for the after state change work either. */
2669                         rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2670                         if (rv2 != SS_SUCCESS) {
2671                                 drbd_khelper(mdev, "pri-lost-after-sb");
2672                         } else {
2673                                 dev_warn(DEV, "Successfully gave up primary role.\n");
2674                                 rv = hg;
2675                         }
2676                 } else
2677                         rv = hg;
2678         }
2679
2680         return rv;
2681 }
2682
2683 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2684 {
2685         int hg, rv = -100;
2686         enum drbd_after_sb_p after_sb_2p;
2687
2688         rcu_read_lock();
2689         after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
2690         rcu_read_unlock();
2691         switch (after_sb_2p) {
2692         case ASB_DISCARD_YOUNGER_PRI:
2693         case ASB_DISCARD_OLDER_PRI:
2694         case ASB_DISCARD_LEAST_CHG:
2695         case ASB_DISCARD_LOCAL:
2696         case ASB_DISCARD_REMOTE:
2697         case ASB_CONSENSUS:
2698         case ASB_DISCARD_SECONDARY:
2699         case ASB_DISCARD_ZERO_CHG:
2700                 dev_err(DEV, "Configuration error.\n");
2701                 break;
2702         case ASB_VIOLENTLY:
2703                 rv = drbd_asb_recover_0p(mdev);
2704                 break;
2705         case ASB_DISCONNECT:
2706                 break;
2707         case ASB_CALL_HELPER:
2708                 hg = drbd_asb_recover_0p(mdev);
2709                 if (hg == -1) {
2710                         enum drbd_state_rv rv2;
2711
2712                          /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2713                           * we might be here in C_WF_REPORT_PARAMS which is transient.
2714                           * we do not need to wait for the after state change work either. */
2715                         rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2716                         if (rv2 != SS_SUCCESS) {
2717                                 drbd_khelper(mdev, "pri-lost-after-sb");
2718                         } else {
2719                                 dev_warn(DEV, "Successfully gave up primary role.\n");
2720                                 rv = hg;
2721                         }
2722                 } else
2723                         rv = hg;
2724         }
2725
2726         return rv;
2727 }
2728
2729 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2730                            u64 bits, u64 flags)
2731 {
2732         if (!uuid) {
2733                 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2734                 return;
2735         }
2736         dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2737              text,
2738              (unsigned long long)uuid[UI_CURRENT],
2739              (unsigned long long)uuid[UI_BITMAP],
2740              (unsigned long long)uuid[UI_HISTORY_START],
2741              (unsigned long long)uuid[UI_HISTORY_END],
2742              (unsigned long long)bits,
2743              (unsigned long long)flags);
2744 }
2745
2746 /*
2747   100   after split brain try auto recover
2748     2   C_SYNC_SOURCE set BitMap
2749     1   C_SYNC_SOURCE use BitMap
2750     0   no Sync
2751    -1   C_SYNC_TARGET use BitMap
2752    -2   C_SYNC_TARGET set BitMap
2753  -100   after split brain, disconnect
2754 -1000   unrelated data
2755 -1091   requires proto 91
2756 -1096   requires proto 96
2757  */
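/* Example (illustrative): if both nodes still carry the identical current
 * UUID and neither side has a leftover bitmap UUID, rule 40 applies and
 * the roles at crash time (rct) decide: 0 -> no sync, 1/-1 -> the former
 * primary becomes sync source, 3 (both were primary) -> the tie is broken
 * by the RESOLVE_CONFLICTS flag. */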
2758 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2759 {
2760         u64 self, peer;
2761         int i, j;
2762
2763         self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2764         peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2765
2766         *rule_nr = 10;
2767         if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2768                 return 0;
2769
2770         *rule_nr = 20;
2771         if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2772              peer != UUID_JUST_CREATED)
2773                 return -2;
2774
2775         *rule_nr = 30;
2776         if (self != UUID_JUST_CREATED &&
2777             (peer == UUID_JUST_CREATED || peer == (u64)0))
2778                 return 2;
2779
2780         if (self == peer) {
2781                 int rct, dc; /* roles at crash time */
2782
2783                 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2784
2785                         if (mdev->tconn->agreed_pro_version < 91)
2786                                 return -1091;
2787
2788                         if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2789                             (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2790                                 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2791                                 drbd_uuid_move_history(mdev);
2792                                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
2793                                 mdev->ldev->md.uuid[UI_BITMAP] = 0;
2794
2795                                 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2796                                                mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2797                                 *rule_nr = 34;
2798                         } else {
2799                                 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2800                                 *rule_nr = 36;
2801                         }
2802
2803                         return 1;
2804                 }
2805
2806                 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2807
2808                         if (mdev->tconn->agreed_pro_version < 91)
2809                                 return -1091;
2810
2811                         if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2812                             (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2813                                 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2814
2815                                 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2816                                 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2817                                 mdev->p_uuid[UI_BITMAP] = 0UL;
2818
2819                                 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2820                                 *rule_nr = 35;
2821                         } else {
2822                                 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2823                                 *rule_nr = 37;
2824                         }
2825
2826                         return -1;
2827                 }
2828
2829                 /* Common power [off|failure] */
2830                 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2831                         (mdev->p_uuid[UI_FLAGS] & 2);
2832                 /* lowest bit is set when we were primary,
2833                  * next bit (weight 2) is set when peer was primary */
2834                 *rule_nr = 40;
2835
2836                 switch (rct) {
2837                 case 0: /* !self_pri && !peer_pri */ return 0;
2838                 case 1: /*  self_pri && !peer_pri */ return 1;
2839                 case 2: /* !self_pri &&  peer_pri */ return -1;
2840                 case 3: /*  self_pri &&  peer_pri */
2841                         dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
2842                         return dc ? -1 : 1;
2843                 }
2844         }
2845
2846         *rule_nr = 50;
2847         peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2848         if (self == peer)
2849                 return -1;
2850
2851         *rule_nr = 51;
2852         peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2853         if (self == peer) {
2854                 if (mdev->tconn->agreed_pro_version < 96 ?
2855                     (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2856                     (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2857                     peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
2858                         /* The last P_SYNC_UUID did not get through. Undo the peer's
2859                            UUID changes from its last start of a resync as sync source. */
2860
2861                         if (mdev->tconn->agreed_pro_version < 91)
2862                                 return -1091;
2863
2864                         mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2865                         mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2866
2867                         dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
2868                         drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2869
2870                         return -1;
2871                 }
2872         }
2873
2874         *rule_nr = 60;
2875         self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2876         for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2877                 peer = mdev->p_uuid[i] & ~((u64)1);
2878                 if (self == peer)
2879                         return -2;
2880         }
2881
2882         *rule_nr = 70;
2883         self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2884         peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2885         if (self == peer)
2886                 return 1;
2887
2888         *rule_nr = 71;
2889         self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2890         if (self == peer) {
2891                 if (mdev->tconn->agreed_pro_version < 96 ?
2892                     (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2893                     (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2894                     self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2895                         /* The last P_SYNC_UUID did not get through. Undo the UUID
2896                            changes we made when we last started a resync as sync source. */
2897
2898                         if (mdev->tconn->agreed_pro_version < 91)
2899                                 return -1091;
2900
2901                         __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2902                         __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2903
2904                         dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
2905                         drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2906                                        mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2907
2908                         return 1;
2909                 }
2910         }
2911
2912
2913         *rule_nr = 80;
2914         peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2915         for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2916                 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2917                 if (self == peer)
2918                         return 2;
2919         }
2920
2921         *rule_nr = 90;
2922         self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2923         peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2924         if (self == peer && self != ((u64)0))
2925                 return 100;
2926
2927         *rule_nr = 100;
2928         for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2929                 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2930                 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2931                         peer = mdev->p_uuid[j] & ~((u64)1);
2932                         if (self == peer)
2933                                 return -100;
2934                 }
2935         }
2936
2937         return -1000;
2938 }
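
/*
 * Illustrative sketch (not compiled, names invented): rule 40 above packs
 * "this node crashed while primary" into bit 0 and "the peer crashed while
 * primary" into bit 1 (weight 2 from p_uuid[UI_FLAGS]), then switches on the
 * four possible combinations.
 */
#if 0
#include <stdio.h>

static int resolve_crashed_primaries(int self_was_primary, int peer_was_primary)
{
	int rct = (self_was_primary ? 1 : 0) | (peer_was_primary ? 2 : 0);

	switch (rct) {
	case 0:  return  0;	/* neither was primary: no resync needed */
	case 1:  return  1;	/* only we were primary: we become sync source */
	case 2:  return -1;	/* only the peer was primary: peer becomes source */
	default: return  1;	/* both primary: a tie breaker decides (case 3) */
	}
}

int main(void)
{
	printf("%d\n", resolve_crashed_primaries(1, 0));	/* prints 1 */
	return 0;
}
#endif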
2939
2940 /* drbd_sync_handshake() returns the new conn state on success, or
2941    C_MASK (-1) on failure.
2942  */
2943 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2944                                            enum drbd_disk_state peer_disk) __must_hold(local)
2945 {
2946         enum drbd_conns rv = C_MASK;
2947         enum drbd_disk_state mydisk;
2948         struct net_conf *nc;
2949         int hg, rule_nr, rr_conflict, tentative;
2950
2951         mydisk = mdev->state.disk;
2952         if (mydisk == D_NEGOTIATING)
2953                 mydisk = mdev->new_state_tmp.disk;
2954
2955         dev_info(DEV, "drbd_sync_handshake:\n");
2956
2957         spin_lock_irq(&mdev->ldev->md.uuid_lock);
2958         drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2959         drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2960                        mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2961
2962         hg = drbd_uuid_compare(mdev, &rule_nr);
2963         spin_unlock_irq(&mdev->ldev->md.uuid_lock);
2964
2965         dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2966
2967         if (hg == -1000) {
2968                 dev_alert(DEV, "Unrelated data, aborting!\n");
2969                 return C_MASK;
2970         }
2971         if (hg < -1000) {
2972                 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
2973                 return C_MASK;
2974         }
2975
2976         if    ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2977             (peer_disk == D_INCONSISTENT && mydisk    > D_INCONSISTENT)) {
2978                 int f = (hg == -100) || abs(hg) == 2;
2979                 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2980                 if (f)
2981                         hg = hg*2;
2982                 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2983                      hg > 0 ? "source" : "target");
2984         }
2985
2986         if (abs(hg) == 100)
2987                 drbd_khelper(mdev, "initial-split-brain");
2988
2989         rcu_read_lock();
2990         nc = rcu_dereference(mdev->tconn->net_conf);
2991
2992         if (hg == 100 || (hg == -100 && nc->always_asbp)) {
2993                 int pcount = (mdev->state.role == R_PRIMARY)
2994                            + (peer_role == R_PRIMARY);
2995                 int forced = (hg == -100);
2996
2997                 switch (pcount) {
2998                 case 0:
2999                         hg = drbd_asb_recover_0p(mdev);
3000                         break;
3001                 case 1:
3002                         hg = drbd_asb_recover_1p(mdev);
3003                         break;
3004                 case 2:
3005                         hg = drbd_asb_recover_2p(mdev);
3006                         break;
3007                 }
3008                 if (abs(hg) < 100) {
3009                         dev_warn(DEV, "Split-Brain detected, %d primaries, "
3010                              "automatically solved. Sync from %s node\n",
3011                              pcount, (hg < 0) ? "peer" : "this");
3012                         if (forced) {
3013                                 dev_warn(DEV, "Doing a full sync, since"
3014                                      " UUIDs where ambiguous.\n");
3015                                 hg = hg*2;
3016                         }
3017                 }
3018         }
3019
3020         if (hg == -100) {
3021                 if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
3022                         hg = -1;
3023                 if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
3024                         hg = 1;
3025
3026                 if (abs(hg) < 100)
3027                         dev_warn(DEV, "Split-Brain detected, manually solved. "
3028                              "Sync from %s node\n",
3029                              (hg < 0) ? "peer" : "this");
3030         }
3031         rr_conflict = nc->rr_conflict;
3032         tentative = nc->tentative;
3033         rcu_read_unlock();
3034
3035         if (hg == -100) {
3036                 /* FIXME this log message is not correct if we end up here
3037                  * after an attempted attach on a diskless node.
3038                  * We just refuse to attach -- well, we drop the "connection"
3039                  * to that disk, in a way... */
3040                 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
3041                 drbd_khelper(mdev, "split-brain");
3042                 return C_MASK;
3043         }
3044
3045         if (hg > 0 && mydisk <= D_INCONSISTENT) {
3046                 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
3047                 return C_MASK;
3048         }
3049
3050         if (hg < 0 && /* by intention we do not use mydisk here. */
3051             mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
3052                 switch (rr_conflict) {
3053                 case ASB_CALL_HELPER:
3054                         drbd_khelper(mdev, "pri-lost");
3055                         /* fall through */
3056                 case ASB_DISCONNECT:
3057                         dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
3058                         return C_MASK;
3059                 case ASB_VIOLENTLY:
3060                         dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
3061                              "assumption\n");
3062                 }
3063         }
3064
3065         if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
3066                 if (hg == 0)
3067                         dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3068                 else
3069                         dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
3070                                  drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3071                                  abs(hg) >= 2 ? "full" : "bit-map based");
3072                 return C_MASK;
3073         }
3074
3075         if (abs(hg) >= 2) {
3076                 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
3077                 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3078                                         BM_LOCKED_SET_ALLOWED))
3079                         return C_MASK;
3080         }
3081
3082         if (hg > 0) { /* become sync source. */
3083                 rv = C_WF_BITMAP_S;
3084         } else if (hg < 0) { /* become sync target */
3085                 rv = C_WF_BITMAP_T;
3086         } else {
3087                 rv = C_CONNECTED;
3088                 if (drbd_bm_total_weight(mdev)) {
3089                         dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
3090                              drbd_bm_total_weight(mdev));
3091                 }
3092         }
3093
3094         return rv;
3095 }
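
/*
 * Illustrative sketch (not compiled, helper invented): how the handshake
 * result hg is acted upon above -- the sign picks sync source vs. target, a
 * magnitude of 2 or more forces a full sync, and +/-100 means split brain.
 * The protocol-error codes (-1000 and below) are handled before this table
 * applies.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

static const char *describe_hg(int hg)
{
	if (hg == 0)
		return "connected, no resync";
	if (abs(hg) == 100)
		return "split brain";
	if (hg > 0)
		return abs(hg) >= 2 ? "sync source, full sync"
				    : "sync source, bitmap based";
	return abs(hg) >= 2 ? "sync target, full sync"
			    : "sync target, bitmap based";
}

int main(void)
{
	printf("%s\n", describe_hg(-2));	/* "sync target, full sync" */
	return 0;
}
#endif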
3096
3097 static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
3098 {
3099         /* the peer's ASB_DISCARD_REMOTE paired with our ASB_DISCARD_LOCAL is valid */
3100         if (peer == ASB_DISCARD_REMOTE)
3101                 return ASB_DISCARD_LOCAL;
3102
3103         /* any other combination involving ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL is invalid */
3104         if (peer == ASB_DISCARD_LOCAL)
3105                 return ASB_DISCARD_REMOTE;
3106
3107         /* everything else is valid if they are equal on both sides. */
3108         return peer;
3109 }
3110
3111 static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
3112 {
3113         struct p_protocol *p = pi->data;
3114         enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3115         int p_proto, p_discard_my_data, p_two_primaries, cf;
3116         struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3117         char integrity_alg[SHARED_SECRET_MAX] = "";
3118         struct crypto_hash *peer_integrity_tfm = NULL;
3119         void *int_dig_in = NULL, *int_dig_vv = NULL;
3120
3121         p_proto         = be32_to_cpu(p->protocol);
3122         p_after_sb_0p   = be32_to_cpu(p->after_sb_0p);
3123         p_after_sb_1p   = be32_to_cpu(p->after_sb_1p);
3124         p_after_sb_2p   = be32_to_cpu(p->after_sb_2p);
3125         p_two_primaries = be32_to_cpu(p->two_primaries);
3126         cf              = be32_to_cpu(p->conn_flags);
3127         p_discard_my_data = cf & CF_DISCARD_MY_DATA;
3128
3129         if (tconn->agreed_pro_version >= 87) {
3130                 int err;
3131
3132                 if (pi->size > sizeof(integrity_alg))
3133                         return -EIO;
3134                 err = drbd_recv_all(tconn, integrity_alg, pi->size);
3135                 if (err)
3136                         return err;
3137                 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
3138         }
3139
3140         if (pi->cmd != P_PROTOCOL_UPDATE) {
3141                 clear_bit(CONN_DRY_RUN, &tconn->flags);
3142
3143                 if (cf & CF_DRY_RUN)
3144                         set_bit(CONN_DRY_RUN, &tconn->flags);
3145
3146                 rcu_read_lock();
3147                 nc = rcu_dereference(tconn->net_conf);
3148
3149                 if (p_proto != nc->wire_protocol) {
3150                         conn_err(tconn, "incompatible %s settings\n", "protocol");
3151                         goto disconnect_rcu_unlock;
3152                 }
3153
3154                 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
3155                         conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
3156                         goto disconnect_rcu_unlock;
3157                 }
3158
3159                 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
3160                         conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
3161                         goto disconnect_rcu_unlock;
3162                 }
3163
3164                 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
3165                         conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
3166                         goto disconnect_rcu_unlock;
3167                 }
3168
3169                 if (p_discard_my_data && nc->discard_my_data) {
3170                         conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
3171                         goto disconnect_rcu_unlock;
3172                 }
3173
3174                 if (p_two_primaries != nc->two_primaries) {
3175                         conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
3176                         goto disconnect_rcu_unlock;
3177                 }
3178
3179                 if (strcmp(integrity_alg, nc->integrity_alg)) {
3180                         conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
3181                         goto disconnect_rcu_unlock;
3182                 }
3183
3184                 rcu_read_unlock();
3185         }
3186
3187         if (integrity_alg[0]) {
3188                 int hash_size;
3189
3190                 /*
3191                  * We can only change the peer data integrity algorithm
3192                  * here.  Changing our own data integrity algorithm
3193                  * requires that we send a P_PROTOCOL_UPDATE packet at
3194                  * the same time; otherwise, the peer has no way to
3195                  * know at which packet boundary the algorithm should
3196                  * change.
3197                  */
3198
3199                 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3200                 if (!peer_integrity_tfm) {
3201                         conn_err(tconn, "peer data-integrity-alg %s not supported\n",
3202                                  integrity_alg);
3203                         goto disconnect;
3204                 }
3205
3206                 hash_size = crypto_hash_digestsize(peer_integrity_tfm);
3207                 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3208                 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3209                 if (!(int_dig_in && int_dig_vv)) {
3210                         conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
3211                         goto disconnect;
3212                 }
3213         }
3214
3215         new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3216         if (!new_net_conf) {
3217                 conn_err(tconn, "Allocation of new net_conf failed\n");
3218                 goto disconnect;
3219         }
3220
3221         mutex_lock(&tconn->data.mutex);
3222         mutex_lock(&tconn->conf_update);
3223         old_net_conf = tconn->net_conf;
3224         *new_net_conf = *old_net_conf;
3225
3226         new_net_conf->wire_protocol = p_proto;
3227         new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3228         new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3229         new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3230         new_net_conf->two_primaries = p_two_primaries;
3231
3232         rcu_assign_pointer(tconn->net_conf, new_net_conf);
3233         mutex_unlock(&tconn->conf_update);
3234         mutex_unlock(&tconn->data.mutex);
3235
3236         crypto_free_hash(tconn->peer_integrity_tfm);
3237         kfree(tconn->int_dig_in);
3238         kfree(tconn->int_dig_vv);
3239         tconn->peer_integrity_tfm = peer_integrity_tfm;
3240         tconn->int_dig_in = int_dig_in;
3241         tconn->int_dig_vv = int_dig_vv;
3242
3243         if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3244                 conn_info(tconn, "peer data-integrity-alg: %s\n",
3245                           integrity_alg[0] ? integrity_alg : "(none)");
3246
3247         synchronize_rcu();
3248         kfree(old_net_conf);
3249         return 0;
3250
3251 disconnect_rcu_unlock:
3252         rcu_read_unlock();
3253 disconnect:
3254         crypto_free_hash(peer_integrity_tfm);
3255         kfree(int_dig_in);
3256         kfree(int_dig_vv);
3257         conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3258         return -EIO;
3259 }
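
/*
 * Illustrative sketch (not compiled, names invented): the configuration
 * update above follows the read-copy-update pattern -- allocate a complete
 * copy, modify the copy, publish it with one pointer store, and free the old
 * version only after no reader can still see it.  This user-space analogue
 * uses C11 atomics for the publish step; the kernel code instead pairs
 * rcu_assign_pointer() with synchronize_rcu() before the kfree().
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conf { int wire_protocol; int two_primaries; };

static _Atomic(struct conf *) active_conf;

static void update_protocol(int proto)
{
	struct conf *old = atomic_load(&active_conf);
	struct conf *new = malloc(sizeof(*new));

	*new = *old;			/* copy everything ...             */
	new->wire_protocol = proto;	/* ... then modify the copy        */
	atomic_store(&active_conf, new);/* publish with one store          */
	free(old);			/* kernel: synchronize_rcu() first */
}

int main(void)
{
	struct conf *init = calloc(1, sizeof(*init));

	atomic_store(&active_conf, init);
	update_protocol(96);
	printf("%d\n", atomic_load(&active_conf)->wire_protocol);	/* 96 */
	free(atomic_load(&active_conf));
	return 0;
}
#endif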
3260
3261 /* helper function
3262  * input: alg name, feature name
3263  * return: NULL (alg name was "")
3264  *         ERR_PTR(error) if something goes wrong
3265  *         or the crypto hash ptr, if it worked out ok. */
3266 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
3267                 const char *alg, const char *name)
3268 {
3269         struct crypto_hash *tfm;
3270
3271         if (!alg[0])
3272                 return NULL;
3273
3274         tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3275         if (IS_ERR(tfm)) {
3276                 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3277                         alg, name, PTR_ERR(tfm));
3278                 return tfm;
3279         }
3280         return tfm;
3281 }
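
/*
 * Illustrative usage sketch (not compiled; alg_name is a placeholder): the
 * helper above deliberately has a three-way result, and callers such as
 * receive_SyncParam() below distinguish the cases with IS_ERR().
 */
#if 0
	struct crypto_hash *tfm;

	tfm = drbd_crypto_alloc_digest_safe(mdev, alg_name, "verify-alg");
	if (IS_ERR(tfm))
		goto disconnect;	/* allocation failed, already logged */
	if (tfm == NULL)
		return 0;		/* empty name: feature not configured */
	/* otherwise use tfm, and crypto_free_hash() it when done */
#endif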
3282
3283 static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
3284 {
3285         void *buffer = tconn->data.rbuf;
3286         int size = pi->size;
3287
3288         while (size) {
3289                 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3290                 s = drbd_recv(tconn, buffer, s);
3291                 if (s <= 0) {
3292                         if (s < 0)
3293                                 return s;
3294                         break;
3295                 }
3296                 size -= s;
3297         }
3298         if (size)
3299                 return -EIO;
3300         return 0;
3301 }
3302
3303 /*
3304  * config_unknown_volume  -  device configuration command for unknown volume
3305  *
3306  * When a device is added to an existing connection, the node on which the
3307  * device is added first will send configuration commands to its peer but the
3308  * peer will not know about the device yet.  It will warn and ignore these
3309  * commands.  Once the device is added on the second node, the second node will
3310  * send the same device configuration commands, but in the other direction.
3311  *
3312  * (We can also end up here if drbd is misconfigured.)
3313  */
3314 static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3315 {
3316         conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
3317                   cmdname(pi->cmd), pi->vnr);
3318         return ignore_remaining_packet(tconn, pi);
3319 }
3320
3321 static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3322 {
3323         struct drbd_conf *mdev;
3324         struct p_rs_param_95 *p;
3325         unsigned int header_size, data_size, exp_max_sz;
3326         struct crypto_hash *verify_tfm = NULL;
3327         struct crypto_hash *csums_tfm = NULL;
3328         struct net_conf *old_net_conf, *new_net_conf = NULL;
3329         struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
3330         const int apv = tconn->agreed_pro_version;
3331         struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
3332         int fifo_size = 0;
3333         int err;
3334
3335         mdev = vnr_to_mdev(tconn, pi->vnr);
3336         if (!mdev)
3337                 return config_unknown_volume(tconn, pi);
3338
3339         exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
3340                     : apv == 88 ? sizeof(struct p_rs_param)
3341                                         + SHARED_SECRET_MAX
3342                     : apv <= 94 ? sizeof(struct p_rs_param_89)
3343                     : /* apv >= 95 */ sizeof(struct p_rs_param_95);
3344
3345         if (pi->size > exp_max_sz) {
3346                 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
3347                     pi->size, exp_max_sz);
3348                 return -EIO;
3349         }
3350
3351         if (apv <= 88) {
3352                 header_size = sizeof(struct p_rs_param);
3353                 data_size = pi->size - header_size;
3354         } else if (apv <= 94) {
3355                 header_size = sizeof(struct p_rs_param_89);
3356                 data_size = pi->size - header_size;
3357                 D_ASSERT(data_size == 0);
3358         } else {
3359                 header_size = sizeof(struct p_rs_param_95);
3360                 data_size = pi->size - header_size;
3361                 D_ASSERT(data_size == 0);
3362         }
3363
3364         /* initialize verify_alg and csums_alg */
3365         p = pi->data;
3366         memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3367
3368         err = drbd_recv_all(mdev->tconn, p, header_size);
3369         if (err)
3370                 return err;
3371
3372         mutex_lock(&mdev->tconn->conf_update);
3373         old_net_conf = mdev->tconn->net_conf;
3374         if (get_ldev(mdev)) {
3375                 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3376                 if (!new_disk_conf) {
3377                         put_ldev(mdev);
3378                         mutex_unlock(&mdev->tconn->conf_update);
3379                         dev_err(DEV, "Allocation of new disk_conf failed\n");
3380                         return -ENOMEM;
3381                 }
3382
3383                 old_disk_conf = mdev->ldev->disk_conf;
3384                 *new_disk_conf = *old_disk_conf;
3385
3386                 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
3387         }
3388
3389         if (apv >= 88) {
3390                 if (apv == 88) {
3391                         if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3392                                 dev_err(DEV, "verify-alg of wrong size, "
3393                                         "peer wants %u, accepting only up to %u byte\n",
3394                                         data_size, SHARED_SECRET_MAX);
3395                                 err = -EIO;
3396                                 goto reconnect;
3397                         }
3398
3399                         err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
3400                         if (err)
3401                                 goto reconnect;
3402                         /* we expect NUL terminated string */
3403                         /* but just in case someone tries to be evil */
3404                         D_ASSERT(p->verify_alg[data_size-1] == 0);
3405                         p->verify_alg[data_size-1] = 0;
3406
3407                 } else /* apv >= 89 */ {
3408                         /* we still expect NUL terminated strings */
3409                         /* but just in case someone tries to be evil */
3410                         D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3411                         D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3412                         p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3413                         p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3414                 }
3415
3416                 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
3417                         if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3418                                 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
3419                                     old_net_conf->verify_alg, p->verify_alg);
3420                                 goto disconnect;
3421                         }
3422                         verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3423                                         p->verify_alg, "verify-alg");
3424                         if (IS_ERR(verify_tfm)) {
3425                                 verify_tfm = NULL;
3426                                 goto disconnect;
3427                         }
3428                 }
3429
3430                 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
3431                         if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3432                                 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
3433                                     old_net_conf->csums_alg, p->csums_alg);
3434                                 goto disconnect;
3435                         }
3436                         csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3437                                         p->csums_alg, "csums-alg");
3438                         if (IS_ERR(csums_tfm)) {
3439                                 csums_tfm = NULL;
3440                                 goto disconnect;
3441                         }
3442                 }
3443
3444                 if (apv > 94 && new_disk_conf) {
3445                         new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3446                         new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3447                         new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3448                         new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
3449
3450                         fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
3451                         if (fifo_size != mdev->rs_plan_s->size) {
3452                                 new_plan = fifo_alloc(fifo_size);
3453                                 if (!new_plan) {
3454                                         dev_err(DEV, "kmalloc of fifo_buffer failed");
3455                                         put_ldev(mdev);
3456                                         goto disconnect;
3457                                 }
3458                         }
3459                 }
3460
3461                 if (verify_tfm || csums_tfm) {
3462                         new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3463                         if (!new_net_conf) {
3464                                 dev_err(DEV, "Allocation of new net_conf failed\n");
3465                                 goto disconnect;
3466                         }
3467
3468                         *new_net_conf = *old_net_conf;
3469
3470                         if (verify_tfm) {
3471                                 strcpy(new_net_conf->verify_alg, p->verify_alg);
3472                                 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3473                                 crypto_free_hash(mdev->tconn->verify_tfm);
3474                                 mdev->tconn->verify_tfm = verify_tfm;
3475                                 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3476                         }
3477                         if (csums_tfm) {
3478                                 strcpy(new_net_conf->csums_alg, p->csums_alg);
3479                                 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3480                                 crypto_free_hash(mdev->tconn->csums_tfm);
3481                                 mdev->tconn->csums_tfm = csums_tfm;
3482                                 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3483                         }
3484                         rcu_assign_pointer(tconn->net_conf, new_net_conf);
3485                 }
3486         }
3487
3488         if (new_disk_conf) {
3489                 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3490                 put_ldev(mdev);
3491         }
3492
3493         if (new_plan) {
3494                 old_plan = mdev->rs_plan_s;
3495                 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
3496         }
3497
3498         mutex_unlock(&mdev->tconn->conf_update);
3499         synchronize_rcu();
3500         if (new_net_conf)
3501                 kfree(old_net_conf);
3502         kfree(old_disk_conf);
3503         kfree(old_plan);
3504
3505         return 0;
3506
3507 reconnect:
3508         if (new_disk_conf) {
3509                 put_ldev(mdev);
3510                 kfree(new_disk_conf);
3511         }
3512         mutex_unlock(&mdev->tconn->conf_update);
3513         return -EIO;
3514
3515 disconnect:
3516         kfree(new_plan);
3517         if (new_disk_conf) {
3518                 put_ldev(mdev);
3519                 kfree(new_disk_conf);
3520         }
3521         mutex_unlock(&mdev->tconn->conf_update);
3522         /* just for completeness: actually not needed,
3523          * as this is not reached if csums_tfm was ok. */
3524         crypto_free_hash(csums_tfm);
3525         /* but free the verify_tfm again, if csums_tfm did not work out */
3526         crypto_free_hash(verify_tfm);
3527         conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3528         return -EIO;
3529 }
3530
3531 /* warn if the arguments differ by more than 12.5% */
3532 static void warn_if_differ_considerably(struct drbd_conf *mdev,
3533         const char *s, sector_t a, sector_t b)
3534 {
3535         sector_t d;
3536         if (a == 0 || b == 0)
3537                 return;
3538         d = (a > b) ? (a - b) : (b - a);
3539         if (d > (a>>3) || d > (b>>3))
3540                 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3541                      (unsigned long long)a, (unsigned long long)b);
3542 }
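
/*
 * Illustrative sketch (not compiled, helper invented): the check above
 * avoids a division -- x >> 3 is x / 8, i.e. 12.5% of x.  Worked example:
 * a = 1000, b = 1200 gives d = 200, and 200 > (1000 >> 3) = 125, so the
 * difference would be reported.
 */
#if 0
#include <stdio.h>

static int differ_considerably(unsigned long long a, unsigned long long b)
{
	unsigned long long d = (a > b) ? a - b : b - a;

	return d > (a >> 3) || d > (b >> 3);
}

int main(void)
{
	printf("%d %d\n", differ_considerably(1000, 1200),	/* 1 */
			  differ_considerably(1000, 1100));	/* 0 */
	return 0;
}
#endif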
3543
3544 static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
3545 {
3546         struct drbd_conf *mdev;
3547         struct p_sizes *p = pi->data;
3548         enum determine_dev_size dd = unchanged;
3549         sector_t p_size, p_usize, my_usize;
3550         int ldsc = 0; /* local disk size changed */
3551         enum dds_flags ddsf;
3552
3553         mdev = vnr_to_mdev(tconn, pi->vnr);
3554         if (!mdev)
3555                 return config_unknown_volume(tconn, pi);
3556
3557         p_size = be64_to_cpu(p->d_size);
3558         p_usize = be64_to_cpu(p->u_size);
3559
3560         /* just store the peer's disk size for now.
3561          * we still need to figure out whether we accept that. */
3562         mdev->p_size = p_size;
3563
3564         if (get_ldev(mdev)) {
3565                 rcu_read_lock();
3566                 my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
3567                 rcu_read_unlock();
3568
3569                 warn_if_differ_considerably(mdev, "lower level device sizes",
3570                            p_size, drbd_get_max_capacity(mdev->ldev));
3571                 warn_if_differ_considerably(mdev, "user requested size",
3572                                             p_usize, my_usize);
3573
3574                 /* if this is the first connect, or an otherwise expected
3575                  * param exchange, choose the minimum */
3576                 if (mdev->state.conn == C_WF_REPORT_PARAMS)
3577                         p_usize = min_not_zero(my_usize, p_usize);
3578
3579                 /* Never shrink a device with usable data during connect.
3580                    But allow online shrinking if we are connected. */
3581                 if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
3582                     drbd_get_capacity(mdev->this_bdev) &&
3583                     mdev->state.disk >= D_OUTDATED &&
3584                     mdev->state.conn < C_CONNECTED) {
3585                         dev_err(DEV, "The peer's disk size is too small!\n");
3586                         conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3587                         put_ldev(mdev);
3588                         return -EIO;
3589                 }
3590
3591                 if (my_usize != p_usize) {
3592                         struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3593
3594                         new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3595                         if (!new_disk_conf) {
3596                                 dev_err(DEV, "Allocation of new disk_conf failed\n");
3597                                 put_ldev(mdev);
3598                                 return -ENOMEM;
3599                         }
3600
3601                         mutex_lock(&mdev->tconn->conf_update);
3602                         old_disk_conf = mdev->ldev->disk_conf;
3603                         *new_disk_conf = *old_disk_conf;
3604                         new_disk_conf->disk_size = p_usize;
3605
3606                         rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3607                         mutex_unlock(&mdev->tconn->conf_update);
3608                         synchronize_rcu();
3609                         kfree(old_disk_conf);
3610
3611                         dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3612                                  (unsigned long)p_usize);
3613                 }
3614
3615                 put_ldev(mdev);
3616         }
3617
3618         ddsf = be16_to_cpu(p->dds_flags);
3619         if (get_ldev(mdev)) {
3620                 dd = drbd_determine_dev_size(mdev, ddsf);
3621                 put_ldev(mdev);
3622                 if (dd == dev_size_error)
3623                         return -EIO;
3624                 drbd_md_sync(mdev);
3625         } else {
3626                 /* I am diskless, need to accept the peer's size. */
3627                 drbd_set_my_capacity(mdev, p_size);
3628         }
3629
3630         mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3631         drbd_reconsider_max_bio_size(mdev);
3632
3633         if (get_ldev(mdev)) {
3634                 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3635                         mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3636                         ldsc = 1;
3637                 }
3638
3639                 put_ldev(mdev);
3640         }
3641
3642         if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3643                 if (be64_to_cpu(p->c_size) !=
3644                     drbd_get_capacity(mdev->this_bdev) || ldsc) {
3645                         /* we have different sizes; probably the peer
3646                          * needs to know my new size... */
3647                         drbd_send_sizes(mdev, 0, ddsf);
3648                 }
3649                 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3650                     (dd == grew && mdev->state.conn == C_CONNECTED)) {
3651                         if (mdev->state.pdsk >= D_INCONSISTENT &&
3652                             mdev->state.disk >= D_INCONSISTENT) {
3653                                 if (ddsf & DDSF_NO_RESYNC)
3654                                         dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3655                                 else
3656                                         resync_after_online_grow(mdev);
3657                         } else
3658                                 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3659                 }
3660         }
3661
3662         return 0;
3663 }
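
/*
 * Illustrative sketch (not compiled): receive_sizes() above picks the
 * smaller of the two requested sizes with the kernel's min_not_zero(),
 * where 0 means "no explicit request" and loses against any real value.
 * This is a user-space rendering of that semantic.
 */
#if 0
#include <stdio.h>

static unsigned long long min_not_zero_ull(unsigned long long x,
					   unsigned long long y)
{
	if (x == 0)
		return y;
	if (y == 0)
		return x;
	return x < y ? x : y;
}

int main(void)
{
	printf("%llu\n", min_not_zero_ull(0, 500));	/* 500: 0 is "unset" */
	printf("%llu\n", min_not_zero_ull(300, 500));	/* 300 */
	return 0;
}
#endif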
3664
3665 static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
3666 {
3667         struct drbd_conf *mdev;
3668         struct p_uuids *p = pi->data;
3669         u64 *p_uuid;
3670         int i, updated_uuids = 0;
3671
3672         mdev = vnr_to_mdev(tconn, pi->vnr);
3673         if (!mdev)
3674                 return config_unknown_volume(tconn, pi);
3675
3676         p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3677         if (!p_uuid) {
3678                 dev_err(DEV, "kmalloc of p_uuid failed\n");
3679                 return -ENOMEM;
3680         }
3681
3682         for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3683                 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3684
3685         kfree(mdev->p_uuid);
3686         mdev->p_uuid = p_uuid;
3687
3688         if (mdev->state.conn < C_CONNECTED &&
3689             mdev->state.disk < D_INCONSISTENT &&
3690             mdev->state.role == R_PRIMARY &&
3691             (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3692                 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3693                     (unsigned long long)mdev->ed_uuid);
3694                 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3695                 return -EIO;
3696         }
3697
3698         if (get_ldev(mdev)) {
3699                 int skip_initial_sync =
3700                         mdev->state.conn == C_CONNECTED &&
3701                         mdev->tconn->agreed_pro_version >= 90 &&
3702                         mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3703                         (p_uuid[UI_FLAGS] & 8);
3704                 if (skip_initial_sync) {
3705                         dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3706                         drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3707                                         "clear_n_write from receive_uuids",
3708                                         BM_LOCKED_TEST_ALLOWED);
3709                         _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3710                         _drbd_uuid_set(mdev, UI_BITMAP, 0);
3711                         _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3712                                         CS_VERBOSE, NULL);
3713                         drbd_md_sync(mdev);
3714                         updated_uuids = 1;
3715                 }
3716                 put_ldev(mdev);
3717         } else if (mdev->state.disk < D_INCONSISTENT &&
3718                    mdev->state.role == R_PRIMARY) {
3719                 /* I am a diskless primary, the peer just created a new current UUID
3720                    for me. */
3721                 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3722         }
3723
3724         /* Before we test the disk state we should wait until any possibly
3725            ongoing cluster-wide state change has finished. That is important if
3726            we are primary and are detaching from our disk. We need to see the
3727            new disk state... */
3728         mutex_lock(mdev->state_mutex);
3729         mutex_unlock(mdev->state_mutex);
3730         if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3731                 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3732
3733         if (updated_uuids)
3734                 drbd_print_uuids(mdev, "receiver updated UUIDs to");
3735
3736         return 0;
3737 }
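
/*
 * Illustrative sketch (not compiled, helper invented): the UUID loop above
 * converts each 64-bit field from DRBD's big-endian wire format with
 * be64_to_cpu().  This is what that conversion does, spelled out.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint64_t be64_to_host(const unsigned char b[8])
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | b[i];	/* most significant byte first */
	return v;
}

int main(void)
{
	unsigned char wire[8] = { 0, 0, 0, 0, 0, 0, 0x12, 0x34 };

	printf("0x%llx\n", (unsigned long long)be64_to_host(wire));	/* 0x1234 */
	return 0;
}
#endif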
3738
3739 /**
3740  * convert_state() - Converts the peer's view of the cluster state to our point of view
3741  * @ps:         The state as seen by the peer.
3742  */
3743 static union drbd_state convert_state(union drbd_state ps)
3744 {
3745         union drbd_state ms;
3746
3747         static enum drbd_conns c_tab[] = {
3748                 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
3749                 [C_CONNECTED] = C_CONNECTED,
3750
3751                 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3752                 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3753                 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3754                 [C_VERIFY_S]       = C_VERIFY_T,
3755                 [C_MASK]   = C_MASK,
3756         };
3757
3758         ms.i = ps.i;
3759
3760         ms.conn = c_tab[ps.conn];
3761         ms.peer = ps.role;
3762         ms.role = ps.peer;
3763         ms.pdsk = ps.disk;
3764         ms.disk = ps.pdsk;
3765         ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3766
3767         return ms;
3768 }
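
/*
 * Illustrative sketch (not compiled, names invented): convert_state() above
 * mirrors the state because both nodes describe the same pair -- what the
 * peer calls its "role" is our "peer" role, and its view of our disk is our
 * pdsk.  Reduced to plain ints:
 */
#if 0
#include <stdio.h>

struct view { int role, peer, disk, pdsk; };

static struct view convert_view(struct view peer_sees)
{
	struct view mine = peer_sees;

	mine.role = peer_sees.peer;
	mine.peer = peer_sees.role;
	mine.disk = peer_sees.pdsk;
	mine.pdsk = peer_sees.disk;
	return mine;
}

int main(void)
{
	struct view p = { .role = 1, .peer = 2, .disk = 3, .pdsk = 4 };
	struct view m = convert_view(p);

	printf("%d %d %d %d\n", m.role, m.peer, m.disk, m.pdsk);	/* 2 1 4 3 */
	return 0;
}
#endif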
3769
3770 static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
3771 {
3772         struct drbd_conf *mdev;
3773         struct p_req_state *p = pi->data;
3774         union drbd_state mask, val;
3775         enum drbd_state_rv rv;
3776
3777         mdev = vnr_to_mdev(tconn, pi->vnr);
3778         if (!mdev)
3779                 return -EIO;
3780
3781         mask.i = be32_to_cpu(p->mask);
3782         val.i = be32_to_cpu(p->val);
3783
3784         if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
3785             mutex_is_locked(mdev->state_mutex)) {
3786                 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3787                 return 0;
3788         }
3789
3790         mask = convert_state(mask);
3791         val = convert_state(val);
3792
3793         rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3794         drbd_send_sr_reply(mdev, rv);
3795
3796         drbd_md_sync(mdev);
3797
3798         return 0;
3799 }
3800
3801 static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
3802 {
3803         struct p_req_state *p = pi->data;
3804         union drbd_state mask, val;
3805         enum drbd_state_rv rv;
3806
3807         mask.i = be32_to_cpu(p->mask);
3808         val.i = be32_to_cpu(p->val);
3809
3810         if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
3811             mutex_is_locked(&tconn->cstate_mutex)) {
3812                 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
3813                 return 0;
3814         }
3815
3816         mask = convert_state(mask);
3817         val = convert_state(val);
3818
3819         rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
3820         conn_send_sr_reply(tconn, rv);
3821
3822         return 0;
3823 }
3824
3825 static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
3826 {
3827         struct drbd_conf *mdev;
3828         struct p_state *p = pi->data;
3829         union drbd_state os, ns, peer_state;
3830         enum drbd_disk_state real_peer_disk;
3831         enum chg_state_flags cs_flags;
3832         int rv;
3833
3834         mdev = vnr_to_mdev(tconn, pi->vnr);
3835         if (!mdev)
3836                 return config_unknown_volume(tconn, pi);
3837
3838         peer_state.i = be32_to_cpu(p->state);
3839
3840         real_peer_disk = peer_state.disk;
3841         if (peer_state.disk == D_NEGOTIATING) {
3842                 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3843                 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3844         }
3845
3846         spin_lock_irq(&mdev->tconn->req_lock);
3847  retry:
3848         os = ns = drbd_read_state(mdev);
3849         spin_unlock_irq(&mdev->tconn->req_lock);
3850
3851         /* If some other part of the code (asender thread, timeout)
3852          * already decided to close the connection again,
3853          * we must not "re-establish" it here. */
3854         if (os.conn <= C_TEAR_DOWN)
3855                 return -ECONNRESET;
3856
3857         /* If this is the "end of sync" confirmation, usually the peer disk
3858          * transitions from D_INCONSISTENT to D_UP_TO_DATE. For an empty resync
3859          * (0 bits set) started in PausedSyncT, or if the timing of pause-/
3860          * unpause-sync events has been "just right", the peer disk may
3861          * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3862          */
3863         if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3864             real_peer_disk == D_UP_TO_DATE &&
3865             os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3866                 /* If we are (becoming) SyncSource, but peer is still in sync
3867                  * preparation, ignore its uptodate-ness to avoid flapping, it
3868                  * will change to inconsistent once the peer reaches active
3869                  * syncing states.
3870                  * It may have changed syncer-paused flags, however, so we
3871                  * cannot ignore this completely. */
3872                 if (peer_state.conn > C_CONNECTED &&
3873                     peer_state.conn < C_SYNC_SOURCE)
3874                         real_peer_disk = D_INCONSISTENT;
3875
3876                 /* if peer_state changes to connected at the same time,
3877                  * it explicitly notifies us that it finished resync.
3878                  * Maybe we should finish it up, too? */
3879                 else if (os.conn >= C_SYNC_SOURCE &&
3880                          peer_state.conn == C_CONNECTED) {
3881                         if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3882                                 drbd_resync_finished(mdev);
3883                         return 0;
3884                 }
3885         }
3886
3887         /* explicit verify finished notification, stop sector reached. */
3888         if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
3889             peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
3890                 ov_out_of_sync_print(mdev);
3891                 drbd_resync_finished(mdev);
3892                 return 0;
3893         }
3894
3895         /* peer says its disk is inconsistent, while we think it is up to date,
3896          * and this happens while the peer still thinks we have a sync going on,
3897          * but we think we are already done with the sync.
3898          * We ignore this to avoid flapping pdsk.
3899          * This should not happen, if the peer is a recent version of drbd. */
3900         if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3901             os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3902                 real_peer_disk = D_UP_TO_DATE;
3903
3904         if (ns.conn == C_WF_REPORT_PARAMS)
3905                 ns.conn = C_CONNECTED;
3906
3907         if (peer_state.conn == C_AHEAD)
3908                 ns.conn = C_BEHIND;
3909
3910         if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3911             get_ldev_if_state(mdev, D_NEGOTIATING)) {
3912                 int cr; /* consider resync */
3913
3914                 /* if we established a new connection */
3915                 cr  = (os.conn < C_CONNECTED);
3916                 /* if we had an established connection
3917                  * and one of the nodes newly attaches a disk */
3918                 cr |= (os.conn == C_CONNECTED &&
3919                        (peer_state.disk == D_NEGOTIATING ||
3920                         os.disk == D_NEGOTIATING));
3921                 /* if we have both been inconsistent, and the peer has been
3922                  * forced to be UpToDate with --overwrite-data */
3923                 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3924                 /* if we had been plain connected, and the admin requested to
3925                  * start a sync by "invalidate" or "invalidate-remote" */
3926                 cr |= (os.conn == C_CONNECTED &&
3927                                 (peer_state.conn >= C_STARTING_SYNC_S &&
3928                                  peer_state.conn <= C_WF_BITMAP_T));
3929
3930                 if (cr)
3931                         ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3932
3933                 put_ldev(mdev);
3934                 if (ns.conn == C_MASK) {
3935                         ns.conn = C_CONNECTED;
3936                         if (mdev->state.disk == D_NEGOTIATING) {
3937                                 drbd_force_state(mdev, NS(disk, D_FAILED));
3938                         } else if (peer_state.disk == D_NEGOTIATING) {
3939                                 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3940                                 peer_state.disk = D_DISKLESS;
3941                                 real_peer_disk = D_DISKLESS;
3942                         } else {
3943                                 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
3944                                         return -EIO;
3945                                 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3946                                 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3947                                 return -EIO;
3948                         }
3949                 }
3950         }
3951
3952         spin_lock_irq(&mdev->tconn->req_lock);
3953         if (os.i != drbd_read_state(mdev).i)
3954                 goto retry;
3955         clear_bit(CONSIDER_RESYNC, &mdev->flags);
3956         ns.peer = peer_state.role;
3957         ns.pdsk = real_peer_disk;
3958         ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3959         if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3960                 ns.disk = mdev->new_state_tmp.disk;
3961         cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3962         if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3963             test_bit(NEW_CUR_UUID, &mdev->flags)) {
3964                 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
3965                    for temporary network outages! */
3966                 spin_unlock_irq(&mdev->tconn->req_lock);
3967                 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3968                 tl_clear(mdev->tconn);
3969                 drbd_uuid_new_current(mdev);
3970                 clear_bit(NEW_CUR_UUID, &mdev->flags);
3971                 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
3972                 return -EIO;
3973         }
3974         rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3975         ns = drbd_read_state(mdev);
3976         spin_unlock_irq(&mdev->tconn->req_lock);
3977
3978         if (rv < SS_SUCCESS) {
3979                 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3980                 return -EIO;
3981         }
3982
3983         if (os.conn > C_WF_REPORT_PARAMS) {
3984                 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3985                     peer_state.disk != D_NEGOTIATING ) {
3986                         /* we want resync, peer has not yet decided to sync... */
3987                         /* Nowadays only used when forcing a node into the primary role and
3988                            setting its disk to UpToDate at the same time */
3989                         drbd_send_uuids(mdev);
3990                         drbd_send_current_state(mdev);
3991                 }
3992         }
3993
3994         clear_bit(DISCARD_MY_DATA, &mdev->flags);
3995
3996         drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3997
3998         return 0;
3999 }
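
/*
 * Illustrative sketch (not compiled, names invented): receive_state() above
 * uses an optimistic retry loop -- snapshot the state under the lock, drop
 * the lock for the expensive work, then retake it and start over if the
 * snapshot went stale in the meantime.  The same shape with a pthread mutex:
 */
#if 0
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int shared_state;

static void update_state(unsigned int (*compute)(unsigned int))
{
	unsigned int snapshot, result;

retry:
	pthread_mutex_lock(&lock);
	snapshot = shared_state;
	pthread_mutex_unlock(&lock);

	result = compute(snapshot);	/* possibly slow, lock not held */

	pthread_mutex_lock(&lock);
	if (shared_state != snapshot) {	/* someone else changed it */
		pthread_mutex_unlock(&lock);
		goto retry;
	}
	shared_state = result;
	pthread_mutex_unlock(&lock);
}

static unsigned int bump(unsigned int s) { return s + 1; }

int main(void)
{
	update_state(bump);
	printf("%u\n", shared_state);	/* prints 1 */
	return 0;
}
#endif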
4000
4001 static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
4002 {
4003         struct drbd_conf *mdev;
4004         struct p_rs_uuid *p = pi->data;
4005
4006         mdev = vnr_to_mdev(tconn, pi->vnr);
4007         if (!mdev)
4008                 return -EIO;
4009
4010         wait_event(mdev->misc_wait,
4011                    mdev->state.conn == C_WF_SYNC_UUID ||
4012                    mdev->state.conn == C_BEHIND ||
4013                    mdev->state.conn < C_CONNECTED ||
4014                    mdev->state.disk < D_NEGOTIATING);
4015
4016         /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
4017
4018         /* Here the _drbd_uuid_ functions are right, current should
4019            _not_ be rotated into the history */
4020         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
4021                 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
4022                 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
4023
4024                 drbd_print_uuids(mdev, "updated sync uuid");
4025                 drbd_start_resync(mdev, C_SYNC_TARGET);
4026
4027                 put_ldev(mdev);
4028         } else
4029                 dev_err(DEV, "Ignoring SyncUUID packet!\n");
4030
4031         return 0;
4032 }
4033
4034 /**
4035  * receive_bitmap_plain() - Receive one chunk of a plain (uncompressed) bitmap
4036  *
4037  * Return 0 when done, 1 when another iteration is needed, and a negative error
4038  * code upon failure.
4039  */
4040 static int
4041 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
4042                      unsigned long *p, struct bm_xfer_ctx *c)
4043 {
4044         unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4045                                  drbd_header_size(mdev->tconn);
4046         unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
4047                                        c->bm_words - c->word_offset);
4048         unsigned int want = num_words * sizeof(*p);
4049         int err;
4050
4051         if (want != size) {
4052                 dev_err(DEV, "%s: want (%u) != size (%u)\n", __func__, want, size);
4053                 return -EIO;
4054         }
4055         if (want == 0)
4056                 return 0;
4057         err = drbd_recv_all(mdev->tconn, p, want);
4058         if (err)
4059                 return err;
4060
4061         drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
4062
4063         c->word_offset += num_words;
4064         c->bit_offset = c->word_offset * BITS_PER_LONG;
4065         if (c->bit_offset > c->bm_bits)
4066                 c->bit_offset = c->bm_bits;
4067
4068         return 1;
4069 }
4070
4071 static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4072 {
4073         return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4074 }
4075
4076 static int dcbp_get_start(struct p_compressed_bm *p)
4077 {
4078         return (p->encoding & 0x80) != 0;
4079 }
4080
4081 static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4082 {
4083         return (p->encoding >> 4) & 0x7;
4084 }
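
/* Layout of the p_compressed_bm encoding byte, as implied by the three
 * accessors above (editor's sketch, illustrative only):
 *
 *     bit  7   6 5 4   3 2 1 0
 *        +---+-------+--------+
 *        |st |  pad  |  code  |
 *        +---+-------+--------+
 *
 * st:   whether the first run of the RLE stream is a run of set bits
 * pad:  number of padding bits at the end of the bit stream
 * code: enum drbd_bitmap_code, e.g. RLE_VLI_Bits
 */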
4085
4086 /**
4087  * recv_bm_rle_bits() - decode one RLE-VLI compressed bitmap packet
4088  *
4089  * Return 0 when done, 1 when another iteration is needed, and a negative error
4090  * code upon failure.
4091  */
4092 static int
4093 recv_bm_rle_bits(struct drbd_conf *mdev,
4094                  struct p_compressed_bm *p,
4095                  struct bm_xfer_ctx *c,
4096                  unsigned int len)
4097 {
4098         struct bitstream bs;
4099         u64 look_ahead;
4100         u64 rl;
4101         u64 tmp;
4102         unsigned long s = c->bit_offset;
4103         unsigned long e;
4104         int toggle = dcbp_get_start(p);
4105         int have;
4106         int bits;
4107
4108         bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
4109
4110         bits = bitstream_get_bits(&bs, &look_ahead, 64);
4111         if (bits < 0)
4112                 return -EIO;
4113
4114         for (have = bits; have > 0; s += rl, toggle = !toggle) {
4115                 bits = vli_decode_bits(&rl, look_ahead);
4116                 if (bits <= 0)
4117                         return -EIO;
4118
4119                 if (toggle) {
4120                         e = s + rl - 1;
4121                         if (e >= c->bm_bits) {
4122                                 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
4123                                 return -EIO;
4124                         }
4125                         _drbd_bm_set_bits(mdev, s, e);
4126                 }
4127
4128                 if (have < bits) {
4129                         dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4130                                 have, bits, look_ahead,
4131                                 (unsigned int)(bs.cur.b - p->code),
4132                                 (unsigned int)bs.buf_len);
4133                         return -EIO;
4134                 }
4135                 look_ahead >>= bits;
4136                 have -= bits;
4137
4138                 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4139                 if (bits < 0)
4140                         return -EIO;
4141                 look_ahead |= tmp << have;
4142                 have += bits;
4143         }
4144
4145         c->bit_offset = s;
4146         bm_xfer_ctx_bit_to_word_offset(c);
4147
4148         return (s != c->bm_bits);
4149 }
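
/* Worked example (editor's illustration; actual run lengths come out of
 * vli_decode_bits() above): with dcbp_get_start(p) == 0 and decoded runs
 * 4, 2, 3 the loop proceeds as
 *
 *   run 1: toggle == 0 -> bits 0..3 stay clear, s advances to 4
 *   run 2: toggle == 1 -> _drbd_bm_set_bits() on bits 4..5, s = 6
 *   run 3: toggle == 0 -> bits 6..8 stay clear, s = 9
 *
 * Only "set" runs touch the bitmap; "clear" runs merely advance the bit
 * offset, which keeps the encoding cheap for mostly-in-sync bitmaps.
 */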
4150
4151 /**
4152  * decode_bitmap_c() - dispatch on the compressed bitmap encoding
4153  *
4154  * Return 0 when done, 1 when another iteration is needed, and a negative error
4155  * code upon failure.
4156  */
4157 static int
4158 decode_bitmap_c(struct drbd_conf *mdev,
4159                 struct p_compressed_bm *p,
4160                 struct bm_xfer_ctx *c,
4161                 unsigned int len)
4162 {
4163         if (dcbp_get_code(p) == RLE_VLI_Bits)
4164                 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
4165
4166         /* other variants had been implemented for evaluation,
4167          * but have been dropped as this one turned out to be "best"
4168          * during all our tests. */
4169
4170         dev_err(DEV, "decode_bitmap_c: unknown encoding %u\n", p->encoding);
4171         conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4172         return -EIO;
4173 }
4174
4175 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
4176                 const char *direction, struct bm_xfer_ctx *c)
4177 {
4178         /* what would it take to transfer it "plaintext" */
4179         unsigned int header_size = drbd_header_size(mdev->tconn);
4180         unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4181         unsigned int plain =
4182                 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4183                 c->bm_words * sizeof(unsigned long);
4184         unsigned int total = c->bytes[0] + c->bytes[1];
4185         unsigned int r;
4186
4187         /* total cannot be zero, but just in case: */
4188         if (total == 0)
4189                 return;
4190
4191         /* don't report if not compressed */
4192         if (total >= plain)
4193                 return;
4194
4195         /* total < plain. check for overflow, still */
4196         r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4197                                     : (1000 * total / plain);
4198
4199         if (r > 1000)
4200                 r = 1000;
4201
4202         r = 1000 - r;
4203         dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4204              "total %u; compression: %u.%u%%\n",
4205                         direction,
4206                         c->bytes[1], c->packets[1],
4207                         c->bytes[0], c->packets[0],
4208                         total, r/10, r % 10);
4209 }
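
/* Example of the ratio arithmetic above (editor's illustration): with
 * plain == 4000 bytes and total == 1000 bytes on the wire,
 * r = 1000 * 1000 / 4000 = 250, and 1000 - r = 750 is printed as
 * "compression: 75.0%".
 */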
4210
4211 /* Since we are processing the bitfield from lower addresses to higher,
4212    it does not matter whether we process it in 32 bit chunks or 64 bit
4213    chunks, as long as it is little endian: understood as a byte stream
4214    beginning with the lowest byte, bit 35 always lives in byte 4, no
4215    matter which word size we read with. If we used big endian, we would
4216    need to process it from the highest address to the lowest, in order
4217    to be agnostic to the 32 vs 64 bits issue.
4218    returns 0 on failure, 1 if we successfully received it. */
4219 static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
4220 {
4221         struct drbd_conf *mdev;
4222         struct bm_xfer_ctx c;
4223         int err;
4224
4225         mdev = vnr_to_mdev(tconn, pi->vnr);
4226         if (!mdev)
4227                 return -EIO;
4228
4229         drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4230         /* you are supposed to send additional out-of-sync information
4231          * if you actually set bits during this phase */
4232
4233         c = (struct bm_xfer_ctx) {
4234                 .bm_bits = drbd_bm_bits(mdev),
4235                 .bm_words = drbd_bm_words(mdev),
4236         };
4237
4238         for (;;) {
4239                 if (pi->cmd == P_BITMAP)
4240                         err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
4241                 else if (pi->cmd == P_COMPRESSED_BITMAP) {
4242                         /* MAYBE: sanity check that we speak proto >= 90,
4243                          * and the feature is enabled! */
4244                         struct p_compressed_bm *p = pi->data;
4245
4246                         if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
4247                                 dev_err(DEV, "ReportCBitmap packet too large\n");
4248                                 err = -EIO;
4249                                 goto out;
4250                         }
4251                         if (pi->size <= sizeof(*p)) {
4252                                 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
4253                                 err = -EIO;
4254                                 goto out;
4255                         }
4256                         err = drbd_recv_all(mdev->tconn, p, pi->size);
4257                         if (err)
4258                                goto out;
4259                         err = decode_bitmap_c(mdev, p, &c, pi->size);
4260                 } else {
4261                         dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", pi->cmd);
4262                         err = -EIO;
4263                         goto out;
4264                 }
4265
4266                 c.packets[pi->cmd == P_BITMAP]++;
4267                 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
4268
4269                 if (err <= 0) {
4270                         if (err < 0)
4271                                 goto out;
4272                         break;
4273                 }
4274                 err = drbd_recv_header(mdev->tconn, pi);
4275                 if (err)
4276                         goto out;
4277         }
4278
4279         INFO_bm_xfer_stats(mdev, "receive", &c);
4280
4281         if (mdev->state.conn == C_WF_BITMAP_T) {
4282                 enum drbd_state_rv rv;
4283
4284                 err = drbd_send_bitmap(mdev);
4285                 if (err)
4286                         goto out;
4287                 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
4288                 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4289                 D_ASSERT(rv == SS_SUCCESS);
4290         } else if (mdev->state.conn != C_WF_BITMAP_S) {
4291                 /* admin may have requested C_DISCONNECTING,
4292                  * other threads may have noticed network errors */
4293                 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4294                     drbd_conn_str(mdev->state.conn));
4295         }
4296         err = 0;
4297
4298  out:
4299         drbd_bm_unlock(mdev);
4300         if (!err && mdev->state.conn == C_WF_BITMAP_S)
4301                 drbd_start_resync(mdev, C_SYNC_SOURCE);
4302         return err;
4303 }
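
/* Packet flow of a bitmap exchange (editor's sketch): the peer sends a
 * sequence of P_BITMAP and/or P_COMPRESSED_BITMAP packets until the whole
 * bitmap has been transferred, i.e. until receive_bitmap_plain() or
 * decode_bitmap_c() returns 0.  A node in C_WF_BITMAP_T then answers with
 * its own bitmap and requests C_WF_SYNC_UUID; a node in C_WF_BITMAP_S
 * starts the resync as sync source instead.
 */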
4304
4305 static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
4306 {
4307         conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
4308                  pi->cmd, pi->size);
4309
4310         return ignore_remaining_packet(tconn, pi);
4311 }
4312
4313 static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
4314 {
4315         /* Make sure we've acked all the TCP data associated
4316          * with the data requests being unplugged */
4317         drbd_tcp_quickack(tconn->data.socket);
4318
4319         return 0;
4320 }
4321
4322 static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
4323 {
4324         struct drbd_conf *mdev;
4325         struct p_block_desc *p = pi->data;
4326
4327         mdev = vnr_to_mdev(tconn, pi->vnr);
4328         if (!mdev)
4329                 return -EIO;
4330
4331         switch (mdev->state.conn) {
4332         case C_WF_SYNC_UUID:
4333         case C_WF_BITMAP_T:
4334         case C_BEHIND:
4335                 break;
4336         default:
4337                 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4338                                 drbd_conn_str(mdev->state.conn));
4339         }
4340
4341         drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4342
4343         return 0;
4344 }
4345
4346 struct data_cmd {
4347         int expect_payload;
4348         size_t pkt_size;
4349         int (*fn)(struct drbd_tconn *, struct packet_info *);
4350 };
4351
4352 static struct data_cmd drbd_cmd_handler[] = {
4353         [P_DATA]            = { 1, sizeof(struct p_data), receive_Data },
4354         [P_DATA_REPLY]      = { 1, sizeof(struct p_data), receive_DataReply },
4355         [P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply } ,
4356         [P_BARRIER]         = { 0, sizeof(struct p_barrier), receive_Barrier } ,
4357         [P_BITMAP]          = { 1, 0, receive_bitmap } ,
4358         [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
4359         [P_UNPLUG_REMOTE]   = { 0, 0, receive_UnplugRemote },
4360         [P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
4361         [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4362         [P_SYNC_PARAM]      = { 1, 0, receive_SyncParam },
4363         [P_SYNC_PARAM89]    = { 1, 0, receive_SyncParam },
4364         [P_PROTOCOL]        = { 1, sizeof(struct p_protocol), receive_protocol },
4365         [P_UUIDS]           = { 0, sizeof(struct p_uuids), receive_uuids },
4366         [P_SIZES]           = { 0, sizeof(struct p_sizes), receive_sizes },
4367         [P_STATE]           = { 0, sizeof(struct p_state), receive_state },
4368         [P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
4369         [P_SYNC_UUID]       = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4370         [P_OV_REQUEST]      = { 0, sizeof(struct p_block_req), receive_DataRequest },
4371         [P_OV_REPLY]        = { 1, sizeof(struct p_block_req), receive_DataRequest },
4372         [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4373         [P_DELAY_PROBE]     = { 0, sizeof(struct p_delay_probe93), receive_skip },
4374         [P_OUT_OF_SYNC]     = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4375         [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
4376         [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
4377 };
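
/* How drbdd() below uses an entry (editor's note): pi.cmd indexes the
 * table; pkt_size bytes of sub-header are received into pi.data before
 * the handler runs, and expect_payload says whether additional payload
 * may follow.  E.g. P_BARRIER reads sizeof(struct p_barrier) and then
 * calls receive_Barrier(); any trailing payload would be rejected, since
 * its expect_payload is 0.
 */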
4378
4379 static void drbdd(struct drbd_tconn *tconn)
4380 {
4381         struct packet_info pi;
4382         size_t shs; /* sub header size */
4383         int err;
4384
4385         while (get_t_state(&tconn->receiver) == RUNNING) {
4386                 struct data_cmd *cmd;
4387
4388                 drbd_thread_current_set_cpu(&tconn->receiver);
4389                 if (drbd_recv_header(tconn, &pi))
4390                         goto err_out;
4391
4392                 cmd = &drbd_cmd_handler[pi.cmd];
4393                 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
4394                         conn_err(tconn, "Unexpected data packet %s (0x%04x)\n",
4395                                  cmdname(pi.cmd), pi.cmd);
4396                         goto err_out;
4397                 }
4398
4399                 shs = cmd->pkt_size;
4400                 if (pi.size > shs && !cmd->expect_payload) {
4401                         conn_err(tconn, "No payload expected %s l:%d\n",
4402                                  cmdname(pi.cmd), pi.size);
4403                         goto err_out;
4404                 }
4405
4406                 if (shs) {
4407                         err = drbd_recv_all_warn(tconn, pi.data, shs);
4408                         if (err)
4409                                 goto err_out;
4410                         pi.size -= shs;
4411                 }
4412
4413                 err = cmd->fn(tconn, &pi);
4414                 if (err) {
4415                         conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4416                                  cmdname(pi.cmd), err, pi.size);
4417                         goto err_out;
4418                 }
4419         }
4420         return;
4421
4422     err_out:
4423         conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4424 }
4425
4426 void conn_flush_workqueue(struct drbd_tconn *tconn)
4427 {
4428         struct drbd_wq_barrier barr;
4429
4430         barr.w.cb = w_prev_work_done;
4431         barr.w.tconn = tconn;
4432         init_completion(&barr.done);
4433         drbd_queue_work(&tconn->sender_work, &barr.w);
4434         wait_for_completion(&barr.done);
4435 }
4436
4437 static void conn_disconnect(struct drbd_tconn *tconn)
4438 {
4439         struct drbd_conf *mdev;
4440         enum drbd_conns oc;
4441         int vnr;
4442
4443         if (tconn->cstate == C_STANDALONE)
4444                 return;
4445
4446         /* We are about to start the cleanup after connection loss.
4447          * Make sure drbd_make_request knows about that.
4448          * Usually we should be in some network failure state already,
4449          * but just in case we are not, we fix it up here.
4450          */
4451         conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
4452
4453         /* asender does not clean up anything. it must not interfere, either */
4454         drbd_thread_stop(&tconn->asender);
4455         drbd_free_sock(tconn);
4456
4457         rcu_read_lock();
4458         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
4459                 kref_get(&mdev->kref);
4460                 rcu_read_unlock();
4461                 drbd_disconnected(mdev);
4462                 kref_put(&mdev->kref, &drbd_minor_destroy);
4463                 rcu_read_lock();
4464         }
4465         rcu_read_unlock();
4466
4467         if (!list_empty(&tconn->current_epoch->list))
4468                 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
4469         /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4470         atomic_set(&tconn->current_epoch->epoch_size, 0);
4471         tconn->send.seen_any_write_yet = false;
4472
4473         conn_info(tconn, "Connection closed\n");
4474
4475         if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4476                 conn_try_outdate_peer_async(tconn);
4477
4478         spin_lock_irq(&tconn->req_lock);
4479         oc = tconn->cstate;
4480         if (oc >= C_UNCONNECTED)
4481                 _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
4482
4483         spin_unlock_irq(&tconn->req_lock);
4484
4485         if (oc == C_DISCONNECTING)
4486                 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
4487 }
4488
4489 static int drbd_disconnected(struct drbd_conf *mdev)
4490 {
4491         unsigned int i;
4492
4493         /* wait for current activity to cease. */
4494         spin_lock_irq(&mdev->tconn->req_lock);
4495         _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
4496         _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
4497         _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
4498         spin_unlock_irq(&mdev->tconn->req_lock);
4499
4500         /* We do not have data structures that would allow us to
4501          * get the rs_pending_cnt down to 0 again.
4502          *  * On C_SYNC_TARGET we do not have any data structures describing
4503          *    the pending RSDataRequest's we have sent.
4504          *  * On C_SYNC_SOURCE there is no data structure that tracks
4505          *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4506          *  And no, it is not the sum of the reference counts in the
4507          *  resync_LRU. The resync_LRU tracks the whole operation including
4508          *  the disk-IO, while the rs_pending_cnt only tracks the blocks
4509          *  on the fly. */
4510         drbd_rs_cancel_all(mdev);
4511         mdev->rs_total = 0;
4512         mdev->rs_failed = 0;
4513         atomic_set(&mdev->rs_pending_cnt, 0);
4514         wake_up(&mdev->misc_wait);
4515
4516         del_timer_sync(&mdev->resync_timer);
4517         resync_timer_fn((unsigned long)mdev);
4518
4519         /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4520          * w_make_resync_request etc. which may still be on the worker queue
4521          * to be "canceled" */
4522         drbd_flush_workqueue(mdev);
4523
4524         drbd_finish_peer_reqs(mdev);
4525
4526         /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
4527            might have queued work again. The flush before drbd_finish_peer_reqs() is
4528            necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
4529         drbd_flush_workqueue(mdev);
4530
4531         /* need to do it again, drbd_finish_peer_reqs() may have populated it
4532          * again via drbd_try_clear_on_disk_bm(). */
4533         drbd_rs_cancel_all(mdev);
4534
4535         kfree(mdev->p_uuid);
4536         mdev->p_uuid = NULL;
4537
4538         if (!drbd_suspended(mdev))
4539                 tl_clear(mdev->tconn);
4540
4541         drbd_md_sync(mdev);
4542
4543         /* serialize with bitmap writeout triggered by the state change,
4544          * if any. */
4545         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
4546
4547         /* tcp_close and release of sendpage pages can be deferred.  I don't
4548          * want to use SO_LINGER, because apparently it can be deferred for
4549          * more than 20 seconds (longest time I checked).
4550          *
4551          * Actually we don't care for exactly when the network stack does its
4552          * put_page(), but release our reference on these pages right here.
4553          */
4554         i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
4555         if (i)
4556                 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
4557         i = atomic_read(&mdev->pp_in_use_by_net);
4558         if (i)
4559                 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
4560         i = atomic_read(&mdev->pp_in_use);
4561         if (i)
4562                 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
4563
4564         D_ASSERT(list_empty(&mdev->read_ee));
4565         D_ASSERT(list_empty(&mdev->active_ee));
4566         D_ASSERT(list_empty(&mdev->sync_ee));
4567         D_ASSERT(list_empty(&mdev->done_ee));
4568
4569         return 0;
4570 }
4571
4572 /*
4573  * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4574  * we can agree on is stored in agreed_pro_version.
4575  *
4576  * feature flags and the reserved array should be enough room for future
4577  * enhancements of the handshake protocol, and possible plugins...
4578  *
4579  * for now, they are expected to be zero, but are ignored in any case.
4580  */
4581 static int drbd_send_features(struct drbd_tconn *tconn)
4582 {
4583         struct drbd_socket *sock;
4584         struct p_connection_features *p;
4585
4586         sock = &tconn->data;
4587         p = conn_prepare_command(tconn, sock);
4588         if (!p)
4589                 return -EIO;
4590         memset(p, 0, sizeof(*p));
4591         p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4592         p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
4593         return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
4594 }
4595
4596 /*
4597  * return values:
4598  *   1 yes, we have a valid connection
4599  *   0 oops, did not work out, please try again
4600  *  -1 peer talks different language,
4601  *     no point in trying again, please go standalone.
4602  */
4603 static int drbd_do_features(struct drbd_tconn *tconn)
4604 {
4605         /* ASSERT current == tconn->receiver ... */
4606         struct p_connection_features *p;
4607         const int expect = sizeof(struct p_connection_features);
4608         struct packet_info pi;
4609         int err;
4610
4611         err = drbd_send_features(tconn);
4612         if (err)
4613                 return 0;
4614
4615         err = drbd_recv_header(tconn, &pi);
4616         if (err)
4617                 return 0;
4618
4619         if (pi.cmd != P_CONNECTION_FEATURES) {
4620                 conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
4621                          cmdname(pi.cmd), pi.cmd);
4622                 return -1;
4623         }
4624
4625         if (pi.size != expect) {
4626                 conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
4627                      expect, pi.size);
4628                 return -1;
4629         }
4630
4631         p = pi.data;
4632         err = drbd_recv_all_warn(tconn, p, expect);
4633         if (err)
4634                 return 0;
4635
4636         p->protocol_min = be32_to_cpu(p->protocol_min);
4637         p->protocol_max = be32_to_cpu(p->protocol_max);
4638         if (p->protocol_max == 0)
4639                 p->protocol_max = p->protocol_min;
4640
4641         if (PRO_VERSION_MAX < p->protocol_min ||
4642             PRO_VERSION_MIN > p->protocol_max)
4643                 goto incompat;
4644
4645         tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
4646
4647         conn_info(tconn, "Handshake successful: "
4648              "Agreed network protocol version %d\n", tconn->agreed_pro_version);
4649
4650         return 1;
4651
4652  incompat:
4653         conn_err(tconn, "incompatible DRBD dialects: "
4654             "I support %d-%d, peer supports %d-%d\n",
4655             PRO_VERSION_MIN, PRO_VERSION_MAX,
4656             p->protocol_min, p->protocol_max);
4657         return -1;
4658 }
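
/* Example of the version agreement above (editor's illustration, with
 * hypothetical numbers): if we support protocol versions 86..101 and the
 * peer announces 86..96, the ranges overlap and agreed_pro_version
 * becomes min(101, 96) = 96.  Disjoint ranges take the incompat: path.
 */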
4659
4660 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4661 static int drbd_do_auth(struct drbd_tconn *tconn)
4662 {
4663         conn_err(tconn, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4664         conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4665         return -1;
4666 }
4667 #else
4668 #define CHALLENGE_LEN 64
4669
4670 /* Return value:
4671         1 - auth succeeded,
4672         0 - failed, try again (network error),
4673         -1 - auth failed, don't try again.
4674 */
4675
4676 static int drbd_do_auth(struct drbd_tconn *tconn)
4677 {
4678         struct drbd_socket *sock;
4679         char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
4680         struct scatterlist sg;
4681         char *response = NULL;
4682         char *right_response = NULL;
4683         char *peers_ch = NULL;
4684         unsigned int key_len;
4685         char secret[SHARED_SECRET_MAX]; /* 64 byte */
4686         unsigned int resp_size;
4687         struct hash_desc desc;
4688         struct packet_info pi;
4689         struct net_conf *nc;
4690         int err, rv;
4691
4692         /* FIXME: Put the challenge/response into the preallocated socket buffer.  */
4693
4694         rcu_read_lock();
4695         nc = rcu_dereference(tconn->net_conf);
4696         key_len = strlen(nc->shared_secret);
4697         memcpy(secret, nc->shared_secret, key_len);
4698         rcu_read_unlock();
4699
4700         desc.tfm = tconn->cram_hmac_tfm;
4701         desc.flags = 0;
4702
4703         rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
4704         if (rv) {
4705                 conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
4706                 rv = -1;
4707                 goto fail;
4708         }
4709
4710         get_random_bytes(my_challenge, CHALLENGE_LEN);
4711
4712         sock = &tconn->data;
4713         if (!conn_prepare_command(tconn, sock)) {
4714                 rv = 0;
4715                 goto fail;
4716         }
4717         rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
4718                                 my_challenge, CHALLENGE_LEN);
4719         if (!rv)
4720                 goto fail;
4721
4722         err = drbd_recv_header(tconn, &pi);
4723         if (err) {
4724                 rv = 0;
4725                 goto fail;
4726         }
4727
4728         if (pi.cmd != P_AUTH_CHALLENGE) {
4729                 conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4730                          cmdname(pi.cmd), pi.cmd);
4731                 rv = 0;
4732                 goto fail;
4733         }
4734
4735         if (pi.size > CHALLENGE_LEN * 2) {
4736                 conn_err(tconn, "AuthChallenge payload too big.\n");
4737                 rv = -1;
4738                 goto fail;
4739         }
4740
4741         peers_ch = kmalloc(pi.size, GFP_NOIO);
4742         if (peers_ch == NULL) {
4743                 conn_err(tconn, "kmalloc of peers_ch failed\n");
4744                 rv = -1;
4745                 goto fail;
4746         }
4747
4748         err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
4749         if (err) {
4750                 rv = 0;
4751                 goto fail;
4752         }
4753
4754         resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
4755         response = kmalloc(resp_size, GFP_NOIO);
4756         if (response == NULL) {
4757                 conn_err(tconn, "kmalloc of response failed\n");
4758                 rv = -1;
4759                 goto fail;
4760         }
4761
4762         sg_init_table(&sg, 1);
4763         sg_set_buf(&sg, peers_ch, pi.size);
4764
4765         rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4766         if (rv) {
4767                 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
4768                 rv = -1;
4769                 goto fail;
4770         }
4771
4772         if (!conn_prepare_command(tconn, sock)) {
4773                 rv = 0;
4774                 goto fail;
4775         }
4776         rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
4777                                 response, resp_size);
4778         if (!rv)
4779                 goto fail;
4780
4781         err = drbd_recv_header(tconn, &pi);
4782         if (err) {
4783                 rv = 0;
4784                 goto fail;
4785         }
4786
4787         if (pi.cmd != P_AUTH_RESPONSE) {
4788                 conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
4789                          cmdname(pi.cmd), pi.cmd);
4790                 rv = 0;
4791                 goto fail;
4792         }
4793
4794         if (pi.size != resp_size) {
4795                 conn_err(tconn, "AuthResponse payload of unexpected size\n");
4796                 rv = 0;
4797                 goto fail;
4798         }
4799
4800         err = drbd_recv_all_warn(tconn, response, resp_size);
4801         if (err) {
4802                 rv = 0;
4803                 goto fail;
4804         }
4805
4806         right_response = kmalloc(resp_size, GFP_NOIO);
4807         if (right_response == NULL) {
4808                 conn_err(tconn, "kmalloc of right_response failed\n");
4809                 rv = -1;
4810                 goto fail;
4811         }
4812
4813         sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4814
4815         rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4816         if (rv) {
4817                 conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
4818                 rv = -1;
4819                 goto fail;
4820         }
4821
4822         rv = !memcmp(response, right_response, resp_size);
4823
4824         if (rv)
4825                 conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
4826                      resp_size);
4827         else
4828                 rv = -1;
4829
4830  fail:
4831         kfree(peers_ch);
4832         kfree(response);
4833         kfree(right_response);
4834
4835         return rv;
4836 }
4837 #endif
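
/* Shape of the symmetric challenge-response exchange above (editor's
 * sketch; both peers run drbd_do_auth() concurrently):
 *
 *   local                                        peer
 *     | --- P_AUTH_CHALLENGE, 64 random bytes --> |
 *     | <-- P_AUTH_CHALLENGE, peers_ch ---------- |
 *     | --- P_AUTH_RESPONSE = HMAC(secret,        |
 *     |                       peers_ch) --------> |
 *     | <-- P_AUTH_RESPONSE, response ----------- |
 *
 * Each side then recomputes HMAC(secret, my_challenge) as right_response
 * and memcmp()s it against what the peer sent back.
 */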
4838
4839 int drbdd_init(struct drbd_thread *thi)
4840 {
4841         struct drbd_tconn *tconn = thi->tconn;
4842         int h;
4843
4844         conn_info(tconn, "receiver (re)started\n");
4845
4846         do {
4847                 h = conn_connect(tconn);
4848                 if (h == 0) {
4849                         conn_disconnect(tconn);
4850                         schedule_timeout_interruptible(HZ);
4851                 }
4852                 if (h == -1) {
4853                         conn_warn(tconn, "Discarding network configuration.\n");
4854                         conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
4855                 }
4856         } while (h == 0);
4857
4858         if (h > 0)
4859                 drbdd(tconn);
4860
4861         conn_disconnect(tconn);
4862
4863         conn_info(tconn, "receiver terminated\n");
4864         return 0;
4865 }
4866
4867 /* ********* acknowledge sender ******** */
4868
4869 static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
4870 {
4871         struct p_req_state_reply *p = pi->data;
4872         int retcode = be32_to_cpu(p->retcode);
4873
4874         if (retcode >= SS_SUCCESS) {
4875                 set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
4876         } else {
4877                 set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
4878                 conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
4879                          drbd_set_st_err_str(retcode), retcode);
4880         }
4881         wake_up(&tconn->ping_wait);
4882
4883         return 0;
4884 }
4885
4886 static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
4887 {
4888         struct drbd_conf *mdev;
4889         struct p_req_state_reply *p = pi->data;
4890         int retcode = be32_to_cpu(p->retcode);
4891
4892         mdev = vnr_to_mdev(tconn, pi->vnr);
4893         if (!mdev)
4894                 return -EIO;
4895
4896         if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
4897                 D_ASSERT(tconn->agreed_pro_version < 100);
4898                 return got_conn_RqSReply(tconn, pi);
4899         }
4900
4901         if (retcode >= SS_SUCCESS) {
4902                 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4903         } else {
4904                 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4905                 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4906                         drbd_set_st_err_str(retcode), retcode);
4907         }
4908         wake_up(&mdev->state_wait);
4909
4910         return 0;
4911 }
4912
4913 static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
4914 {
4915         return drbd_send_ping_ack(tconn);
4917 }
4918
4919 static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
4920 {
4921         /* restore idle timeout */
4922         tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int * HZ;
4923         if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
4924                 wake_up(&tconn->ping_wait);
4925
4926         return 0;
4927 }
4928
4929 static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
4930 {
4931         struct drbd_conf *mdev;
4932         struct p_block_ack *p = pi->data;
4933         sector_t sector = be64_to_cpu(p->sector);
4934         int blksize = be32_to_cpu(p->blksize);
4935
4936         mdev = vnr_to_mdev(tconn, pi->vnr);
4937         if (!mdev)
4938                 return -EIO;
4939
4940         D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
4941
4942         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4943
4944         if (get_ldev(mdev)) {
4945                 drbd_rs_complete_io(mdev, sector);
4946                 drbd_set_in_sync(mdev, sector, blksize);
4947                 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4948                 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4949                 put_ldev(mdev);
4950         }
4951         dec_rs_pending(mdev);
4952         atomic_add(blksize >> 9, &mdev->rs_sect_in);
4953
4954         return 0;
4955 }
4956
4957 static int
4958 validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4959                               struct rb_root *root, const char *func,
4960                               enum drbd_req_event what, bool missing_ok)
4961 {
4962         struct drbd_request *req;
4963         struct bio_and_error m;
4964
4965         spin_lock_irq(&mdev->tconn->req_lock);
4966         req = find_request(mdev, root, id, sector, missing_ok, func);
4967         if (unlikely(!req)) {
4968                 spin_unlock_irq(&mdev->tconn->req_lock);
4969                 return -EIO;
4970         }
4971         __req_mod(req, what, &m);
4972         spin_unlock_irq(&mdev->tconn->req_lock);
4973
4974         if (m.bio)
4975                 complete_master_bio(mdev, &m);
4976         return 0;
4977 }
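
/* Usage (editor's note): the ack handlers below map the peer's
 * (block_id, sector) pair back to the original drbd_request via the
 * write_requests/read_requests trees, feed one req_mod event into the
 * request state machine (e.g. WRITE_ACKED_BY_PEER), and complete the
 * master bio if that event finished the request.
 */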
4978
4979 static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
4980 {
4981         struct drbd_conf *mdev;
4982         struct p_block_ack *p = pi->data;
4983         sector_t sector = be64_to_cpu(p->sector);
4984         int blksize = be32_to_cpu(p->blksize);
4985         enum drbd_req_event what;
4986
4987         mdev = vnr_to_mdev(tconn, pi->vnr);
4988         if (!mdev)
4989                 return -EIO;
4990
4991         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4992
4993         if (p->block_id == ID_SYNCER) {
4994                 drbd_set_in_sync(mdev, sector, blksize);
4995                 dec_rs_pending(mdev);
4996                 return 0;
4997         }
4998         switch (pi->cmd) {
4999         case P_RS_WRITE_ACK:
5000                 what = WRITE_ACKED_BY_PEER_AND_SIS;
5001                 break;
5002         case P_WRITE_ACK:
5003                 what = WRITE_ACKED_BY_PEER;
5004                 break;
5005         case P_RECV_ACK:
5006                 what = RECV_ACKED_BY_PEER;
5007                 break;
5008         case P_SUPERSEDED:
5009                 what = CONFLICT_RESOLVED;
5010                 break;
5011         case P_RETRY_WRITE:
5012                 what = POSTPONE_WRITE;
5013                 break;
5014         default:
5015                 BUG();
5016         }
5017
5018         return validate_req_change_req_state(mdev, p->block_id, sector,
5019                                              &mdev->write_requests, __func__,
5020                                              what, false);
5021 }
5022
5023 static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
5024 {
5025         struct drbd_conf *mdev;
5026         struct p_block_ack *p = pi->data;
5027         sector_t sector = be64_to_cpu(p->sector);
5028         int size = be32_to_cpu(p->blksize);
5029         int err;
5030
5031         mdev = vnr_to_mdev(tconn, pi->vnr);
5032         if (!mdev)
5033                 return -EIO;
5034
5035         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5036
5037         if (p->block_id == ID_SYNCER) {
5038                 dec_rs_pending(mdev);
5039                 drbd_rs_failed_io(mdev, sector, size);
5040                 return 0;
5041         }
5042
5043         err = validate_req_change_req_state(mdev, p->block_id, sector,
5044                                             &mdev->write_requests, __func__,
5045                                             NEG_ACKED, true);
5046         if (err) {
5047                 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5048                    The master bio might already be completed, therefore the
5049                    request is no longer in the collision hash. */
5050                 /* In Protocol B we might already have got a P_RECV_ACK
5051                    but then get a P_NEG_ACK afterwards. */
5052                 drbd_set_out_of_sync(mdev, sector, size);
5053         }
5054         return 0;
5055 }
5056
5057 static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
5058 {
5059         struct drbd_conf *mdev;
5060         struct p_block_ack *p = pi->data;
5061         sector_t sector = be64_to_cpu(p->sector);
5062
5063         mdev = vnr_to_mdev(tconn, pi->vnr);
5064         if (!mdev)
5065                 return -EIO;
5066
5067         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5068
5069         dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
5070             (unsigned long long)sector, be32_to_cpu(p->blksize));
5071
5072         return validate_req_change_req_state(mdev, p->block_id, sector,
5073                                              &mdev->read_requests, __func__,
5074                                              NEG_ACKED, false);
5075 }
5076
5077 static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
5078 {
5079         struct drbd_conf *mdev;
5080         sector_t sector;
5081         int size;
5082         struct p_block_ack *p = pi->data;
5083
5084         mdev = vnr_to_mdev(tconn, pi->vnr);
5085         if (!mdev)
5086                 return -EIO;
5087
5088         sector = be64_to_cpu(p->sector);
5089         size = be32_to_cpu(p->blksize);
5090
5091         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5092
5093         dec_rs_pending(mdev);
5094
5095         if (get_ldev_if_state(mdev, D_FAILED)) {
5096                 drbd_rs_complete_io(mdev, sector);
5097                 switch (pi->cmd) {
5098                 case P_NEG_RS_DREPLY:
5099                         drbd_rs_failed_io(mdev, sector, size);
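                        /* fall through */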
5100                 case P_RS_CANCEL:
5101                         break;
5102                 default:
5103                         BUG();
5104                 }
5105                 put_ldev(mdev);
5106         }
5107
5108         return 0;
5109 }
5110
5111 static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
5112 {
5113         struct p_barrier_ack *p = pi->data;
5114         struct drbd_conf *mdev;
5115         int vnr;
5116
5117         tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
5118
5119         rcu_read_lock();
5120         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5121                 if (mdev->state.conn == C_AHEAD &&
5122                     atomic_read(&mdev->ap_in_flight) == 0 &&
5123                     !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
5124                         mdev->start_resync_timer.expires = jiffies + HZ;
5125                         add_timer(&mdev->start_resync_timer);
5126                 }
5127         }
5128         rcu_read_unlock();
5129
5130         return 0;
5131 }
5132
5133 static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
5134 {
5135         struct drbd_conf *mdev;
5136         struct p_block_ack *p = pi->data;
5137         struct drbd_work *w;
5138         sector_t sector;
5139         int size;
5140
5141         mdev = vnr_to_mdev(tconn, pi->vnr);
5142         if (!mdev)
5143                 return -EIO;
5144
5145         sector = be64_to_cpu(p->sector);
5146         size = be32_to_cpu(p->blksize);
5147
5148         update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5149
5150         if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
5151                 drbd_ov_out_of_sync_found(mdev, sector, size);
5152         else
5153                 ov_out_of_sync_print(mdev);
5154
5155         if (!get_ldev(mdev))
5156                 return 0;
5157
5158         drbd_rs_complete_io(mdev, sector);
5159         dec_rs_pending(mdev);
5160
5161         --mdev->ov_left;
5162
5163         /* let's advance progress step marks only for every other megabyte */
5164         if ((mdev->ov_left & 0x200) == 0x200)
5165                 drbd_advance_rs_marks(mdev, mdev->ov_left);
5166
5167         if (mdev->ov_left == 0) {
5168                 w = kmalloc(sizeof(*w), GFP_NOIO);
5169                 if (w) {
5170                         w->cb = w_ov_finished;
5171                         w->mdev = mdev;
5172                         drbd_queue_work(&mdev->tconn->sender_work, w);
5173                 } else {
5174                         dev_err(DEV, "kmalloc(w) failed.");
5175                         ov_out_of_sync_print(mdev);
5176                         drbd_resync_finished(mdev);
5177                 }
5178         }
5179         put_ldev(mdev);
5180         return 0;
5181 }
5182
5183 static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
5184 {
5185         return 0;
5186 }
5187
5188 static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
5189 {
5190         struct drbd_conf *mdev;
5191         int vnr, not_empty = 0;
5192
5193         do {
5194                 clear_bit(SIGNAL_ASENDER, &tconn->flags);
5195                 flush_signals(current);
5196
5197                 rcu_read_lock();
5198                 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5199                         kref_get(&mdev->kref);
5200                         rcu_read_unlock();
5201                         if (drbd_finish_peer_reqs(mdev)) {
5202                                 kref_put(&mdev->kref, &drbd_minor_destroy);
5203                                 return 1;
5204                         }
5205                         kref_put(&mdev->kref, &drbd_minor_destroy);
5206                         rcu_read_lock();
5207                 }
5208                 set_bit(SIGNAL_ASENDER, &tconn->flags);
5209
5210                 spin_lock_irq(&tconn->req_lock);
5211                 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5212                         not_empty = !list_empty(&mdev->done_ee);
5213                         if (not_empty)
5214                                 break;
5215                 }
5216                 spin_unlock_irq(&tconn->req_lock);
5217                 rcu_read_unlock();
5218         } while (not_empty);
5219
5220         return 0;
5221 }
5222
5223 struct asender_cmd {
5224         size_t pkt_size;
5225         int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
5226 };
5227
5228 static struct asender_cmd asender_tbl[] = {
5229         [P_PING]            = { 0, got_Ping },
5230         [P_PING_ACK]        = { 0, got_PingAck },
5231         [P_RECV_ACK]        = { sizeof(struct p_block_ack), got_BlockAck },
5232         [P_WRITE_ACK]       = { sizeof(struct p_block_ack), got_BlockAck },
5233         [P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
5234         [P_SUPERSEDED]      = { sizeof(struct p_block_ack), got_BlockAck },
5235         [P_NEG_ACK]         = { sizeof(struct p_block_ack), got_NegAck },
5236         [P_NEG_DREPLY]      = { sizeof(struct p_block_ack), got_NegDReply },
5237         [P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
5238         [P_OV_RESULT]       = { sizeof(struct p_block_ack), got_OVResult },
5239         [P_BARRIER_ACK]     = { sizeof(struct p_barrier_ack), got_BarrierAck },
5240         [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5241         [P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
5242         [P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
5243         [P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply },
5244         [P_CONN_ST_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_conn_RqSReply },
5245         [P_RETRY_WRITE]     = { sizeof(struct p_block_ack), got_BlockAck },
5246 };
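
/* Receive state machine of drbd_asender() below (editor's note): expect
 * starts at the header size; once a full header has been received,
 * decode_header() identifies the command and expect grows by its
 * pkt_size.  E.g. for a P_WRITE_ACK, a second receive round collects
 * sizeof(struct p_block_ack) more bytes before got_BlockAck() runs.
 */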
5247
5248 int drbd_asender(struct drbd_thread *thi)
5249 {
5250         struct drbd_tconn *tconn = thi->tconn;
5251         struct asender_cmd *cmd = NULL;
5252         struct packet_info pi;
5253         int rv;
5254         void *buf    = tconn->meta.rbuf;
5255         int received = 0;
5256         unsigned int header_size = drbd_header_size(tconn);
5257         int expect   = header_size;
5258         bool ping_timeout_active = false;
5259         struct net_conf *nc;
5260         int ping_timeo, tcp_cork, ping_int;
5261
5262         current->policy = SCHED_RR;  /* Make this a realtime task! */
5263         current->rt_priority = 2;    /* more important than all other tasks */
5264
5265         while (get_t_state(thi) == RUNNING) {
5266                 drbd_thread_current_set_cpu(thi);
5267
5268                 rcu_read_lock();
5269                 nc = rcu_dereference(tconn->net_conf);
5270                 ping_timeo = nc->ping_timeo;
5271                 tcp_cork = nc->tcp_cork;
5272                 ping_int = nc->ping_int;
5273                 rcu_read_unlock();
5274
5275                 if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
5276                         if (drbd_send_ping(tconn)) {
5277                                 conn_err(tconn, "drbd_send_ping has failed\n");
5278                                 goto reconnect;
5279                         }
5280                         tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
5281                         ping_timeout_active = true;
5282                 }
5283
5284                 /* TODO: conditionally cork; it may hurt latency if we cork without
5285                    much to send */
5286                 if (tcp_cork)
5287                         drbd_tcp_cork(tconn->meta.socket);
5288                 if (tconn_finish_peer_reqs(tconn)) {
5289                         conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
5290                         goto reconnect;
5291                 }
5292                 /* but unconditionally uncork unless disabled */
5293                 if (tcp_cork)
5294                         drbd_tcp_uncork(tconn->meta.socket);
5295
5296                 /* short circuit, recv_msg would return EINTR anyways. */
5297                 if (signal_pending(current))
5298                         continue;
5299
5300                 rv = drbd_recv_short(tconn->meta.socket, buf, expect - received, 0);
5301                 clear_bit(SIGNAL_ASENDER, &tconn->flags);
5302
5303                 flush_signals(current);
5304
5305                 /* Note:
5306                  * -EINTR        (on meta) we got a signal
5307                  * -EAGAIN       (on meta) rcvtimeo expired
5308                  * -ECONNRESET   other side closed the connection
5309                  * -ERESTARTSYS  (on data) we got a signal
5310                  * rv <  0       other than above: unexpected error!
5311                  * rv == expected: full header or command
5312                  * rv <  expected: "woken" by signal during receive
5313                  * rv == 0       : "connection shut down by peer"
5314                  */
5315                 if (likely(rv > 0)) {
5316                         received += rv;
5317                         buf      += rv;
5318                 } else if (rv == 0) {
5319                         if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
5320                                 long t;
5321                                 rcu_read_lock();
5322                                 t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
5323                                 rcu_read_unlock();
5324
5325                                 t = wait_event_timeout(tconn->ping_wait,
5326                                                        tconn->cstate < C_WF_REPORT_PARAMS,
5327                                                        t);
5328                                 if (t)
5329                                         break;
5330                         }
5331                         conn_err(tconn, "meta connection shut down by peer.\n");
5332                         goto reconnect;
5333                 } else if (rv == -EAGAIN) {
5334                         /* If the data socket received something meanwhile,
5335                          * that is good enough: peer is still alive. */
5336                         if (time_after(tconn->last_received,
5337                                 jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
5338                                 continue;
5339                         if (ping_timeout_active) {
5340                                 conn_err(tconn, "PingAck did not arrive in time.\n");
5341                                 goto reconnect;
5342                         }
5343                         set_bit(SEND_PING, &tconn->flags);
5344                         continue;
5345                 } else if (rv == -EINTR) {
5346                         continue;
5347                 } else {
5348                         conn_err(tconn, "sock_recvmsg returned %d\n", rv);
5349                         goto reconnect;
5350                 }
5351
5352                 if (received == expect && cmd == NULL) {
5353                         if (decode_header(tconn, tconn->meta.rbuf, &pi))
5354                                 goto reconnect;
5355                         cmd = &asender_tbl[pi.cmd];
5356                         if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
5357                                 conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
5358                                          cmdname(pi.cmd), pi.cmd);
5359                                 goto disconnect;
5360                         }
5361                         expect = header_size + cmd->pkt_size;
5362                         if (pi.size != expect - header_size) {
5363                                 conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
5364                                         pi.cmd, pi.size);
5365                                 goto reconnect;
5366                         }
5367                 }
5368                 if (received == expect) {
5369                         int err;
5370
5371                         err = cmd->fn(tconn, &pi);
5372                         if (err) {
5373                                 conn_err(tconn, "%pf failed\n", cmd->fn);
5374                                 goto reconnect;
5375                         }
5376
5377                         tconn->last_received = jiffies;
5378
5379                         if (cmd == &asender_tbl[P_PING_ACK]) {
5380                                 /* restore idle timeout */
5381                                 tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
5382                                 ping_timeout_active = false;
5383                         }
5384
5385                         buf      = tconn->meta.rbuf;
5386                         received = 0;
5387                         expect   = header_size;
5388                         cmd      = NULL;
5389                 }
5390         }
5391
5392         if (0) {
5393 reconnect:
5394                 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
5395                 conn_md_sync(tconn);
5396         }
5397         if (0) {
5398 disconnect:
5399                 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
5400         }
5401         clear_bit(SIGNAL_ASENDER, &tconn->flags);
5402
5403         conn_info(tconn, "asender terminated\n");
5404
5405         return 0;
5406 }