/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds.h"
#include "ib.h"

static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);

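/*
 * Each recv work request carries two SGEs: r_sge[0] points at this
 * entry's slot in the DMA-mapped header array (i_recv_hdrs), and
 * r_sge[1] will point at a data fragment of RDS_FRAG_SIZE once the
 * entry is refilled.  Only the constant parts are wired up here; the
 * fragment address is filled in by rds_ib_recv_refill_one().
 */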
void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
        struct rds_ib_recv_work *recv;
        u32 i;

        for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
                struct ib_sge *sge;

                recv->r_ibinc = NULL;
                recv->r_frag = NULL;

                recv->r_wr.next = NULL;
                recv->r_wr.wr_id = i;
                recv->r_wr.sg_list = recv->r_sge;
                recv->r_wr.num_sge = RDS_IB_RECV_SGE;

                sge = &recv->r_sge[0];
                sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
                sge->length = sizeof(struct rds_header);
                sge->lkey = ic->i_mr->lkey;

                sge = &recv->r_sge[1];
                sge->addr = 0;
                sge->length = RDS_FRAG_SIZE;
                sge->lkey = ic->i_mr->lkey;
        }
}

/*
 * The entire 'from' list, including the from element itself, is put onto
 * the tail of the 'to' list.
 */
static void list_splice_entire_tail(struct list_head *from,
                                    struct list_head *to)
{
        struct list_head *from_last = from->prev;

        list_splice_tail(from_last, to);
        list_add_tail(from_last, to);
}
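
/*
 * A sketch of the headless-list convention used by the caches below:
 * the anchor pointer names an element of the list rather than a
 * separate list_head, so a one-element "list" is a node linked to
 * itself:
 *
 *      first -> [A] <-> [B] <-> [C] -+
 *        ^                           |
 *        +---------------------------+
 *
 * list_splice_entire_tail(from, to) therefore splices everything
 * *except* from's last node (which list_splice_tail() treats as the
 * head) and then re-adds that node, so the whole chain, anchor
 * included, ends up on 'to'.
 */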

static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
{
        struct list_head *tmp;

        tmp = xchg(&cache->xfer, NULL);
        if (tmp) {
                if (cache->ready)
                        list_splice_entire_tail(tmp, cache->ready);
                else
                        cache->ready = tmp;
        }
}

static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
{
        struct rds_ib_cache_head *head;
        int cpu;

        cache->percpu = alloc_percpu(struct rds_ib_cache_head);
        if (!cache->percpu)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                head = per_cpu_ptr(cache->percpu, cpu);
                head->first = NULL;
                head->count = 0;
        }
        cache->xfer = NULL;
        cache->ready = NULL;

        return 0;
}

int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
{
        int ret;

        ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
        if (!ret) {
                ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
                if (ret)
                        free_percpu(ic->i_cache_incs.percpu);
        }

        return ret;
}

static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
                                          struct list_head *caller_list)
{
        struct rds_ib_cache_head *head;
        int cpu;

        for_each_possible_cpu(cpu) {
                head = per_cpu_ptr(cache->percpu, cpu);
                if (head->first) {
                        list_splice_entire_tail(head->first, caller_list);
                        head->first = NULL;
                }
        }

        if (cache->ready) {
                list_splice_entire_tail(cache->ready, caller_list);
                cache->ready = NULL;
        }
}

void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
{
        struct rds_ib_incoming *inc;
        struct rds_ib_incoming *inc_tmp;
        struct rds_page_frag *frag;
        struct rds_page_frag *frag_tmp;
        LIST_HEAD(list);
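        /*
         * 'list' is reused for the frag cache below; the inc loop removes
         * every entry it visits, so the list is empty again by then.
         */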

        rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
        rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
        free_percpu(ic->i_cache_incs.percpu);

        list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
                list_del(&inc->ii_cache_entry);
                WARN_ON(!list_empty(&inc->ii_frags));
                kmem_cache_free(rds_ib_incoming_slab, inc);
        }

        rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
        rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
        free_percpu(ic->i_cache_frags.percpu);

        list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
                list_del(&frag->f_cache_entry);
                WARN_ON(!list_empty(&frag->f_item));
                kmem_cache_free(rds_ib_frag_slab, frag);
        }
}

/* fwd decl */
static void rds_ib_recv_cache_put(struct list_head *new_item,
                                  struct rds_ib_refill_cache *cache);
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);


/* Recycle frag and attached recv buffer f_sg */
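/*
 * Note that the frag keeps its page reference so the buffer can be
 * reused; any DMA mapping must already have been torn down by the
 * caller (see rds_ib_recv_clear_one() and rds_poll_cq()).
 */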
static void rds_ib_frag_free(struct rds_ib_connection *ic,
                             struct rds_page_frag *frag)
{
        rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));

        rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
}

/* Recycle inc after freeing attached frags */
void rds_ib_inc_free(struct rds_incoming *inc)
{
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
        struct rds_page_frag *pos;
        struct rds_ib_connection *ic = inc->i_conn->c_transport_data;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

        /* Free attached frags */
        list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
                list_del_init(&frag->f_item);
                rds_ib_frag_free(ic, frag);
        }
        BUG_ON(!list_empty(&ibinc->ii_frags));

        rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
        rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
}

static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
                                  struct rds_ib_recv_work *recv)
{
        if (recv->r_ibinc) {
                rds_inc_put(&recv->r_ibinc->ii_inc);
                recv->r_ibinc = NULL;
        }
        if (recv->r_frag) {
                ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
                rds_ib_frag_free(ic, recv->r_frag);
                recv->r_frag = NULL;
        }
}

void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
        u32 i;

        for (i = 0; i < ic->i_recv_ring.w_nr; i++)
                rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
}

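/*
 * Incs are either taken from the per-connection cache or allocated
 * fresh.  Fresh allocations are bounded by the global rds_ib_allocation
 * counter against rds_ib_sysctl_max_recv_allocation; cached incs are
 * recycled without touching the counter.
 */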
static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
                                                     gfp_t slab_mask)
{
        struct rds_ib_incoming *ibinc;
        struct list_head *cache_item;
        int avail_allocs;

        cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
        if (cache_item) {
                ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
        } else {
                avail_allocs = atomic_add_unless(&rds_ib_allocation,
                                                 1, rds_ib_sysctl_max_recv_allocation);
                if (!avail_allocs) {
                        rds_ib_stats_inc(s_ib_rx_alloc_limit);
                        return NULL;
                }
                ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
                if (!ibinc) {
                        atomic_dec(&rds_ib_allocation);
                        return NULL;
                }
        }
        INIT_LIST_HEAD(&ibinc->ii_frags);
        rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);

        return ibinc;
}

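/*
 * Frags likewise come from the cache when possible.  A fresh frag gets
 * its RDS_FRAG_SIZE buffer from rds_page_remainder_alloc(), which may
 * carve it out of a page shared with other frags, so each frag holds
 * its own page reference and a per-frag offset in f_sg.
 */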
static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
                                                    gfp_t slab_mask, gfp_t page_mask)
{
        struct rds_page_frag *frag;
        struct list_head *cache_item;
        int ret;

        cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
        if (cache_item) {
                frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
        } else {
                frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
                if (!frag)
                        return NULL;

                sg_init_table(&frag->f_sg, 1);
                ret = rds_page_remainder_alloc(&frag->f_sg,
                                               RDS_FRAG_SIZE, page_mask);
                if (ret) {
                        kmem_cache_free(rds_ib_frag_slab, frag);
                        return NULL;
                }
        }

        INIT_LIST_HEAD(&frag->f_item);

        return frag;
}

static int rds_ib_recv_refill_one(struct rds_connection *conn,
                                  struct rds_ib_recv_work *recv, int prefill)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct ib_sge *sge;
        int ret = -ENOMEM;
        gfp_t slab_mask = GFP_NOWAIT;
        gfp_t page_mask = GFP_NOWAIT;

        if (prefill) {
                slab_mask = GFP_KERNEL;
                page_mask = GFP_HIGHUSER;
        }

        if (!ic->i_cache_incs.ready)
                rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
        if (!ic->i_cache_frags.ready)
                rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);

        /*
         * The ibinc was taken from this recv if the recv held the start of
         * a message; recvs that carried continuations still have theirs
         * allocated.
         */
        if (!recv->r_ibinc) {
                recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
                if (!recv->r_ibinc)
                        goto out;
        }

        WARN_ON(recv->r_frag); /* leak! */
        recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
        if (!recv->r_frag)
                goto out;

        ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
                            1, DMA_FROM_DEVICE);
        WARN_ON(ret != 1);

        sge = &recv->r_sge[0];
        sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
        sge->length = sizeof(struct rds_header);

        sge = &recv->r_sge[1];
        sge->addr = ib_sg_dma_address(ic->i_cm_id->device, &recv->r_frag->f_sg);
        sge->length = ib_sg_dma_len(ic->i_cm_id->device, &recv->r_frag->f_sg);

        ret = 0;
out:
        return ret;
}

/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.
 *
 * Refilling stops early if an allocation or post fails due to temporary
 * resource exhaustion; the ring entry reserved for the failed attempt is
 * returned to the ring below.
 */
void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_recv_work *recv;
        struct ib_recv_wr *failed_wr;
        unsigned int posted = 0;
        int ret = 0;
        u32 pos;

        while ((prefill || rds_conn_up(conn)) &&
               rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
                if (pos >= ic->i_recv_ring.w_nr) {
                        printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
                                        pos);
                        break;
                }

                recv = &ic->i_recvs[pos];
                ret = rds_ib_recv_refill_one(conn, recv, prefill);
                if (ret)
                        break;

                /* XXX when can this fail? */
                ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
                rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
                         recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
                         (long) ib_sg_dma_address(
                                ic->i_cm_id->device,
                                &recv->r_frag->f_sg),
                        ret);
                if (ret) {
                        rds_ib_conn_error(conn, "recv post on "
                               "%pI4 returned %d, disconnecting and "
                               "reconnecting\n", &conn->c_faddr,
                               ret);
                        break;
                }

                posted++;
        }

        /* We're doing flow control - update the window. */
        if (ic->i_flowctl && posted)
                rds_ib_advertise_credits(conn, posted);

        if (ret)
                rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
}

/*
 * We want to recycle several types of recv allocations, like incs and frags.
 * To use this, the *_free() function passes in the ptr to a list_head within
 * the recyclee, as well as the cache to put it on.
 *
 * First, we put the memory on a percpu list. When this reaches a certain size,
 * we move it to an intermediate non-percpu list in a lockless manner, with some
 * xchg/cmpxchg wizardry.
 *
 * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
 * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
 * list_empty() will return true even when one element is actually present.
 */
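/*
 * Rough lifecycle of a recycled entry, as implemented below:
 *
 *   percpu->first (per-cpu, IRQs off in cache_put)
 *        | batches of RDS_IB_RECYCLE_BATCH_COUNT
 *        v
 *   cache->xfer   (shared, handed off with xchg/cmpxchg)
 *        | rds_ib_cache_xfer_to_ready()
 *        v
 *   cache->ready  (only touched on the refill path, via cache_get)
 */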
static void rds_ib_recv_cache_put(struct list_head *new_item,
                                 struct rds_ib_refill_cache *cache)
{
        unsigned long flags;
        struct rds_ib_cache_head *chp;
        struct list_head *old;

        local_irq_save(flags);

        /* IRQs are off, so this CPU's cache head can't change under us. */
        chp = this_cpu_ptr(cache->percpu);
        if (!chp->first)
                INIT_LIST_HEAD(new_item);
        else /* put on front */
                list_add_tail(new_item, chp->first);

        chp->first = new_item;
        chp->count++;

        if (chp->count < RDS_IB_RECYCLE_BATCH_COUNT)
                goto end;

        /*
         * Return our per-cpu first list to the cache's xfer by atomically
         * grabbing the current xfer list, appending it to our per-cpu list,
         * and then atomically returning that entire list back to the
         * cache's xfer list as long as it's still empty.
         */
        do {
                old = xchg(&cache->xfer, NULL);
                if (old)
                        list_splice_entire_tail(old, chp->first);
                old = cmpxchg(&cache->xfer, NULL, chp->first);
        } while (old);

        chp->first = NULL;
        chp->count = 0;
end:
        local_irq_restore(flags);
}

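/*
 * Pop the first entry off the ready list.  Because the anchor is itself
 * an element (see the N.B. above), a ready list whose anchor links to
 * itself holds exactly one entry, and taking it leaves the list empty.
 */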
static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
{
        struct list_head *head = cache->ready;

        if (head) {
                if (!list_empty(head)) {
                        cache->ready = head->next;
                        list_del_init(head);
                } else
                        cache->ready = NULL;
        }

        return head;
}

int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
                            size_t size)
{
        struct rds_ib_incoming *ibinc;
        struct rds_page_frag *frag;
        struct iovec *iov = first_iov;
        unsigned long to_copy;
        unsigned long frag_off = 0;
        unsigned long iov_off = 0;
        int copied = 0;
        int ret;
        u32 len;

        ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
        frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
        len = be32_to_cpu(inc->i_hdr.h_len);

        while (copied < size && copied < len) {
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
                while (iov_off == iov->iov_len) {
                        iov_off = 0;
                        iov++;
                }

                to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
                to_copy = min_t(size_t, to_copy, size - copied);
                to_copy = min_t(unsigned long, to_copy, len - copied);

                rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
                         "[%p, %u] + %lu\n",
                         to_copy, iov->iov_base, iov->iov_len, iov_off,
                         sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);

                /* XXX needs + offset for multiple recvs per page */
                ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
                                            frag->f_sg.offset + frag_off,
                                            iov->iov_base + iov_off,
                                            to_copy);
                if (ret) {
                        copied = ret;
                        break;
                }

                iov_off += to_copy;
                frag_off += to_copy;
                copied += to_copy;
        }

        return copied;
}

/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
        struct ib_send_wr *wr = &ic->i_ack_wr;
        struct ib_sge *sge = &ic->i_ack_sge;

        sge->addr = ic->i_ack_dma;
        sge->length = sizeof(struct rds_header);
        sge->lkey = ic->i_mr->lkey;

        wr->sg_list = sge;
        wr->num_sge = 1;
        wr->opcode = IB_WR_SEND;
        wr->wr_id = RDS_IB_ACK_WR_ID;
        wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}

/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This opens a
 * window for message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
#ifndef KERNEL_HAS_ATOMIC64
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
                                int ack_required)
{
        unsigned long flags;

        spin_lock_irqsave(&ic->i_ack_lock, flags);
        ic->i_ack_next = seq;
        if (ack_required)
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
        unsigned long flags;
        u64 seq;

        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

        spin_lock_irqsave(&ic->i_ack_lock, flags);
        seq = ic->i_ack_next;
        spin_unlock_irqrestore(&ic->i_ack_lock, flags);

        return seq;
}
#else
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
                                int ack_required)
{
        atomic64_set(&ic->i_ack_next, seq);
        if (ack_required) {
                smp_mb__before_clear_bit();
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        }
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        smp_mb__after_clear_bit();

        return atomic64_read(&ic->i_ack_next);
}
#endif


static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
        struct rds_header *hdr = ic->i_ack;
        struct ib_send_wr *failed_wr;
        u64 seq;
        int ret;

        seq = rds_ib_get_ack(ic);

        rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
        rds_message_populate_header(hdr, 0, 0, 0);
        hdr->h_ack = cpu_to_be64(seq);
        hdr->h_credit = adv_credits;
        rds_message_make_checksum(hdr);
        ic->i_ack_queued = jiffies;

        ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
        if (unlikely(ret)) {
                /* Failed to send. Release the WR, and
                 * force another ACK.
                 */
                clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

                rds_ib_stats_inc(s_ib_ack_send_failure);

                rds_ib_conn_error(ic->conn, "sending ack failed\n");
        } else
                rds_ib_stats_inc(s_ib_ack_sent);
}

/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1.  We call rds_ib_attempt_ack from the recv completion handler
 *      to send an ACK-only frame.
 *      However, there can be only one such frame in the send queue
 *      at any time, so we may have to postpone it.
 *  2.  When another (data) packet is transmitted while there's
 *      an ACK in the queue, we piggyback the ACK sequence number
 *      on the data packet.
 *  3.  If the ACK WR is done sending, we get called from the
 *      send queue completion handler, and check whether there's
 *      another ACK pending (postponed because the WR was on the
 *      queue). If so, we transmit it.
 *
 * We maintain 2 variables:
 *  -   i_ack_flags, which keeps track of whether the ACK WR
 *      is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  -   i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms. Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them. It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */
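
/*
 * A condensed view of the i_ack_flags transitions, derived from the
 * code below:
 *
 *   recv path:        set IB_ACK_REQUESTED
 *   attempt_ack:      REQUESTED && !test_and_set(IN_FLIGHT)
 *                       -> clear REQUESTED, post the ack WR
 *   send completion:  clear IN_FLIGHT, re-run attempt_ack
 *   piggyback xmit:   test_and_clear(REQUESTED)
 *   send failure:     clear IN_FLIGHT, set REQUESTED again
 */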

/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
        unsigned int adv_credits;

        if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
                return;

        if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
                rds_ib_stats_inc(s_ib_ack_send_delayed);
                return;
        }

        /* Can we get a send credit? */
        if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
                rds_ib_stats_inc(s_ib_tx_throttle);
                clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
                return;
        }

        clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
        rds_ib_send_ack(ic, adv_credits);
}

/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
        clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
        rds_ib_attempt_ack(ic);
}

/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
        if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
                rds_ib_stats_inc(s_ib_ack_send_piggybacked);
        return rds_ib_get_ack(ic);
}

/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient.  By copying we can share a simpler core with TCP which has to
 * copy.
 */
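/*
 * The copy below runs 64 bits at a time, so every chunk of RDS_FRAG_SIZE
 * and PAGE_SIZE must stay 8-byte aligned (hence the BUG_ON), and the
 * uncongested accumulator is fixed up from the map's little-endian
 * order once at the end.
 */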
static void rds_ib_cong_recv(struct rds_connection *conn,
                             struct rds_ib_incoming *ibinc)
{
        struct rds_cong_map *map;
        unsigned int map_off;
        unsigned int map_page;
        struct rds_page_frag *frag;
        unsigned long frag_off;
        unsigned long to_copy;
        unsigned long copied;
        uint64_t uncongested = 0;
        void *addr;

        /* catch completely corrupt packets */
        if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
                return;

        map = conn->c_fcong;
        map_page = 0;
        map_off = 0;

        frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
        frag_off = 0;

        copied = 0;

        while (copied < RDS_CONG_MAP_BYTES) {
                uint64_t *src, *dst;
                unsigned int k;

                to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
                BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

                addr = kmap_atomic(sg_page(&frag->f_sg));

                src = addr + frag_off;
                dst = (void *)map->m_page_addrs[map_page] + map_off;
                for (k = 0; k < to_copy; k += 8) {
                        /* Record ports that became uncongested, ie
                         * bits that changed from 0 to 1. */
                        uncongested |= ~(*src) & *dst;
                        *dst++ = *src++;
                }
                kunmap_atomic(addr);

                copied += to_copy;

                map_off += to_copy;
                if (map_off == PAGE_SIZE) {
                        map_off = 0;
                        map_page++;
                }

                frag_off += to_copy;
                if (frag_off == RDS_FRAG_SIZE) {
                        frag = list_entry(frag->f_item.next,
                                          struct rds_page_frag, f_item);
                        frag_off = 0;
                }
        }

        /* the congestion map is in little endian order */
        uncongested = le64_to_cpu(uncongested);

        rds_cong_map_updated(map, uncongested);
}

/*
 * Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
        u64             ack_next;
        u64             ack_recv;
        unsigned int    ack_required:1;
        unsigned int    ack_next_valid:1;
        unsigned int    ack_recv_valid:1;
};

static void rds_ib_process_recv(struct rds_connection *conn,
                                struct rds_ib_recv_work *recv, u32 data_len,
                                struct rds_ib_ack_state *state)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        struct rds_ib_incoming *ibinc = ic->i_ibinc;
        struct rds_header *ihdr, *hdr;

        /* XXX shut down the connection if port 0,0 are seen? */

        rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
                 data_len);

        if (data_len < sizeof(struct rds_header)) {
                rds_ib_conn_error(conn, "incoming message "
                       "from %pI4 didn't include a "
                       "header, disconnecting and "
                       "reconnecting\n",
                       &conn->c_faddr);
                return;
        }
        data_len -= sizeof(struct rds_header);

        ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

        /* Validate the checksum. */
        if (!rds_message_verify_checksum(ihdr)) {
                rds_ib_conn_error(conn, "incoming message "
                       "from %pI4 has corrupted header - "
                       "forcing a reconnect\n",
                       &conn->c_faddr);
                rds_stats_inc(s_recv_drop_bad_checksum);
                return;
        }

        /* Process the ACK sequence which comes with every packet */
        state->ack_recv = be64_to_cpu(ihdr->h_ack);
        state->ack_recv_valid = 1;

        /* Process the credits update if there was one */
        if (ihdr->h_credit)
                rds_ib_send_add_credits(conn, ihdr->h_credit);

        if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
                /* This is an ACK-only packet. It gets special treatment
                 * here because, historically, ACKs were rather special
                 * beasts.
                 */
                rds_ib_stats_inc(s_ib_ack_received);

                /*
                 * Usually the frags make their way on to incs and are then freed as
                 * the inc is freed.  We don't go that route, so we have to drop the
                 * page ref ourselves.  We can't just leave the page on the recv
                 * because that confuses the dma mapping of pages and each recv's use
                 * of a partial page.
                 *
                 * FIXME: Fold this into the code path below.
                 */
                rds_ib_frag_free(ic, recv->r_frag);
                recv->r_frag = NULL;
                return;
        }

        /*
         * If we don't already have an inc on the connection then this
         * fragment has a header and starts a message.  Copy its header
         * into the inc and save the inc so we can hang upcoming fragments
         * off its list.
         */
        if (!ibinc) {
                ibinc = recv->r_ibinc;
                recv->r_ibinc = NULL;
                ic->i_ibinc = ibinc;

                hdr = &ibinc->ii_inc.i_hdr;
                memcpy(hdr, ihdr, sizeof(*hdr));
                ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);

                rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
                         ic->i_recv_data_rem, hdr->h_flags);
        } else {
                hdr = &ibinc->ii_inc.i_hdr;
                /* We can't just use memcmp here; fragments of a
                 * single message may carry different ACKs */
                if (hdr->h_sequence != ihdr->h_sequence ||
                    hdr->h_len != ihdr->h_len ||
                    hdr->h_sport != ihdr->h_sport ||
                    hdr->h_dport != ihdr->h_dport) {
                        rds_ib_conn_error(conn,
                                "fragment header mismatch; forcing reconnect\n");
                        return;
                }
        }

        list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
        recv->r_frag = NULL;

        if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
                ic->i_recv_data_rem -= RDS_FRAG_SIZE;
        else {
                ic->i_recv_data_rem = 0;
                ic->i_ibinc = NULL;

                if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
                        rds_ib_cong_recv(conn, ibinc);
                else {
                        rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
                                          &ibinc->ii_inc, GFP_ATOMIC);
                        state->ack_next = be64_to_cpu(hdr->h_sequence);
                        state->ack_next_valid = 1;
                }

                /* Evaluate the ACK_REQUIRED flag *after* we received
                 * the complete frame, and after bumping the next_rx
                 * sequence. */
                if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
                        rds_stats_inc(s_recv_ack_required);
                        state->ack_required = 1;
                }

                rds_inc_put(&ibinc->ii_inc);
        }
}

/*
 * Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
        struct rds_connection *conn = context;
        struct rds_ib_connection *ic = conn->c_transport_data;

        rdsdebug("conn %p cq %p\n", conn, cq);

        rds_ib_stats_inc(s_ib_rx_cq_call);

        tasklet_schedule(&ic->i_recv_tasklet);
}

static inline void rds_poll_cq(struct rds_ib_connection *ic,
                               struct rds_ib_ack_state *state)
{
        struct rds_connection *conn = ic->conn;
        struct ib_wc wc;
        struct rds_ib_recv_work *recv;

        while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
                rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
                         (unsigned long long)wc.wr_id, wc.status,
                         rds_ib_wc_status_str(wc.status), wc.byte_len,
                         be32_to_cpu(wc.ex.imm_data));
                rds_ib_stats_inc(s_ib_rx_cq_event);

                recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];

                ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);

                /*
                 * Also process recvs in connecting state because it is possible
                 * to get a recv completion _before_ the rdmacm ESTABLISHED
                 * event is processed.
                 */
                if (wc.status == IB_WC_SUCCESS) {
                        rds_ib_process_recv(conn, recv, wc.byte_len, state);
                } else {
                        /* We expect errors as the qp is drained during shutdown */
                        if (rds_conn_up(conn) || rds_conn_connecting(conn))
                                rds_ib_conn_error(conn, "recv completion on %pI4 had "
                                                  "status %u (%s), disconnecting and "
                                                  "reconnecting\n", &conn->c_faddr,
                                                  wc.status,
                                                  rds_ib_wc_status_str(wc.status));
                }

                /*
                 * It's very important that we only free this ring entry if we've truly
                 * freed the resources allocated to the entry.  The refilling path can
                 * leak if we don't.
                 */
                rds_ib_ring_free(&ic->i_recv_ring, 1);
        }
}

void rds_ib_recv_tasklet_fn(unsigned long data)
{
        struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
        struct rds_connection *conn = ic->conn;
        struct rds_ib_ack_state state = { 0, };

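        /*
         * Poll, arm the CQ for the next solicited completion, then poll
         * again to pick up anything that slipped in before the notify
         * was armed; otherwise a completion could be missed entirely.
         */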
        rds_poll_cq(ic, &state);
        ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
        rds_poll_cq(ic, &state);

        if (state.ack_next_valid)
                rds_ib_set_ack(ic, state.ack_next, state.ack_required);
        if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
                rds_send_drop_acked(conn, state.ack_recv, NULL);
                ic->i_ack_recv = state.ack_recv;
        }
        if (rds_conn_up(conn))
                rds_ib_attempt_ack(ic);

        /* If we ever end up with a really empty receive ring, we're
         * in deep trouble, as the sender will definitely see RNR
         * timeouts. */
        if (rds_ib_ring_empty(&ic->i_recv_ring))
                rds_ib_stats_inc(s_ib_rx_ring_empty);

        if (rds_ib_ring_low(&ic->i_recv_ring))
                rds_ib_recv_refill(conn, 0);
}

int rds_ib_recv(struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;
        int ret = 0;

        rdsdebug("conn %p\n", conn);
        if (rds_conn_up(conn))
                rds_ib_attempt_ack(ic);

        return ret;
}

int rds_ib_recv_init(void)
{
        struct sysinfo si;
        int ret = -ENOMEM;

        /* Default to roughly a third of all available RAM for recv memory */
        si_meminfo(&si);
        rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;

        rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
                                        sizeof(struct rds_ib_incoming),
                                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!rds_ib_incoming_slab)
                goto out;

        rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
                                        sizeof(struct rds_page_frag),
                                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!rds_ib_frag_slab)
                kmem_cache_destroy(rds_ib_incoming_slab);
        else
                ret = 0;
out:
        return ret;
}


void rds_ib_recv_exit(void)
{
        kmem_cache_destroy(rds_ib_incoming_slab);
        kmem_cache_destroy(rds_ib_frag_slab);
}