1 /*
2  * Copyright(c) 2015 - 2017 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 #include <linux/mm.h>
48 #include <linux/types.h>
49 #include <linux/device.h>
50 #include <linux/dmapool.h>
51 #include <linux/slab.h>
52 #include <linux/list.h>
53 #include <linux/highmem.h>
54 #include <linux/io.h>
55 #include <linux/uio.h>
56 #include <linux/rbtree.h>
57 #include <linux/spinlock.h>
58 #include <linux/delay.h>
59 #include <linux/kthread.h>
60 #include <linux/mmu_context.h>
61 #include <linux/module.h>
62 #include <linux/vmalloc.h>
63 #include <linux/string.h>
64
65 #include "hfi.h"
66 #include "sdma.h"
67 #include "user_sdma.h"
68 #include "verbs.h"  /* for the headers */
69 #include "common.h" /* for struct hfi1_tid_info */
70 #include "trace.h"
71 #include "mmu_rb.h"
72
73 static uint hfi1_sdma_comp_ring_size = 128;
74 module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
75 MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");
76
77 /* The maximum number of data I/O vectors per message/request */
78 #define MAX_VECTORS_PER_REQ 8
79 /*
80  * Maximum number of packets to send from each message/request
81  * before moving on to the next one.
82  */
83 #define MAX_PKTS_PER_QUEUE 16
84
85 #define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))
86
87 #define req_opcode(x) \
88         (((x) >> HFI1_SDMA_REQ_OPCODE_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
89 #define req_version(x) \
90         (((x) >> HFI1_SDMA_REQ_VERSION_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
91 #define req_iovcnt(x) \
92         (((x) >> HFI1_SDMA_REQ_IOVCNT_SHIFT) & HFI1_SDMA_REQ_IOVCNT_MASK)
93
94 /* Number of BTH.PSN bits used for sequence number in expected rcvs */
95 #define BTH_SEQ_MASK 0x7ffull
96
97 /*
98  * Define fields in the KDETH header so we can update the header
99  * template.
100  */
101 #define KDETH_OFFSET_SHIFT        0
102 #define KDETH_OFFSET_MASK         0x7fff
103 #define KDETH_OM_SHIFT            15
104 #define KDETH_OM_MASK             0x1
105 #define KDETH_TID_SHIFT           16
106 #define KDETH_TID_MASK            0x3ff
107 #define KDETH_TIDCTRL_SHIFT       26
108 #define KDETH_TIDCTRL_MASK        0x3
109 #define KDETH_INTR_SHIFT          28
110 #define KDETH_INTR_MASK           0x1
111 #define KDETH_SH_SHIFT            29
112 #define KDETH_SH_MASK             0x1
113 #define KDETH_HCRC_UPPER_SHIFT    16
114 #define KDETH_HCRC_UPPER_MASK     0xff
115 #define KDETH_HCRC_LOWER_SHIFT    24
116 #define KDETH_HCRC_LOWER_MASK     0xff
117
118 #define AHG_KDETH_INTR_SHIFT 12
119 #define AHG_KDETH_SH_SHIFT   13
120 #define AHG_KDETH_ARRAY_SIZE  9
121
122 #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
123 #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
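/*
 * Editorial worked example (derived only from the two macros above): a PBC
 * length field of 16 dwords gives PBC2LRH(16) = 16 * 4 - 4 = 60 bytes, and
 * LRH2PBC(60) = 60 / 4 + 1 = 16 recovers the original value, so the two
 * conversions are inverses for in-range lengths.
 */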
124
125 #define KDETH_GET(val, field)                                           \
126         (((le32_to_cpu((val))) >> KDETH_##field##_SHIFT) & KDETH_##field##_MASK)
127 #define KDETH_SET(dw, field, val) do {                                  \
128                 u32 dwval = le32_to_cpu(dw);                            \
129                 dwval &= ~(KDETH_##field##_MASK << KDETH_##field##_SHIFT); \
130                 dwval |= (((val) & KDETH_##field##_MASK) << \
131                           KDETH_##field##_SHIFT);                       \
132                 dw = cpu_to_le32(dwval);                                \
133         } while (0)
134
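/*
 * Editorial illustration (not part of the upstream driver): how the
 * KDETH_GET/KDETH_SET accessors above operate on the little-endian KDETH
 * dwords of a header template. The helper name below is hypothetical.
 */
#if 0
static void example_clear_kdeth_sh(struct hfi1_pkt_header *hdr)
{
        /* Read the 1-bit SH (suppress header) field ... */
        u32 sh = KDETH_GET(hdr->kdeth.ver_tid_offset, SH);

        /* ... and clear it without disturbing the neighbouring fields. */
        if (sh)
                KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
}
#endif
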
135 #define AHG_HEADER_SET(arr, idx, dw, bit, width, value)                 \
136         do {                                                            \
137                 if ((idx) < ARRAY_SIZE((arr)))                          \
138                         (arr)[(idx++)] = sdma_build_ahg_descriptor(     \
139                                 (__force u16)(value), (dw), (bit),      \
140                                                         (width));       \
141                 else                                                    \
142                         return -ERANGE;                                 \
143         } while (0)
144
145 /* KDETH OM multipliers and switch over point */
146 #define KDETH_OM_SMALL     4
147 #define KDETH_OM_SMALL_SHIFT     2
148 #define KDETH_OM_LARGE     64
149 #define KDETH_OM_LARGE_SHIFT     6
150 #define KDETH_OM_MAX_SIZE  (1 << ((KDETH_OM_LARGE / KDETH_OM_SMALL) + 1))
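/*
 * Editorial note: KDETH.OFFSET is a 15-bit field expressed in units of the
 * OM multiplier, so an OFFSET value of 0x20 means 0x20 * 4 = 128 bytes when
 * KDETH.OM is clear (KDETH_OM_SMALL) and 0x20 * 64 = 2048 bytes when it is
 * set (KDETH_OM_LARGE). This is how req->tidoffset is reconstructed from
 * the user-supplied header further down.
 */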
151
152 /* Tx request flag bits */
153 #define TXREQ_FLAGS_REQ_ACK   BIT(0)      /* Set the ACK bit in the header */
154 #define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
155
156 #define SDMA_PKT_Q_INACTIVE BIT(0)
157 #define SDMA_PKT_Q_ACTIVE   BIT(1)
158 #define SDMA_PKT_Q_DEFERRED BIT(2)
159
160 /*
161  * Maximum retry attempts to submit a TX request
162  * before putting the process to sleep.
163  */
164 #define MAX_DEFER_RETRY_COUNT 1
165
166 static unsigned initial_pkt_count = 8;
167
168 #define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
169
170 struct sdma_mmu_node;
171
172 struct user_sdma_iovec {
173         struct list_head list;
174         struct iovec iov;
175         /* number of pages in this vector */
176         unsigned npages;
177         /* array of pinned pages for this vector */
178         struct page **pages;
179         /*
180          * offset into the virtual address space of the vector at
181          * which we last left off.
182          */
183         u64 offset;
184         struct sdma_mmu_node *node;
185 };
186
187 struct sdma_mmu_node {
188         struct mmu_rb_node rb;
189         struct hfi1_user_sdma_pkt_q *pq;
190         atomic_t refcount;
191         struct page **pages;
192         unsigned npages;
193 };
194
195 /* evict operation argument */
196 struct evict_data {
197         u32 cleared;    /* count evicted so far */
198         u32 target;     /* target count to evict */
199 };
200
201 struct user_sdma_request {
202         /* This is the original header from user space */
203         struct hfi1_pkt_header hdr;
204
205         /* Read mostly fields */
206         struct hfi1_user_sdma_pkt_q *pq ____cacheline_aligned_in_smp;
207         struct hfi1_user_sdma_comp_q *cq;
208         /*
209          * Pointer to the SDMA engine for this request.
210          * Since different requests could be on different VLs,
211          * each request needs its own engine pointer.
212          */
213         struct sdma_engine *sde;
214         struct sdma_req_info info;
215         /* TID array values copied from the tid_iov vector */
216         u32 *tids;
217         /* total length of the data in the request */
218         u32 data_len;
219         /* number of elements copied to the tids array */
220         u16 n_tids;
221         /*
222          * We copy the iovs for this request (based on
223          * info.iovcnt). These are only the data vectors
224          */
225         u8 data_iovs;
226         s8 ahg_idx;
227
228         /* Writeable fields shared with interrupt */
229         u64 seqcomp ____cacheline_aligned_in_smp;
230         u64 seqsubmitted;
231         /* status of the last txreq completed */
232         int status;
233
234         /* Send side fields */
235         struct list_head txps ____cacheline_aligned_in_smp;
236         u64 seqnum;
237         /*
238          * KDETH.OFFSET (TID) field
239          * The offset can cover multiple packets, depending on the
240          * size of the TID entry.
241          */
242         u32 tidoffset;
243         /*
244          * KDETH.Offset (Eager) field
245          * We need to remember the initial value so the headers
246          * can be updated properly.
247          */
248         u32 koffset;
249         u32 sent;
250         /* TID index copied from the tid_iov vector */
251         u16 tididx;
252         /* progress index moving along the iovs array */
253         u8 iov_idx;
254         u8 done;
255         u8 has_error;
256
257         struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
258 } ____cacheline_aligned_in_smp;
259
260 /*
261  * A single txreq could span up to 3 physical pages when the MTU
262  * is sufficiently large (> 4K). Each of the IOV pointers also
263  * needs its own set of flags so that the vectors can be handled
264  * independently of each other.
265  */
266 struct user_sdma_txreq {
267         /* Packet header for the txreq */
268         struct hfi1_pkt_header hdr;
269         struct sdma_txreq txreq;
270         struct list_head list;
271         struct user_sdma_request *req;
272         u16 flags;
273         unsigned busycount;
274         u64 seqnum;
275 };
276
277 #define SDMA_DBG(req, fmt, ...)                              \
278         hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
279                  (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
280                  ##__VA_ARGS__)
281 #define SDMA_Q_DBG(pq, fmt, ...)                         \
282         hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \
283                  (pq)->subctxt, ##__VA_ARGS__)
284
285 static int user_sdma_send_pkts(struct user_sdma_request *req,
286                                unsigned maxpkts);
287 static int num_user_pages(const struct iovec *iov);
288 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
289 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
290 static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
291 static int pin_vector_pages(struct user_sdma_request *req,
292                             struct user_sdma_iovec *iovec);
293 static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
294                                unsigned start, unsigned npages);
295 static int check_header_template(struct user_sdma_request *req,
296                                  struct hfi1_pkt_header *hdr, u32 lrhlen,
297                                  u32 datalen);
298 static int set_txreq_header(struct user_sdma_request *req,
299                             struct user_sdma_txreq *tx, u32 datalen);
300 static int set_txreq_header_ahg(struct user_sdma_request *req,
301                                 struct user_sdma_txreq *tx, u32 len);
302 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
303                                   struct hfi1_user_sdma_comp_q *cq,
304                                   u16 idx, enum hfi1_sdma_comp_state state,
305                                   int ret);
306 static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags);
307 static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);
308
309 static int defer_packet_queue(
310         struct sdma_engine *sde,
311         struct iowait *wait,
312         struct sdma_txreq *txreq,
313         unsigned int seq);
314 static void activate_packet_queue(struct iowait *wait, int reason);
315 static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
316                            unsigned long len);
317 static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode);
318 static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
319                          void *arg2, bool *stop);
320 static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
321 static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
322
323 static struct mmu_rb_ops sdma_rb_ops = {
324         .filter = sdma_rb_filter,
325         .insert = sdma_rb_insert,
326         .evict = sdma_rb_evict,
327         .remove = sdma_rb_remove,
328         .invalidate = sdma_rb_invalidate
329 };
330
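/*
 * Editorial summary: this is the iowait "sleep" callback wired up in
 * hfi1_user_sdma_alloc_queues(). It is called when a txreq cannot be
 * submitted because the SDMA descriptor ring is full: if the engine has
 * made progress, the submission is retried up to MAX_DEFER_RETRY_COUNT
 * times (-EAGAIN); otherwise the packet queue is marked deferred, parked
 * on the engine's dmawait list, and -EBUSY is returned so the caller waits.
 */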
331 static int defer_packet_queue(
332         struct sdma_engine *sde,
333         struct iowait *wait,
334         struct sdma_txreq *txreq,
335         unsigned seq)
336 {
337         struct hfi1_user_sdma_pkt_q *pq =
338                 container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
339         struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
340         struct user_sdma_txreq *tx =
341                 container_of(txreq, struct user_sdma_txreq, txreq);
342
343         if (sdma_progress(sde, seq, txreq)) {
344                 if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
345                         goto eagain;
346         }
347         /*
348          * We are assuming that if the list is enqueued somewhere, it
349          * is to the dmawait list since that is the only place where
350          * it is supposed to be enqueued.
351          */
352         xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
353         write_seqlock(&dev->iowait_lock);
354         if (list_empty(&pq->busy.list))
355                 list_add_tail(&pq->busy.list, &sde->dmawait);
356         write_sequnlock(&dev->iowait_lock);
357         return -EBUSY;
358 eagain:
359         return -EAGAIN;
360 }
361
362 static void activate_packet_queue(struct iowait *wait, int reason)
363 {
364         struct hfi1_user_sdma_pkt_q *pq =
365                 container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
366         xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
367         wake_up(&wait->wait_dma);
368 }
369
370 static void sdma_kmem_cache_ctor(void *obj)
371 {
372         struct user_sdma_txreq *tx = obj;
373
374         memset(tx, 0, sizeof(*tx));
375 }
376
377 int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
378                                 struct hfi1_filedata *fd)
379 {
380         int ret = -ENOMEM;
381         char buf[64];
382         struct hfi1_devdata *dd;
383         struct hfi1_user_sdma_comp_q *cq;
384         struct hfi1_user_sdma_pkt_q *pq;
385         unsigned long flags;
386
387         if (!uctxt || !fd)
388                 return -EBADF;
389
390         if (!hfi1_sdma_comp_ring_size)
391                 return -EINVAL;
392
393         dd = uctxt->dd;
394
395         pq = kzalloc(sizeof(*pq), GFP_KERNEL);
396         if (!pq)
397                 return -ENOMEM;
398
399         INIT_LIST_HEAD(&pq->list);
400         pq->dd = dd;
401         pq->ctxt = uctxt->ctxt;
402         pq->subctxt = fd->subctxt;
403         pq->n_max_reqs = hfi1_sdma_comp_ring_size;
404         pq->state = SDMA_PKT_Q_INACTIVE;
405         atomic_set(&pq->n_reqs, 0);
406         init_waitqueue_head(&pq->wait);
407         atomic_set(&pq->n_locked, 0);
408         pq->mm = fd->mm;
409
410         iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
411                     activate_packet_queue, NULL);
412         pq->reqidx = 0;
413
414         pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
415                            sizeof(*pq->reqs),
416                            GFP_KERNEL);
417         if (!pq->reqs)
418                 goto pq_reqs_nomem;
419
420         pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
421                                  sizeof(*pq->req_in_use),
422                                  GFP_KERNEL);
423         if (!pq->req_in_use)
424                 goto pq_reqs_no_in_use;
425
426         snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
427                  fd->subctxt);
428         pq->txreq_cache = kmem_cache_create(buf,
429                                             sizeof(struct user_sdma_txreq),
430                                             L1_CACHE_BYTES,
431                                             SLAB_HWCACHE_ALIGN,
432                                             sdma_kmem_cache_ctor);
433         if (!pq->txreq_cache) {
434                 dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
435                            uctxt->ctxt);
436                 goto pq_txreq_nomem;
437         }
438
439         cq = kzalloc(sizeof(*cq), GFP_KERNEL);
440         if (!cq)
441                 goto cq_nomem;
442
443         cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps)
444                                  * hfi1_sdma_comp_ring_size));
445         if (!cq->comps)
446                 goto cq_comps_nomem;
447
448         cq->nentries = hfi1_sdma_comp_ring_size;
449
450         ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
451                                    &pq->handler);
452         if (ret) {
453                 dd_dev_err(dd, "Failed to register with MMU %d", ret);
454                 goto pq_mmu_fail;
455         }
456
457         fd->pq = pq;
458         fd->cq = cq;
459
460         spin_lock_irqsave(&uctxt->sdma_qlock, flags);
461         list_add(&pq->list, &uctxt->sdma_queues);
462         spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
463
464         return 0;
465
466 pq_mmu_fail:
467         vfree(cq->comps);
468 cq_comps_nomem:
469         kfree(cq);
470 cq_nomem:
471         kmem_cache_destroy(pq->txreq_cache);
472 pq_txreq_nomem:
473         kfree(pq->req_in_use);
474 pq_reqs_no_in_use:
475         kfree(pq->reqs);
476 pq_reqs_nomem:
477         kfree(pq);
478
479         return ret;
480 }
481
482 int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
483 {
484         struct hfi1_ctxtdata *uctxt = fd->uctxt;
485         struct hfi1_user_sdma_pkt_q *pq;
486         unsigned long flags;
487
488         hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
489                   uctxt->ctxt, fd->subctxt);
490         pq = fd->pq;
491         if (pq) {
492                 if (pq->handler)
493                         hfi1_mmu_rb_unregister(pq->handler);
494                 spin_lock_irqsave(&uctxt->sdma_qlock, flags);
495                 if (!list_empty(&pq->list))
496                         list_del_init(&pq->list);
497                 spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
498                 iowait_sdma_drain(&pq->busy);
499                 /* Wait until all requests have been freed. */
500                 wait_event_interruptible(
501                         pq->wait,
502                         (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
503                 kfree(pq->reqs);
504                 kfree(pq->req_in_use);
505                 kmem_cache_destroy(pq->txreq_cache);
506                 kfree(pq);
507                 fd->pq = NULL;
508         }
509         if (fd->cq) {
510                 vfree(fd->cq->comps);
511                 kfree(fd->cq);
512                 fd->cq = NULL;
513         }
514         return 0;
515 }
516
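/*
 * Editorial summary: fold the destination LID into a 256-entry hash table
 * and hand out selector values lazily - the first DLID to hit a given
 * bucket is assigned the next selector (wrapping at 128), so subsequent
 * requests to the same DLID keep mapping to the same selector and hence
 * tend to stay on the same SDMA engine.
 */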
517 static u8 dlid_to_selector(u16 dlid)
518 {
519         static u8 mapping[256];
520         static int initialized;
521         static u8 next;
522         int hash;
523
524         if (!initialized) {
525                 memset(mapping, 0xFF, 256);
526                 initialized = 1;
527         }
528
529         hash = ((dlid >> 8) ^ dlid) & 0xFF;
530         if (mapping[hash] == 0xFF) {
531                 mapping[hash] = next;
532                 next = (next + 1) & 0x7F;
533         }
534
535         return mapping[hash];
536 }
537
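/*
 * Editorial summary of the iovec layout as parsed below (not a normative
 * ABI description): iovec[0] carries a struct sdma_req_info immediately
 * followed by the struct hfi1_pkt_header template; the next
 * req_iovcnt() - 1 vectors carry payload data; and for EXPECTED requests
 * one additional trailing vector carries the array of TID entries.
 */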
538 int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
539                                    struct iovec *iovec, unsigned long dim,
540                                    unsigned long *count)
541 {
542         int ret = 0, i;
543         struct hfi1_ctxtdata *uctxt = fd->uctxt;
544         struct hfi1_user_sdma_pkt_q *pq = fd->pq;
545         struct hfi1_user_sdma_comp_q *cq = fd->cq;
546         struct hfi1_devdata *dd = pq->dd;
547         unsigned long idx = 0;
548         u8 pcount = initial_pkt_count;
549         struct sdma_req_info info;
550         struct user_sdma_request *req;
551         u8 opcode, sc, vl;
552         int req_queued = 0;
553         u16 dlid;
554         u32 selector;
555
556         if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
557                 hfi1_cdbg(
558                    SDMA,
559                    "[%u:%u:%u] First vector not big enough for header %lu/%lu",
560                    dd->unit, uctxt->ctxt, fd->subctxt,
561                    iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
562                 return -EINVAL;
563         }
564         ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
565         if (ret) {
566                 hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
567                           dd->unit, uctxt->ctxt, fd->subctxt, ret);
568                 return -EFAULT;
569         }
570
571         trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
572                                      (u16 *)&info);
573
574         if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
575                 hfi1_cdbg(SDMA,
576                           "[%u:%u:%u:%u] Invalid comp index",
577                           dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
578                 return -EINVAL;
579         }
580
581         /*
582          * Sanity check the header I/O vector count: at least one vector (the
583          * header) is required, and it cannot exceed the actual I/O vector count.
584          */
585         if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) {
586                 hfi1_cdbg(SDMA,
587                           "[%u:%u:%u:%u] Invalid iov count %d, dim %ld",
588                           dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
589                           req_iovcnt(info.ctrl), dim);
590                 return -EINVAL;
591         }
592
593         if (!info.fragsize) {
594                 hfi1_cdbg(SDMA,
595                           "[%u:%u:%u:%u] Request does not specify fragsize",
596                           dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
597                 return -EINVAL;
598         }
599
600         /* Try to claim the request. */
601         if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
602                 hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
603                           dd->unit, uctxt->ctxt, fd->subctxt,
604                           info.comp_idx);
605                 return -EBADSLT;
606         }
607         /*
608          * All safety checks have been done and this request has been claimed.
609          */
610         hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
611                   uctxt->ctxt, fd->subctxt, info.comp_idx);
612         req = pq->reqs + info.comp_idx;
613         req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
614         req->data_len  = 0;
615         req->pq = pq;
616         req->cq = cq;
617         req->status = -1;
618         req->ahg_idx = -1;
619         req->iov_idx = 0;
620         req->sent = 0;
621         req->seqnum = 0;
622         req->seqcomp = 0;
623         req->seqsubmitted = 0;
624         req->tids = NULL;
625         req->done = 0;
626         req->has_error = 0;
627         INIT_LIST_HEAD(&req->txps);
628
629         memcpy(&req->info, &info, sizeof(info));
630
631         if (req_opcode(info.ctrl) == EXPECTED) {
632                 /* An expected request must have TID info and at least one data vector */
633                 if (req->data_iovs < 2) {
634                         SDMA_DBG(req,
635                                  "Not enough vectors for expected request");
636                         ret = -EINVAL;
637                         goto free_req;
638                 }
639                 req->data_iovs--;
640         }
641
642         if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
643                 SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
644                          MAX_VECTORS_PER_REQ);
645                 ret = -EINVAL;
646                 goto free_req;
647         }
648         /* Copy the header from the user buffer */
649         ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
650                              sizeof(req->hdr));
651         if (ret) {
652                 SDMA_DBG(req, "Failed to copy header template (%d)", ret);
653                 ret = -EFAULT;
654                 goto free_req;
655         }
656
657         /* If Static rate control is not enabled, sanitize the header. */
658         if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
659                 req->hdr.pbc[2] = 0;
660
661         /* Validate the opcode. Do not trust packets from user space blindly. */
662         opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
663         if ((opcode & USER_OPCODE_CHECK_MASK) !=
664              USER_OPCODE_CHECK_VAL) {
665                 SDMA_DBG(req, "Invalid opcode (%d)", opcode);
666                 ret = -EINVAL;
667                 goto free_req;
668         }
669         /*
670          * Validate the vl. Do not trust packets from user space blindly.
671          * VL comes from PBC, SC comes from LRH, and the VL needs to
672          * match the SC look up.
673          */
674         vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
675         sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
676               (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
677         if (vl >= dd->pport->vls_operational ||
678             vl != sc_to_vlt(dd, sc)) {
679                 SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
680                 ret = -EINVAL;
681                 goto free_req;
682         }
683
684         /* Checking P_KEY for requests from user-space */
685         if (egress_pkey_check(dd->pport, req->hdr.lrh, req->hdr.bth, sc,
686                               PKEY_CHECK_INVALID)) {
687                 ret = -EINVAL;
688                 goto free_req;
689         }
690
691         /*
692          * We should also check BTH.lnh. If it says the next header is a GRH,
693          * then the RXE parsing will be off and will land in the middle of the
694          * KDETH header or miss it entirely.
695          */
696         if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
697                 SDMA_DBG(req, "User tried to pass in a GRH");
698                 ret = -EINVAL;
699                 goto free_req;
700         }
701
702         req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
703         /*
704          * Calculate the initial TID offset based on the values of
705          * KDETH.OFFSET and KDETH.OM that are passed in.
706          */
707         req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
708                 (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
709                  KDETH_OM_LARGE : KDETH_OM_SMALL);
710         SDMA_DBG(req, "Initial TID offset %u", req->tidoffset);
711         idx++;
712
713         /* Save all the IO vector structures */
714         for (i = 0; i < req->data_iovs; i++) {
715                 req->iovs[i].offset = 0;
716                 INIT_LIST_HEAD(&req->iovs[i].list);
717                 memcpy(&req->iovs[i].iov,
718                        iovec + idx++,
719                        sizeof(req->iovs[i].iov));
720                 ret = pin_vector_pages(req, &req->iovs[i]);
721                 if (ret) {
722                         req->data_iovs = i;
723                         req->status = ret;
724                         goto free_req;
725                 }
726                 req->data_len += req->iovs[i].iov.iov_len;
727         }
728         SDMA_DBG(req, "total data length %u", req->data_len);
729
730         if (pcount > req->info.npkts)
731                 pcount = req->info.npkts;
732         /*
733          * Copy any TID info
734          * User space will provide the TID info only when the
735          * request type is EXPECTED. This is true even if there is
736          * only one packet in the request and the header is already
737          * set up. The reason for the singular TID case is that the
738          * driver needs to perform safety checks.
739          */
740         if (req_opcode(req->info.ctrl) == EXPECTED) {
741                 u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
742                 u32 *tmp;
743
744                 if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
745                         ret = -EINVAL;
746                         goto free_req;
747                 }
748
749                 /*
750                  * We have to copy all of the tids because they may vary
751                  * in size and, therefore, the TID count might not be
752                  * equal to the pkt count. However, there is no way to
753                  * tell at this point.
754                  */
755                 tmp = memdup_user(iovec[idx].iov_base,
756                                   ntids * sizeof(*req->tids));
757                 if (IS_ERR(tmp)) {
758                         ret = PTR_ERR(tmp);
759                         SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
760                                  ntids, ret);
761                         goto free_req;
762                 }
763                 req->tids = tmp;
764                 req->n_tids = ntids;
765                 req->tididx = 0;
766                 idx++;
767         }
768
769         dlid = be16_to_cpu(req->hdr.lrh[1]);
770         selector = dlid_to_selector(dlid);
771         selector += uctxt->ctxt + fd->subctxt;
772         req->sde = sdma_select_user_engine(dd, selector, vl);
773
774         if (!req->sde || !sdma_running(req->sde)) {
775                 ret = -ECOMM;
776                 goto free_req;
777         }
778
779         /* We don't need an AHG entry if the request contains only one packet */
780         if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG))
781                 req->ahg_idx = sdma_ahg_alloc(req->sde);
782
783         set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
784         atomic_inc(&pq->n_reqs);
785         req_queued = 1;
786         /* Send the first N packets in the request to buy us some time */
787         ret = user_sdma_send_pkts(req, pcount);
788         if (unlikely(ret < 0 && ret != -EBUSY)) {
789                 req->status = ret;
790                 goto free_req;
791         }
792
793         /*
794          * It is possible that the SDMA engine would have processed all the
795          * submitted packets by the time we get here. Therefore, only set
796          * packet queue state to ACTIVE if there are still uncompleted
797          * requests.
798          */
799         if (atomic_read(&pq->n_reqs))
800                 xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
801
802         /*
803          * This is a somewhat blocking send implementation.
804          * The driver will block the caller until all packets of the
805          * request have been submitted to the SDMA engine. However, it
806          * will not wait for send completions.
807          */
808         while (req->seqsubmitted != req->info.npkts) {
809                 ret = user_sdma_send_pkts(req, pcount);
810                 if (ret < 0) {
811                         if (ret != -EBUSY) {
812                                 req->status = ret;
813                                 WRITE_ONCE(req->has_error, 1);
814                                 if (ACCESS_ONCE(req->seqcomp) ==
815                                     req->seqsubmitted - 1)
816                                         goto free_req;
817                                 return ret;
818                         }
819                         wait_event_interruptible_timeout(
820                                 pq->busy.wait_dma,
821                                 (pq->state == SDMA_PKT_Q_ACTIVE),
822                                 msecs_to_jiffies(
823                                         SDMA_IOWAIT_TIMEOUT));
824                 }
825         }
826         *count += idx;
827         return 0;
828 free_req:
829         user_sdma_free_request(req, true);
830         if (req_queued)
831                 pq_update(pq);
832         set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
833         return ret;
834 }
835
836 static inline u32 compute_data_length(struct user_sdma_request *req,
837                                       struct user_sdma_txreq *tx)
838 {
839         /*
840          * Determine the proper size of the packet data.
841          * The size of the data of the first packet is in the header
842          * template. However, it includes the header and ICRC, which need
843          * to be subtracted.
844          * The minimum representable packet data length in a header is 4 bytes;
845          * therefore, when the requested data length is less than 4 bytes, there
846          * is only one packet and its data length is equal to the request data
847          * length.
848          * The size of the remaining packets is the minimum of the frag
849          * size (MTU) or remaining data in the request.
850          */
851         u32 len;
852
853         if (!req->seqnum) {
854                 if (req->data_len < sizeof(u32))
855                         len = req->data_len;
856                 else
857                         len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
858                                (sizeof(tx->hdr) - 4));
859         } else if (req_opcode(req->info.ctrl) == EXPECTED) {
860                 u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
861                         PAGE_SIZE;
862                 /*
863                  * Get the data length based on the remaining space in the
864                  * TID pair.
865                  */
866                 len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
867                 /* If we've filled up the TID pair, move to the next one. */
868                 if (unlikely(!len) && ++req->tididx < req->n_tids &&
869                     req->tids[req->tididx]) {
870                         tidlen = EXP_TID_GET(req->tids[req->tididx],
871                                              LEN) * PAGE_SIZE;
872                         req->tidoffset = 0;
873                         len = min_t(u32, tidlen, req->info.fragsize);
874                 }
875                 /*
876                  * Since the TID pairs map entire pages, make sure that we
877                  * are not going to try to send more data than we have
878                  * remaining.
879                  */
880                 len = min(len, req->data_len - req->sent);
881         } else {
882                 len = min(req->data_len - req->sent, (u32)req->info.fragsize);
883         }
884         SDMA_DBG(req, "Data Length = %u", len);
885         return len;
886 }
887
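/*
 * Round a payload length up to the next 4-byte boundary (editorial note:
 * e.g. pad_len(61) == 64, while pad_len(64) is returned unchanged).
 */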
888 static inline u32 pad_len(u32 len)
889 {
890         if (len & (sizeof(u32) - 1))
891                 len += sizeof(u32) - (len & (sizeof(u32) - 1));
892         return len;
893 }
894
895 static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
896 {
897         /* (Size of complete header - size of PBC) + 4B ICRC + data length */
898         return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
899 }
900
901 static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
902 {
903         int ret = 0, count;
904         unsigned npkts = 0;
905         struct user_sdma_txreq *tx = NULL;
906         struct hfi1_user_sdma_pkt_q *pq = NULL;
907         struct user_sdma_iovec *iovec = NULL;
908
909         if (!req->pq)
910                 return -EINVAL;
911
912         pq = req->pq;
913
914         /* If tx completion has reported an error, we are done. */
915         if (READ_ONCE(req->has_error))
916                 return -EFAULT;
917
918         /*
919          * Check if we might have sent the entire request already
920          */
921         if (unlikely(req->seqnum == req->info.npkts)) {
922                 if (!list_empty(&req->txps))
923                         goto dosend;
924                 return ret;
925         }
926
927         if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
928                 maxpkts = req->info.npkts - req->seqnum;
929
930         while (npkts < maxpkts) {
931                 u32 datalen = 0, queued = 0, data_sent = 0;
932                 u64 iov_offset = 0;
933
934                 /*
935                  * Check whether any of the completions have come back
936                  * with errors. If so, we are not going to process any
937                  * more packets from this request.
938                  */
939                 if (READ_ONCE(req->has_error))
940                         return -EFAULT;
941
942                 tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
943                 if (!tx)
944                         return -ENOMEM;
945
946                 tx->flags = 0;
947                 tx->req = req;
948                 tx->busycount = 0;
949                 INIT_LIST_HEAD(&tx->list);
950
951                 /*
952                  * For the last packet set the ACK request
953                  * and disable header suppression.
954                  */
955                 if (req->seqnum == req->info.npkts - 1)
956                         tx->flags |= (TXREQ_FLAGS_REQ_ACK |
957                                       TXREQ_FLAGS_REQ_DISABLE_SH);
958
959                 /*
960                  * Calculate the payload size - this is the minimum of the
961                  * fragment (MTU) size and the remaining bytes in the request,
962                  * but only if we have payload data.
963                  */
964                 if (req->data_len) {
965                         iovec = &req->iovs[req->iov_idx];
966                         if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
967                                 if (++req->iov_idx == req->data_iovs) {
968                                         ret = -EFAULT;
969                                         goto free_txreq;
970                                 }
971                                 iovec = &req->iovs[req->iov_idx];
972                                 WARN_ON(iovec->offset);
973                         }
974
975                         datalen = compute_data_length(req, tx);
976
977                         /*
978                          * Disable header suppression for payloads <= 8DWS.
979                          * If there is an uncorrectable error in the receive
980                          * data FIFO when the received payload size is less
981                          * than or equal to 8DWS, then RxDmaDataFifoRdUncErr
982                          * is not reported. RHF.EccErr is set instead if the
983                          * header is not suppressed.
984                          */
985                         if (!datalen) {
986                                 SDMA_DBG(req,
987                                          "Request has data but pkt len is 0");
988                                 ret = -EFAULT;
989                                 goto free_tx;
990                         } else if (datalen <= 32) {
991                                 tx->flags |= TXREQ_FLAGS_REQ_DISABLE_SH;
992                         }
993                 }
994
995                 if (req->ahg_idx >= 0) {
996                         if (!req->seqnum) {
997                                 u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
998                                 u32 lrhlen = get_lrh_len(req->hdr,
999                                                          pad_len(datalen));
1000                                 /*
1001                                  * Copy the request header into the tx header
1002                                  * because the HW needs a cacheline-aligned
1003                                  * address.
1004                                  * This copy could be optimized out if the hdr
1005                                  * member of user_sdma_request were also
1006                                  * cacheline aligned.
1007                                  */
1008                                 memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
1009                                 if (PBC2LRH(pbclen) != lrhlen) {
1010                                         pbclen = (pbclen & 0xf000) |
1011                                                 LRH2PBC(lrhlen);
1012                                         tx->hdr.pbc[0] = cpu_to_le16(pbclen);
1013                                 }
1014                                 ret = check_header_template(req, &tx->hdr,
1015                                                             lrhlen, datalen);
1016                                 if (ret)
1017                                         goto free_tx;
1018                                 ret = sdma_txinit_ahg(&tx->txreq,
1019                                                       SDMA_TXREQ_F_AHG_COPY,
1020                                                       sizeof(tx->hdr) + datalen,
1021                                                       req->ahg_idx, 0, NULL, 0,
1022                                                       user_sdma_txreq_cb);
1023                                 if (ret)
1024                                         goto free_tx;
1025                                 ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq,
1026                                                         &tx->hdr,
1027                                                         sizeof(tx->hdr));
1028                                 if (ret)
1029                                         goto free_txreq;
1030                         } else {
1031                                 int changes;
1032
1033                                 changes = set_txreq_header_ahg(req, tx,
1034                                                                datalen);
1035                                 if (changes < 0)
1036                                         goto free_tx;
1037                         }
1038                 } else {
1039                         ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
1040                                           datalen, user_sdma_txreq_cb);
1041                         if (ret)
1042                                 goto free_tx;
1043                         /*
1044                          * Modify the header for this packet. This only needs
1045                          * to be done if we are not going to use AHG. Otherwise,
1046                          * the HW will do it based on the changes we gave it
1047                          * during sdma_txinit_ahg().
1048                          */
1049                         ret = set_txreq_header(req, tx, datalen);
1050                         if (ret)
1051                                 goto free_txreq;
1052                 }
1053
1054                 /*
1055                  * If the request contains any data vectors, add up to
1056                  * fragsize bytes to the descriptor.
1057                  */
1058                 while (queued < datalen &&
1059                        (req->sent + data_sent) < req->data_len) {
1060                         unsigned long base, offset;
1061                         unsigned pageidx, len;
1062
1063                         base = (unsigned long)iovec->iov.iov_base;
1064                         offset = offset_in_page(base + iovec->offset +
1065                                                 iov_offset);
1066                         pageidx = (((iovec->offset + iov_offset +
1067                                      base) - (base & PAGE_MASK)) >> PAGE_SHIFT);
1068                         len = offset + req->info.fragsize > PAGE_SIZE ?
1069                                 PAGE_SIZE - offset : req->info.fragsize;
1070                         len = min((datalen - queued), len);
1071                         ret = sdma_txadd_page(pq->dd, &tx->txreq,
1072                                               iovec->pages[pageidx],
1073                                               offset, len);
1074                         if (ret) {
1075                                 SDMA_DBG(req, "SDMA txreq add page failed %d\n",
1076                                          ret);
1077                                 goto free_txreq;
1078                         }
1079                         iov_offset += len;
1080                         queued += len;
1081                         data_sent += len;
1082                         if (unlikely(queued < datalen &&
1083                                      pageidx == iovec->npages &&
1084                                      req->iov_idx < req->data_iovs - 1)) {
1085                                 iovec->offset += iov_offset;
1086                                 iovec = &req->iovs[++req->iov_idx];
1087                                 iov_offset = 0;
1088                         }
1089                 }
1090                 /*
1091                  * The txreq was submitted successfully so we can update
1092                  * the counters.
1093                  */
1094                 req->koffset += datalen;
1095                 if (req_opcode(req->info.ctrl) == EXPECTED)
1096                         req->tidoffset += datalen;
1097                 req->sent += data_sent;
1098                 if (req->data_len)
1099                         iovec->offset += iov_offset;
1100                 list_add_tail(&tx->txreq.list, &req->txps);
1101                 /*
1102                  * It is important to increment this here as it is used to
1103                  * generate the BTH.PSN and, therefore, can't be bulk-updated
1104                  * outside of the loop.
1105                  */
1106                 tx->seqnum = req->seqnum++;
1107                 npkts++;
1108         }
1109 dosend:
1110         ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
1111         req->seqsubmitted += count;
1112         if (req->seqsubmitted == req->info.npkts) {
1113                 WRITE_ONCE(req->done, 1);
1114                 /*
1115                  * The txreq has already been submitted to the HW queue
1116                  * so we can free the AHG entry now. Corruption will not
1117                  * happen due to the sequential manner in which
1118                  * descriptors are processed.
1119                  */
1120                 if (req->ahg_idx >= 0)
1121                         sdma_ahg_free(req->sde, req->ahg_idx);
1122         }
1123         return ret;
1124
1125 free_txreq:
1126         sdma_txclean(pq->dd, &tx->txreq);
1127 free_tx:
1128         kmem_cache_free(pq->txreq_cache, tx);
1129         return ret;
1130 }
1131
1132 /*
1133  * How many pages in this iovec element?
1134  */
1135 static inline int num_user_pages(const struct iovec *iov)
1136 {
1137         const unsigned long addr  = (unsigned long)iov->iov_base;
1138         const unsigned long len   = iov->iov_len;
1139         const unsigned long spage = addr & PAGE_MASK;
1140         const unsigned long epage = (addr + len - 1) & PAGE_MASK;
1141
1142         return 1 + ((epage - spage) >> PAGE_SHIFT);
1143 }
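
/*
 * Editorial example for num_user_pages(): with 4 KiB pages, iov_base =
 * 0x1ffc and iov_len = 16 straddle a page boundary, so spage = 0x1000,
 * epage = 0x2000 and the function returns 2.
 */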
1144
1145 static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
1146 {
1147         struct evict_data evict_data;
1148
1149         evict_data.cleared = 0;
1150         evict_data.target = npages;
1151         hfi1_mmu_rb_evict(pq->handler, &evict_data);
1152         return evict_data.cleared;
1153 }
1154
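/*
 * Editorial summary: the vector is first looked up in the per-queue MMU
 * rb-tree cache. A cached node that still matches is reused and its
 * refcount bumped; otherwise the node is pulled out of the tree, extended
 * by pinning the missing tail pages (evicting other cached entries via
 * sdma_cache_evict() if the pinned-page limit would be exceeded), and
 * re-inserted into the cache.
 */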
1155 static int pin_vector_pages(struct user_sdma_request *req,
1156                             struct user_sdma_iovec *iovec)
1157 {
1158         int ret = 0, pinned, npages, cleared;
1159         struct page **pages;
1160         struct hfi1_user_sdma_pkt_q *pq = req->pq;
1161         struct sdma_mmu_node *node = NULL;
1162         struct mmu_rb_node *rb_node;
1163         bool extracted;
1164
1165         extracted =
1166                 hfi1_mmu_rb_remove_unless_exact(pq->handler,
1167                                                 (unsigned long)
1168                                                 iovec->iov.iov_base,
1169                                                 iovec->iov.iov_len, &rb_node);
1170         if (rb_node) {
1171                 node = container_of(rb_node, struct sdma_mmu_node, rb);
1172                 if (!extracted) {
1173                         atomic_inc(&node->refcount);
1174                         iovec->pages = node->pages;
1175                         iovec->npages = node->npages;
1176                         iovec->node = node;
1177                         return 0;
1178                 }
1179         }
1180
1181         if (!node) {
1182                 node = kzalloc(sizeof(*node), GFP_KERNEL);
1183                 if (!node)
1184                         return -ENOMEM;
1185
1186                 node->rb.addr = (unsigned long)iovec->iov.iov_base;
1187                 node->pq = pq;
1188                 atomic_set(&node->refcount, 0);
1189         }
1190
1191         npages = num_user_pages(&iovec->iov);
1192         if (node->npages < npages) {
1193                 pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1194                 if (!pages) {
1195                         SDMA_DBG(req, "Failed page array alloc");
1196                         ret = -ENOMEM;
1197                         goto bail;
1198                 }
1199                 memcpy(pages, node->pages, node->npages * sizeof(*pages));
1200
1201                 npages -= node->npages;
1202
1203 retry:
1204                 if (!hfi1_can_pin_pages(pq->dd, pq->mm,
1205                                         atomic_read(&pq->n_locked), npages)) {
1206                         cleared = sdma_cache_evict(pq, npages);
1207                         if (cleared >= npages)
1208                                 goto retry;
1209                 }
1210                 pinned = hfi1_acquire_user_pages(pq->mm,
1211                         ((unsigned long)iovec->iov.iov_base +
1212                          (node->npages * PAGE_SIZE)), npages, 0,
1213                         pages + node->npages);
1214                 if (pinned < 0) {
1215                         kfree(pages);
1216                         ret = pinned;
1217                         goto bail;
1218                 }
1219                 if (pinned != npages) {
1220                         unpin_vector_pages(pq->mm, pages, node->npages,
1221                                            pinned);
1222                         ret = -EFAULT;
1223                         goto bail;
1224                 }
1225                 kfree(node->pages);
1226                 node->rb.len = iovec->iov.iov_len;
1227                 node->pages = pages;
1228                 node->npages += pinned;
1229                 npages = node->npages;
1230                 atomic_add(pinned, &pq->n_locked);
1231         }
1232         iovec->pages = node->pages;
1233         iovec->npages = npages;
1234         iovec->node = node;
1235
1236         ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
1237         if (ret) {
1238                 atomic_sub(node->npages, &pq->n_locked);
1239                 iovec->node = NULL;
1240                 goto bail;
1241         }
1242         return 0;
1243 bail:
1244         if (rb_node)
1245                 unpin_vector_pages(pq->mm, node->pages, 0, node->npages);
1246         kfree(node);
1247         return ret;
1248 }
1249
1250 static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
1251                                unsigned start, unsigned npages)
1252 {
1253         hfi1_release_user_pages(mm, pages + start, npages, false);
1254         kfree(pages);
1255 }
1256
1257 static int check_header_template(struct user_sdma_request *req,
1258                                  struct hfi1_pkt_header *hdr, u32 lrhlen,
1259                                  u32 datalen)
1260 {
1261         /*
1262          * Perform safety checks for any type of packet:
1263          *    - transfer size is a multiple of 64 bytes
1264          *    - packet length is a multiple of 4 bytes
1265          *    - packet length is not larger than the MTU size
1266          *
1267          * These checks are only done for the first packet of the
1268          * transfer since the header is "given" to us by user space.
1269          * For the remainder of the packets we compute the values.
1270          */
1271         if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
1272             lrhlen > get_lrh_len(*hdr, req->info.fragsize))
1273                 return -EINVAL;
1274
1275         if (req_opcode(req->info.ctrl) == EXPECTED) {
1276                 /*
1277                  * The header is checked only on the first packet. Furthermore,
1278                  * we ensure that at least one TID entry is copied when the
1279                  * request is submitted. Therefore, we don't have to verify that
1280                  * tididx points to something sane.
1281                  */
1282                 u32 tidval = req->tids[req->tididx],
1283                         tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
1284                         tididx = EXP_TID_GET(tidval, IDX),
1285                         tidctrl = EXP_TID_GET(tidval, CTRL),
1286                         tidoff;
1287                 __le32 kval = hdr->kdeth.ver_tid_offset;
1288
1289                 tidoff = KDETH_GET(kval, OFFSET) *
1290                           (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
1291                            KDETH_OM_LARGE : KDETH_OM_SMALL);
1292                 /*
1293                  * Expected receive packets have the following
1294                  * additional checks:
1295                  *     - offset is not larger than the TID size
1296                  *     - TIDCtrl values match between header and TID array
1297                  *     - TID indexes match between header and TID array
1298                  */
1299                 if ((tidoff + datalen > tidlen) ||
1300                     KDETH_GET(kval, TIDCTRL) != tidctrl ||
1301                     KDETH_GET(kval, TID) != tididx)
1302                         return -EINVAL;
1303         }
1304         return 0;
1305 }
1306
1307 /*
1308  * Correctly set the BTH.PSN field based on the type of
1309  * transfer - eager packets can simply increment the PSN, but
1310  * expected packets encode generation and sequence in the
1311  * BTH.PSN field, so simply incrementing would result in errors.
1312  */
1313 static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
1314 {
1315         u32 val = be32_to_cpu(bthpsn),
1316                 mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
1317                         0xffffffull),
1318                 psn = val & mask;
1319         if (expct)
1320                 psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
1321         else
1322                 psn = psn + frags;
1323         return psn & mask;
1324 }
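
/*
 * Editorial worked example for set_pkt_bth_psn(): with BTH_SEQ_MASK =
 * 0x7ff, psn = 0x1237f8 and frags = 0x10, an eager transfer yields
 * 0x123808 while an expected transfer yields 0x123008 - the sequence
 * wraps inside the low 11 bits and the generation bits above them are
 * left untouched.
 */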
1325
1326 static int set_txreq_header(struct user_sdma_request *req,
1327                             struct user_sdma_txreq *tx, u32 datalen)
1328 {
1329         struct hfi1_user_sdma_pkt_q *pq = req->pq;
1330         struct hfi1_pkt_header *hdr = &tx->hdr;
1331         u8 omfactor; /* KDETH.OM */
1332         u16 pbclen;
1333         int ret;
1334         u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
1335
1336         /* Copy the header template to the request before modification */
1337         memcpy(hdr, &req->hdr, sizeof(*hdr));
1338
1339         /*
1340          * Check if the PBC and LRH lengths are mismatched. If so,
1341          * adjust both in the header.
1342          */
1343         pbclen = le16_to_cpu(hdr->pbc[0]);
1344         if (PBC2LRH(pbclen) != lrhlen) {
1345                 pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
1346                 hdr->pbc[0] = cpu_to_le16(pbclen);
1347                 hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
1348                 /*
1349                  * Third packet:
1350                  * this is the first packet in the sequence that has
1351                  * a "static" size that can be used for the rest of
1352                  * the packets (besides the last one).
1353                  */
1354                 if (unlikely(req->seqnum == 2)) {
1355                         /*
1356                          * From this point on the lengths in both the
1357                          * PBC and LRH are the same until the last
1358                          * packet.
1359                          * Adjust the template so we don't have to update
1360                          * every packet
1361                          */
1362                         req->hdr.pbc[0] = hdr->pbc[0];
1363                         req->hdr.lrh[2] = hdr->lrh[2];
1364                 }
1365         }
1366         /*
1367          * We only have to modify the header if this is not the
1368          * first packet in the request. Otherwise, we use the
1369          * header given to us.
1370          */
1371         if (unlikely(!req->seqnum)) {
1372                 ret = check_header_template(req, hdr, lrhlen, datalen);
1373                 if (ret)
1374                         return ret;
1375                 goto done;
1376         }
1377
1378         hdr->bth[2] = cpu_to_be32(
1379                 set_pkt_bth_psn(hdr->bth[2],
1380                                 (req_opcode(req->info.ctrl) == EXPECTED),
1381                                 req->seqnum));
1382
1383         /* Set ACK request on last packet */
1384         if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
1385                 hdr->bth[2] |= cpu_to_be32(1UL << 31);
1386
1387         /* Set the new offset */
1388         hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
1389         /* Expected packets have to fill in the new TID information */
1390         if (req_opcode(req->info.ctrl) == EXPECTED) {
1391                 tidval = req->tids[req->tididx];
1392                 /*
1393                  * If the offset puts us at the end of the current TID,
1394                  * advance everything.
1395                  */
1396                 if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
1397                                          PAGE_SIZE)) {
1398                         req->tidoffset = 0;
1399                         /*
1400                          * Since we don't copy all the TIDs all at once,
1401                          * we have to check again.
1402                          */
1403                         if (++req->tididx > req->n_tids - 1 ||
1404                             !req->tids[req->tididx]) {
1405                                 return -EINVAL;
1406                         }
1407                         tidval = req->tids[req->tididx];
1408                 }
1409                 omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
1410                         KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE_SHIFT :
1411                         KDETH_OM_SMALL_SHIFT;
1412                 /* Set KDETH.TIDCtrl based on value for this TID. */
1413                 KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
1414                           EXP_TID_GET(tidval, CTRL));
1415                 /* Set KDETH.TID based on value for this TID */
1416                 KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
1417                           EXP_TID_GET(tidval, IDX));
1418                 /* Clear KDETH.SH when DISABLE_SH flag is set */
1419                 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH))
1420                         KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
1421                 /*
1422                  * Set the KDETH.OFFSET and KDETH.OM based on size of
1423                  * transfer.
1424                  */
1425                 SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
1426                          req->tidoffset, req->tidoffset >> omfactor,
1427                          omfactor != KDETH_OM_SMALL_SHIFT);
1428                 KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
1429                           req->tidoffset >> omfactor);
1430                 KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
1431                           omfactor != KDETH_OM_SMALL_SHIFT);
1432         }
1433 done:
1434         trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
1435                                     req->info.comp_idx, hdr, tidval);
1436         return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
1437 }
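/*
 * Illustrative sketch, not part of the driver: how the KDETH.OM /
 * KDETH.OFFSET selection above plays out numerically. The constants are
 * assumptions standing in for KDETH_OM_SMALL(_SHIFT), KDETH_OM_LARGE(_SHIFT)
 * and KDETH_OM_MAX_SIZE: with a small 4-byte multiplier the narrow OFFSET
 * field only reaches 128 KB, so larger TID buffers switch to a 64-byte
 * multiplier and encode the offset in 64-byte units instead.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_OM_SMALL_SHIFT	2		/* offset in 4-byte units */
#define DEMO_OM_LARGE_SHIFT	6		/* offset in 64-byte units */
#define DEMO_OM_MAX_SIZE	(128 * 1024)	/* assumed reach of small OM */

static void demo_encode_offset(uint32_t tid_len, uint32_t tidoffset)
{
	unsigned int shift = tid_len >= DEMO_OM_MAX_SIZE ?
			     DEMO_OM_LARGE_SHIFT : DEMO_OM_SMALL_SHIFT;

	printf("TID len %u: OM=%s, OFFSET field = %u (units of %u bytes)\n",
	       (unsigned)tid_len,
	       shift == DEMO_OM_LARGE_SHIFT ? "large" : "small",
	       (unsigned)(tidoffset >> shift), 1u << shift);
}

int main(void)
{
	demo_encode_offset(64 * 1024, 8192);	/* small OM: 8192 / 4 = 2048 */
	demo_encode_offset(512 * 1024, 8192);	/* large OM: 8192 / 64 = 128 */
	return 0;
}
#endif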
1438
1439 static int set_txreq_header_ahg(struct user_sdma_request *req,
1440                                 struct user_sdma_txreq *tx, u32 datalen)
1441 {
1442         u32 ahg[AHG_KDETH_ARRAY_SIZE];
1443         int diff = 0;
1444         u8 omfactor; /* KDETH.OM */
1445         struct hfi1_user_sdma_pkt_q *pq = req->pq;
1446         struct hfi1_pkt_header *hdr = &req->hdr;
1447         u16 pbclen = le16_to_cpu(hdr->pbc[0]);
1448         u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
1449
1450         if (PBC2LRH(pbclen) != lrhlen) {
1451                 /* PBC.PbcLengthDWs */
1452                 AHG_HEADER_SET(ahg, diff, 0, 0, 12,
1453                                cpu_to_le16(LRH2PBC(lrhlen)));
1454                 /* LRH.PktLen (we need the full 16 bits due to byte swap) */
1455                 AHG_HEADER_SET(ahg, diff, 3, 0, 16,
1456                                cpu_to_be16(lrhlen >> 2));
1457         }
1458
1459         /*
1460          * Do the common updates
1461          */
1462         /* BTH.PSN and BTH.A */
1463         val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
1464                 (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
1465         if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
1466                 val32 |= 1UL << 31;
1467         AHG_HEADER_SET(ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
1468         AHG_HEADER_SET(ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
1469         /* KDETH.Offset */
1470         AHG_HEADER_SET(ahg, diff, 15, 0, 16,
1471                        cpu_to_le16(req->koffset & 0xffff));
1472         AHG_HEADER_SET(ahg, diff, 15, 16, 16, cpu_to_le16(req->koffset >> 16));
1473         if (req_opcode(req->info.ctrl) == EXPECTED) {
1474                 __le16 val;
1475
1476                 tidval = req->tids[req->tididx];
1477
1478                 /*
1479                  * If the offset puts us at the end of the current TID,
1480                  * advance everything.
1481                  */
1482                 if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
1483                                          PAGE_SIZE)) {
1484                         req->tidoffset = 0;
1485                         /*
1486                          * Since we don't copy all the TIDs all at once,
1487                          * we have to check again.
1488                          */
1489                         if (++req->tididx > req->n_tids - 1 ||
1490                             !req->tids[req->tididx])
1491                                 return -EINVAL;
1492                         tidval = req->tids[req->tididx];
1493                 }
1494                 omfactor = ((EXP_TID_GET(tidval, LEN) *
1495                                   PAGE_SIZE) >=
1496                                  KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
1497                                  KDETH_OM_SMALL_SHIFT;
1498                 /* KDETH.OM and KDETH.OFFSET (TID) */
1499                 AHG_HEADER_SET(ahg, diff, 7, 0, 16,
1500                                ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
1501                                 ((req->tidoffset >> omfactor)
1502                                  & 0x7fff)));
1503                 /* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
1504                 val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
1505                                    (EXP_TID_GET(tidval, IDX) & 0x3ff));
1506
1507                 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) {
1508                         val |= cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
1509                                                       INTR) <<
1510                                             AHG_KDETH_INTR_SHIFT));
1511                 } else {
1512                         val |= KDETH_GET(hdr->kdeth.ver_tid_offset, SH) ?
1513                                cpu_to_le16(0x1 << AHG_KDETH_SH_SHIFT) :
1514                                cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
1515                                                       INTR) <<
1516                                              AHG_KDETH_INTR_SHIFT));
1517                 }
1518
1519                 AHG_HEADER_SET(ahg, diff, 7, 16, 14, val);
1520         }
1521         if (diff < 0)
1522                 return diff;
1523
1524         trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
1525                                         req->info.comp_idx, req->sde->this_idx,
1526                                         req->ahg_idx, ahg, diff, tidval);
1527         sdma_txinit_ahg(&tx->txreq,
1528                         SDMA_TXREQ_F_USE_AHG,
1529                         datalen, req->ahg_idx, diff,
1530                         ahg, sizeof(req->hdr),
1531                         user_sdma_txreq_cb);
1532
1533         return diff;
1534 }
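/*
 * Illustrative sketch, not part of the driver: a simplified software model
 * of what one AHG ("automatic header generation") update means. Each
 * AHG_HEADER_SET() call above records a (dword index, bit offset, width,
 * value) patch; the SDMA engine applies such patches to a cached copy of the
 * header for every packet instead of fetching a full header from memory.
 * demo_apply_patch() is a hypothetical helper illustrating the effect, not
 * the engine's actual mechanism, which works from descriptors built by
 * sdma_txinit_ahg().
 */
#if 0
#include <stdint.h>

struct demo_ahg_patch {
	uint8_t  dword;		/* which 32-bit word of the header */
	uint8_t  bit;		/* starting bit within that word */
	uint8_t  width;		/* number of bits to replace */
	uint32_t value;		/* new field value */
};

static void demo_apply_patch(uint32_t *hdr_dwords,
			     const struct demo_ahg_patch *p)
{
	uint32_t mask = (p->width >= 32 ? ~0u :
			 ((1u << p->width) - 1)) << p->bit;

	hdr_dwords[p->dword] &= ~mask;
	hdr_dwords[p->dword] |= (p->value << p->bit) & mask;
}
#endif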
1535
1536 /*
1537  * SDMA tx request completion callback. Called when the SDMA progress
1538  * state machine gets notification that the SDMA descriptors for this
1539  * tx request have been processed by the DMA engine. Called in
1540  * interrupt context.
1541  */
1542 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
1543 {
1544         struct user_sdma_txreq *tx =
1545                 container_of(txreq, struct user_sdma_txreq, txreq);
1546         struct user_sdma_request *req;
1547         struct hfi1_user_sdma_pkt_q *pq;
1548         struct hfi1_user_sdma_comp_q *cq;
1549         u16 idx;
1550
1551         if (!tx->req)
1552                 return;
1553
1554         req = tx->req;
1555         pq = req->pq;
1556         cq = req->cq;
1557
1558         if (status != SDMA_TXREQ_S_OK) {
1559                 SDMA_DBG(req, "SDMA completion with error %d",
1560                          status);
1561                 WRITE_ONCE(req->has_error, 1);
1562         }
1563
1564         req->seqcomp = tx->seqnum;
1565         kmem_cache_free(pq->txreq_cache, tx);
1566         tx = NULL;
1567
1568         idx = req->info.comp_idx;
1569         if (req->status == -1 && status == SDMA_TXREQ_S_OK) {
1570                 if (req->seqcomp == req->info.npkts - 1) {
1571                         req->status = 0;
1572                         user_sdma_free_request(req, false);
1573                         pq_update(pq);
1574                         set_comp_state(pq, cq, idx, COMPLETE, 0);
1575                 }
1576         } else {
1577                 if (status != SDMA_TXREQ_S_OK)
1578                         req->status = status;
1579                 if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
1580                     (READ_ONCE(req->done) ||
1581                      READ_ONCE(req->has_error))) {
1582                         user_sdma_free_request(req, false);
1583                         pq_update(pq);
1584                         set_comp_state(pq, cq, idx, ERROR, req->status);
1585                 }
1586         }
1587 }
1588
1589 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
1590 {
1591         if (atomic_dec_and_test(&pq->n_reqs)) {
1592                 xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
1593                 wake_up(&pq->wait);
1594         }
1595 }
1596
1597 static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
1598 {
1599         if (!list_empty(&req->txps)) {
1600                 struct sdma_txreq *t, *p;
1601
1602                 list_for_each_entry_safe(t, p, &req->txps, list) {
1603                         struct user_sdma_txreq *tx =
1604                                 container_of(t, struct user_sdma_txreq, txreq);
1605                         list_del_init(&t->list);
1606                         sdma_txclean(req->pq->dd, t);
1607                         kmem_cache_free(req->pq->txreq_cache, tx);
1608                 }
1609         }
1610         if (req->data_iovs) {
1611                 struct sdma_mmu_node *node;
1612                 int i;
1613
1614                 for (i = 0; i < req->data_iovs; i++) {
1615                         node = req->iovs[i].node;
1616                         if (!node)
1617                                 continue;
1618
1619                         if (unpin)
1620                                 hfi1_mmu_rb_remove(req->pq->handler,
1621                                                    &node->rb);
1622                         else
1623                                 atomic_dec(&node->refcount);
1624                 }
1625         }
1626         kfree(req->tids);
1627         clear_bit(req->info.comp_idx, req->pq->req_in_use);
1628 }
1629
1630 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
1631                                   struct hfi1_user_sdma_comp_q *cq,
1632                                   u16 idx, enum hfi1_sdma_comp_state state,
1633                                   int ret)
1634 {
1635         hfi1_cdbg(SDMA, "[%u:%u:%u:%u] Setting completion status %u %d",
1636                   pq->dd->unit, pq->ctxt, pq->subctxt, idx, state, ret);
1637         if (state == ERROR)
1638                 cq->comps[idx].errcode = -ret;
1639         smp_wmb(); /* make sure errcode is visible first */
1640         cq->comps[idx].status = state;
1641         trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
1642                                         idx, state, ret);
1643 }
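/*
 * Illustrative sketch, not part of the driver: how a consumer of the
 * completion ring would pair with the smp_wmb() above. The writer publishes
 * errcode before status, so the reader must observe status first and then
 * order the errcode read after it (a read barrier or acquire load). The
 * struct layout and status values below are stand-ins for the real hfi1
 * completion-entry definitions, not the driver's ABI.
 */
#if 0
#include <stdatomic.h>

struct demo_comp {
	_Atomic int status;	/* 0 = free, 1 = complete, 2 = error */
	int errcode;
};

static int demo_poll(struct demo_comp *c, int *errcode)
{
	/* acquire load pairs with the writer's barrier + status store */
	int s = atomic_load_explicit(&c->status, memory_order_acquire);

	if (s == 0)
		return 0;		/* not done yet */
	if (s == 2)
		*errcode = c->errcode;	/* safe: ordered after status */
	return s;
}
#endif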
1644
1645 static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
1646                            unsigned long len)
1647 {
1648         return (bool)(node->addr == addr);
1649 }
1650
1651 static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
1652 {
1653         struct sdma_mmu_node *node =
1654                 container_of(mnode, struct sdma_mmu_node, rb);
1655
1656         atomic_inc(&node->refcount);
1657         return 0;
1658 }
1659
1660 /*
1661  * Return 1 to remove the node from the rb tree and call the remove op.
1662  *
1663  * Called with the rb tree lock held.
1664  */
1665 static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
1666                          void *evict_arg, bool *stop)
1667 {
1668         struct sdma_mmu_node *node =
1669                 container_of(mnode, struct sdma_mmu_node, rb);
1670         struct evict_data *evict_data = evict_arg;
1671
1672         /* is this node still being used? */
1673         if (atomic_read(&node->refcount))
1674                 return 0; /* keep this node */
1675
1676         /* this node will be evicted; add its pages to our count */
1677         evict_data->cleared += node->npages;
1678
1679         /* have enough pages been cleared? */
1680         if (evict_data->cleared >= evict_data->target)
1681                 *stop = true;
1682
1683         return 1; /* remove this node */
1684 }
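/*
 * Illustrative sketch, not part of the driver: the contract the evict
 * callback above is written against. A hypothetical walker (the real one
 * lives in mmu_rb.c) visits cached nodes under the rb-tree lock, removes
 * every node for which the callback returns 1, and stops as soon as the
 * callback sets *stop, i.e. once evict_data->cleared reaches
 * evict_data->target.
 */
#if 0
#include <stdbool.h>

struct demo_node {
	struct demo_node *next;
};

typedef int (*demo_evict_fn)(struct demo_node *node, void *arg, bool *stop);

/* Walk the cache and reclaim nodes until the callback asks us to stop. */
static void demo_evict_walk(struct demo_node *head, demo_evict_fn evict,
			    void *arg)
{
	struct demo_node *node, *next;
	bool stop = false;

	for (node = head; node; node = next) {
		next = node->next;
		if (evict(node, arg, &stop)) {
			/* here: unlink the node and invoke its remove op */
		}
		if (stop)
			break;
	}
}
#endif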
1685
1686 static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
1687 {
1688         struct sdma_mmu_node *node =
1689                 container_of(mnode, struct sdma_mmu_node, rb);
1690
1691         atomic_sub(node->npages, &node->pq->n_locked);
1692
1693         unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
1694
1695         kfree(node);
1696 }
1697
1698 static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
1699 {
1700         struct sdma_mmu_node *node =
1701                 container_of(mnode, struct sdma_mmu_node, rb);
1702
1703         if (!atomic_read(&node->refcount))
1704                 return 1;
1705         return 0;
1706 }