drivers/infiniband/hw/hfi1/user_sdma.c
1 /*
2  * Copyright(c) 2015 - 2017 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47 #include <linux/mm.h>
48 #include <linux/types.h>
49 #include <linux/device.h>
50 #include <linux/dmapool.h>
51 #include <linux/slab.h>
52 #include <linux/list.h>
53 #include <linux/highmem.h>
54 #include <linux/io.h>
55 #include <linux/uio.h>
56 #include <linux/rbtree.h>
57 #include <linux/spinlock.h>
58 #include <linux/delay.h>
59 #include <linux/kthread.h>
60 #include <linux/mmu_context.h>
61 #include <linux/module.h>
62 #include <linux/vmalloc.h>
63 #include <linux/string.h>
64
65 #include "hfi.h"
66 #include "sdma.h"
67 #include "user_sdma.h"
68 #include "verbs.h"  /* for the headers */
69 #include "common.h" /* for struct hfi1_tid_info */
70 #include "trace.h"
71 #include "mmu_rb.h"
72
73 static uint hfi1_sdma_comp_ring_size = 128;
74 module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
75 MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");
76
77 /* The maximum number of data I/O vectors per message/request */
78 #define MAX_VECTORS_PER_REQ 8
79 /*
80  * Maximum number of packets to send from each message/request
81  * before moving to the next one.
82  */
83 #define MAX_PKTS_PER_QUEUE 16
84
85 #define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))
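/*
 * Worked example of num_pages() above, assuming 4 KiB pages:
 * num_pages(1) == 1, num_pages(4096) == 1, num_pages(4097) == 2.
 * The macro counts whole pages for a byte length only; it does not
 * account for the starting offset within the first page
 * (num_user_pages() below does that for an iovec).
 */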
86
87 #define req_opcode(x) \
88         (((x) >> HFI1_SDMA_REQ_OPCODE_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
89 #define req_version(x) \
90         (((x) >> HFI1_SDMA_REQ_VERSION_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
91 #define req_iovcnt(x) \
92         (((x) >> HFI1_SDMA_REQ_IOVCNT_SHIFT) & HFI1_SDMA_REQ_IOVCNT_MASK)
93
94 /* Number of BTH.PSN bits used for sequence number in expected rcvs */
95 #define BTH_SEQ_MASK 0x7ffull
96
97 #define AHG_KDETH_INTR_SHIFT 12
98 #define AHG_KDETH_SH_SHIFT   13
99 #define AHG_KDETH_ARRAY_SIZE  9
100
101 #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
102 #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
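/*
 * Illustration of the two macros above: the PBC length is in dwords and
 * includes the PBC dword itself, while the LRH length used here is in
 * bytes and excludes the PBC, hence the +1 dword / -4 byte adjustment.
 * For example, LRH2PBC(96) == 96/4 + 1 == 25 and PBC2LRH(25) == 25*4 - 4 == 96.
 */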
103
104 #define AHG_HEADER_SET(arr, idx, dw, bit, width, value)                 \
105         do {                                                            \
106                 if ((idx) < ARRAY_SIZE((arr)))                          \
107                         (arr)[(idx++)] = sdma_build_ahg_descriptor(     \
108                                 (__force u16)(value), (dw), (bit),      \
109                                                         (width));       \
110                 else                                                    \
111                         return -ERANGE;                                 \
112         } while (0)
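/*
 * Note that AHG_HEADER_SET() appends a descriptor and post-increments
 * idx, and on overflow it executes a bare "return -ERANGE", so it may
 * only be used inside functions that return int.
 */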
113
114 /* Tx request flag bits */
115 #define TXREQ_FLAGS_REQ_ACK   BIT(0)      /* Set the ACK bit in the header */
116 #define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
117
118 #define SDMA_PKT_Q_INACTIVE BIT(0)
119 #define SDMA_PKT_Q_ACTIVE   BIT(1)
120 #define SDMA_PKT_Q_DEFERRED BIT(2)
121
122 /*
123  * Maximum retry attempts to submit a TX request
124  * before putting the process to sleep.
125  */
126 #define MAX_DEFER_RETRY_COUNT 1
127
128 static unsigned initial_pkt_count = 8;
129
130 #define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
131
132 struct sdma_mmu_node;
133
134 struct user_sdma_iovec {
135         struct list_head list;
136         struct iovec iov;
137         /* number of pages in this vector */
138         unsigned npages;
139         /* array of pinned pages for this vector */
140         struct page **pages;
141         /*
142          * offset into the virtual address space of the vector at
143          * which we last left off.
144          */
145         u64 offset;
146         struct sdma_mmu_node *node;
147 };
148
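/*
 * Cached pinning of a user buffer: one node per pinned iovec, kept in
 * the per-queue MMU rb-tree and keyed by virtual address so that later
 * requests can reuse the pinned pages (tracked by refcount); see
 * pin_vector_pages().
 */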
149 struct sdma_mmu_node {
150         struct mmu_rb_node rb;
151         struct hfi1_user_sdma_pkt_q *pq;
152         atomic_t refcount;
153         struct page **pages;
154         unsigned npages;
155 };
156
157 /* evict operation argument */
158 struct evict_data {
159         u32 cleared;    /* count evicted so far */
160         u32 target;     /* target count to evict */
161 };
162
163 struct user_sdma_request {
164         /* This is the original header from user space */
165         struct hfi1_pkt_header hdr;
166
167         /* Read mostly fields */
168         struct hfi1_user_sdma_pkt_q *pq ____cacheline_aligned_in_smp;
169         struct hfi1_user_sdma_comp_q *cq;
170         /*
171          * Pointer to the SDMA engine for this request.
172          * Since different requests could be on different VLs,
173          * each request needs its own engine pointer.
174          */
175         struct sdma_engine *sde;
176         struct sdma_req_info info;
177         /* TID array values copied from the tid_iov vector */
178         u32 *tids;
179         /* total length of the data in the request */
180         u32 data_len;
181         /* number of elements copied to the tids array */
182         u16 n_tids;
183         /*
184          * We copy the iovs for this request (based on
185          * info.iovcnt). These are only the data vectors
186          */
187         u8 data_iovs;
188         s8 ahg_idx;
189
190         /* Writeable fields shared with interrupt */
191         u64 seqcomp ____cacheline_aligned_in_smp;
192         u64 seqsubmitted;
193         /* status of the last txreq completed */
194         int status;
195
196         /* Send side fields */
197         struct list_head txps ____cacheline_aligned_in_smp;
198         u64 seqnum;
199         /*
200          * KDETH.OFFSET (TID) field
201          * The offset can cover multiple packets, depending on the
202          * size of the TID entry.
203          */
204         u32 tidoffset;
205         /*
206          * KDETH.Offset (Eager) field
207          * We need to remember the initial value so the headers
208          * can be updated properly.
209          */
210         u32 koffset;
211         u32 sent;
212         /* TID index copied from the tid_iov vector */
213         u16 tididx;
214         /* progress index moving along the iovs array */
215         u8 iov_idx;
216         u8 done;
217         u8 has_error;
218
219         struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
220 } ____cacheline_aligned_in_smp;
221
222 /*
223  * A single txreq could span up to 3 physical pages when the MTU
224  * is sufficiently large (> 4K). Each of the IOV pointers also
225  * needs its own set of flags so that the vectors can be handled
226  * independently of each other.
227  */
228 struct user_sdma_txreq {
229         /* Packet header for the txreq */
230         struct hfi1_pkt_header hdr;
231         struct sdma_txreq txreq;
232         struct list_head list;
233         struct user_sdma_request *req;
234         u16 flags;
235         unsigned busycount;
236         u64 seqnum;
237 };
238
239 #define SDMA_DBG(req, fmt, ...)                              \
240         hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
241                  (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
242                  ##__VA_ARGS__)
243 #define SDMA_Q_DBG(pq, fmt, ...)                         \
244         hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \
245                  (pq)->subctxt, ##__VA_ARGS__)
246
247 static int user_sdma_send_pkts(struct user_sdma_request *req,
248                                unsigned maxpkts);
249 static int num_user_pages(const struct iovec *iov);
250 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
251 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
252 static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
253 static int pin_vector_pages(struct user_sdma_request *req,
254                             struct user_sdma_iovec *iovec);
255 static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
256                                unsigned start, unsigned npages);
257 static int check_header_template(struct user_sdma_request *req,
258                                  struct hfi1_pkt_header *hdr, u32 lrhlen,
259                                  u32 datalen);
260 static int set_txreq_header(struct user_sdma_request *req,
261                             struct user_sdma_txreq *tx, u32 datalen);
262 static int set_txreq_header_ahg(struct user_sdma_request *req,
263                                 struct user_sdma_txreq *tx, u32 len);
264 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
265                                   struct hfi1_user_sdma_comp_q *cq,
266                                   u16 idx, enum hfi1_sdma_comp_state state,
267                                   int ret);
268 static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags);
269 static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);
270
271 static int defer_packet_queue(
272         struct sdma_engine *sde,
273         struct iowait *wait,
274         struct sdma_txreq *txreq,
275         unsigned int seq);
276 static void activate_packet_queue(struct iowait *wait, int reason);
277 static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
278                            unsigned long len);
279 static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode);
280 static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
281                          void *arg2, bool *stop);
282 static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
283 static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
284
285 static struct mmu_rb_ops sdma_rb_ops = {
286         .filter = sdma_rb_filter,
287         .insert = sdma_rb_insert,
288         .evict = sdma_rb_evict,
289         .remove = sdma_rb_remove,
290         .invalidate = sdma_rb_invalidate
291 };
292
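/*
 * iowait "sleep" callback (registered in hfi1_user_sdma_alloc_queues()),
 * invoked when the SDMA engine cannot accept a txreq.  While the engine
 * is still making progress, retry up to MAX_DEFER_RETRY_COUNT times via
 * -EAGAIN; otherwise mark the packet queue deferred, park it on the
 * engine's dmawait list and return -EBUSY until activate_packet_queue()
 * wakes the queue back up.
 */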
293 static int defer_packet_queue(
294         struct sdma_engine *sde,
295         struct iowait *wait,
296         struct sdma_txreq *txreq,
297         unsigned seq)
298 {
299         struct hfi1_user_sdma_pkt_q *pq =
300                 container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
301         struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
302         struct user_sdma_txreq *tx =
303                 container_of(txreq, struct user_sdma_txreq, txreq);
304
305         if (sdma_progress(sde, seq, txreq)) {
306                 if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
307                         goto eagain;
308         }
309         /*
310          * We are assuming that if the list is enqueued somewhere, it
311          * is on the dmawait list, since that is the only place where
312          * it is supposed to be enqueued.
313          */
314         xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
315         write_seqlock(&dev->iowait_lock);
316         if (list_empty(&pq->busy.list))
317                 list_add_tail(&pq->busy.list, &sde->dmawait);
318         write_sequnlock(&dev->iowait_lock);
319         return -EBUSY;
320 eagain:
321         return -EAGAIN;
322 }
323
324 static void activate_packet_queue(struct iowait *wait, int reason)
325 {
326         struct hfi1_user_sdma_pkt_q *pq =
327                 container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
328         xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
329         wake_up(&wait->wait_dma);
330 }
331
332 static void sdma_kmem_cache_ctor(void *obj)
333 {
334         struct user_sdma_txreq *tx = obj;
335
336         memset(tx, 0, sizeof(*tx));
337 }
338
339 int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
340                                 struct hfi1_filedata *fd)
341 {
342         int ret = -ENOMEM;
343         char buf[64];
344         struct hfi1_devdata *dd;
345         struct hfi1_user_sdma_comp_q *cq;
346         struct hfi1_user_sdma_pkt_q *pq;
347         unsigned long flags;
348
349         if (!uctxt || !fd)
350                 return -EBADF;
351
352         if (!hfi1_sdma_comp_ring_size)
353                 return -EINVAL;
354
355         dd = uctxt->dd;
356
357         pq = kzalloc(sizeof(*pq), GFP_KERNEL);
358         if (!pq)
359                 return -ENOMEM;
360
361         INIT_LIST_HEAD(&pq->list);
362         pq->dd = dd;
363         pq->ctxt = uctxt->ctxt;
364         pq->subctxt = fd->subctxt;
365         pq->n_max_reqs = hfi1_sdma_comp_ring_size;
366         pq->state = SDMA_PKT_Q_INACTIVE;
367         atomic_set(&pq->n_reqs, 0);
368         init_waitqueue_head(&pq->wait);
369         atomic_set(&pq->n_locked, 0);
370         pq->mm = fd->mm;
371
372         iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
373                     activate_packet_queue, NULL);
374         pq->reqidx = 0;
375
376         pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
377                            sizeof(*pq->reqs),
378                            GFP_KERNEL);
379         if (!pq->reqs)
380                 goto pq_reqs_nomem;
381
382         pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
383                                  sizeof(*pq->req_in_use),
384                                  GFP_KERNEL);
385         if (!pq->req_in_use)
386                 goto pq_reqs_no_in_use;
387
388         snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
389                  fd->subctxt);
390         pq->txreq_cache = kmem_cache_create(buf,
391                                             sizeof(struct user_sdma_txreq),
392                                             L1_CACHE_BYTES,
393                                             SLAB_HWCACHE_ALIGN,
394                                             sdma_kmem_cache_ctor);
395         if (!pq->txreq_cache) {
396                 dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
397                            uctxt->ctxt);
398                 goto pq_txreq_nomem;
399         }
400
401         cq = kzalloc(sizeof(*cq), GFP_KERNEL);
402         if (!cq)
403                 goto cq_nomem;
404
405         cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps)
406                                  * hfi1_sdma_comp_ring_size));
407         if (!cq->comps)
408                 goto cq_comps_nomem;
409
410         cq->nentries = hfi1_sdma_comp_ring_size;
411
412         ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
413                                    &pq->handler);
414         if (ret) {
415                 dd_dev_err(dd, "Failed to register with MMU %d", ret);
416                 goto pq_mmu_fail;
417         }
418
419         fd->pq = pq;
420         fd->cq = cq;
421
422         spin_lock_irqsave(&uctxt->sdma_qlock, flags);
423         list_add(&pq->list, &uctxt->sdma_queues);
424         spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
425
426         return 0;
427
428 pq_mmu_fail:
429         vfree(cq->comps);
430 cq_comps_nomem:
431         kfree(cq);
432 cq_nomem:
433         kmem_cache_destroy(pq->txreq_cache);
434 pq_txreq_nomem:
435         kfree(pq->req_in_use);
436 pq_reqs_no_in_use:
437         kfree(pq->reqs);
438 pq_reqs_nomem:
439         kfree(pq);
440
441         return ret;
442 }
443
444 int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd)
445 {
446         struct hfi1_ctxtdata *uctxt = fd->uctxt;
447         struct hfi1_user_sdma_pkt_q *pq;
448         unsigned long flags;
449
450         hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
451                   uctxt->ctxt, fd->subctxt);
452         pq = fd->pq;
453         if (pq) {
454                 if (pq->handler)
455                         hfi1_mmu_rb_unregister(pq->handler);
456                 spin_lock_irqsave(&uctxt->sdma_qlock, flags);
457                 if (!list_empty(&pq->list))
458                         list_del_init(&pq->list);
459                 spin_unlock_irqrestore(&uctxt->sdma_qlock, flags);
460                 iowait_sdma_drain(&pq->busy);
461                 /* Wait until all requests have been freed. */
462                 wait_event_interruptible(
463                         pq->wait,
464                         (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
465                 kfree(pq->reqs);
466                 kfree(pq->req_in_use);
467                 kmem_cache_destroy(pq->txreq_cache);
468                 kfree(pq);
469                 fd->pq = NULL;
470         }
471         if (fd->cq) {
472                 vfree(fd->cq->comps);
473                 kfree(fd->cq);
474                 fd->cq = NULL;
475         }
476         return 0;
477 }
478
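/*
 * Fold a 16-bit DLID into an 8-bit hash and hand out selector values
 * 0..127 round-robin the first time each hash bucket is seen.  The
 * mapping is static, so DLIDs that collide in the hash share a
 * selector; the selector is later combined with the context/subcontext
 * to pick an SDMA engine.
 */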
479 static u8 dlid_to_selector(u16 dlid)
480 {
481         static u8 mapping[256];
482         static int initialized;
483         static u8 next;
484         int hash;
485
486         if (!initialized) {
487                 memset(mapping, 0xFF, 256);
488                 initialized = 1;
489         }
490
491         hash = ((dlid >> 8) ^ dlid) & 0xFF;
492         if (mapping[hash] == 0xFF) {
493                 mapping[hash] = next;
494                 next = (next + 1) & 0x7F;
495         }
496
497         return mapping[hash];
498 }
499
500 int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
501                                    struct iovec *iovec, unsigned long dim,
502                                    unsigned long *count)
503 {
504         int ret = 0, i;
505         struct hfi1_ctxtdata *uctxt = fd->uctxt;
506         struct hfi1_user_sdma_pkt_q *pq = fd->pq;
507         struct hfi1_user_sdma_comp_q *cq = fd->cq;
508         struct hfi1_devdata *dd = pq->dd;
509         unsigned long idx = 0;
510         u8 pcount = initial_pkt_count;
511         struct sdma_req_info info;
512         struct user_sdma_request *req;
513         u8 opcode, sc, vl;
514         int req_queued = 0;
515         u16 dlid;
516         u32 selector;
517
518         if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
519                 hfi1_cdbg(
520                    SDMA,
521                    "[%u:%u:%u] First vector not big enough for header %lu/%lu",
522                    dd->unit, uctxt->ctxt, fd->subctxt,
523                    iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
524                 return -EINVAL;
525         }
526         ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
527         if (ret) {
528                 hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
529                           dd->unit, uctxt->ctxt, fd->subctxt, ret);
530                 return -EFAULT;
531         }
532
533         trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
534                                      (u16 *)&info);
535
536         if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
537                 hfi1_cdbg(SDMA,
538                           "[%u:%u:%u:%u] Invalid comp index",
539                           dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
540                 return -EINVAL;
541         }
542
543         /*
544          * Sanity check the header io vector count.  We need at least 1 vector
545          * (header) and it cannot be larger than the actual io vector count.
546          */
547         if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) {
548                 hfi1_cdbg(SDMA,
549                           "[%u:%u:%u:%u] Invalid iov count %d, dim %ld",
550                           dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
551                           req_iovcnt(info.ctrl), dim);
552                 return -EINVAL;
553         }
554
555         if (!info.fragsize) {
556                 hfi1_cdbg(SDMA,
557                           "[%u:%u:%u:%u] Request does not specify fragsize",
558                           dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
559                 return -EINVAL;
560         }
561
562         /* Try to claim the request. */
563         if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
564                 hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
565                           dd->unit, uctxt->ctxt, fd->subctxt,
566                           info.comp_idx);
567                 return -EBADSLT;
568         }
569         /*
570          * All safety checks have been done and this request has been claimed.
571          */
572         hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
573                   uctxt->ctxt, fd->subctxt, info.comp_idx);
574         req = pq->reqs + info.comp_idx;
575         req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
576         req->data_len  = 0;
577         req->pq = pq;
578         req->cq = cq;
579         req->status = -1;
580         req->ahg_idx = -1;
581         req->iov_idx = 0;
582         req->sent = 0;
583         req->seqnum = 0;
584         req->seqcomp = 0;
585         req->seqsubmitted = 0;
586         req->tids = NULL;
587         req->done = 0;
588         req->has_error = 0;
589         INIT_LIST_HEAD(&req->txps);
590
591         memcpy(&req->info, &info, sizeof(info));
592
593         if (req_opcode(info.ctrl) == EXPECTED) {
594                 /* expected requests must have TID info and at least one data vector */
595                 if (req->data_iovs < 2) {
596                         SDMA_DBG(req,
597                                  "Not enough vectors for expected request");
598                         ret = -EINVAL;
599                         goto free_req;
600                 }
601                 req->data_iovs--;
602         }
603
604         if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
605                 SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
606                          MAX_VECTORS_PER_REQ);
607                 ret = -EINVAL;
608                 goto free_req;
609         }
610         /* Copy the header from the user buffer */
611         ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
612                              sizeof(req->hdr));
613         if (ret) {
614                 SDMA_DBG(req, "Failed to copy header template (%d)", ret);
615                 ret = -EFAULT;
616                 goto free_req;
617         }
618
619         /* If Static rate control is not enabled, sanitize the header. */
620         if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
621                 req->hdr.pbc[2] = 0;
622
623         /* Validate the opcode. Do not trust packets from user space blindly. */
624         opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
625         if ((opcode & USER_OPCODE_CHECK_MASK) !=
626              USER_OPCODE_CHECK_VAL) {
627                 SDMA_DBG(req, "Invalid opcode (%d)", opcode);
628                 ret = -EINVAL;
629                 goto free_req;
630         }
631         /*
632          * Validate the vl. Do not trust packets from user space blindly.
633          * VL comes from PBC, SC comes from LRH, and the VL needs to
634          * match the SC-to-VL lookup.
635          */
636         vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
637         sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
638               (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
639         if (vl >= dd->pport->vls_operational ||
640             vl != sc_to_vlt(dd, sc)) {
641                 SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
642                 ret = -EINVAL;
643                 goto free_req;
644         }
645
646         /* Check the P_KEY for requests from user space */
647         if (egress_pkey_check(dd->pport, req->hdr.lrh, req->hdr.bth, sc,
648                               PKEY_CHECK_INVALID)) {
649                 ret = -EINVAL;
650                 goto free_req;
651         }
652
653         /*
654          * We should also check the BTH.lnh. If it says the next header is a GRH, then
655          * the RXE parsing will be off and will land in the middle of the KDETH
656          * or miss it entirely.
657          */
658         if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
659                 SDMA_DBG(req, "User tried to pass in a GRH");
660                 ret = -EINVAL;
661                 goto free_req;
662         }
663
664         req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
665         /*
666          * Calculate the initial TID offset based on the values of
667          * KDETH.OFFSET and KDETH.OM that are passed in.
668          */
669         req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
670                 (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
671                  KDETH_OM_LARGE : KDETH_OM_SMALL);
672         SDMA_DBG(req, "Initial TID offset %u", req->tidoffset);
673         idx++;
674
675         /* Save all the IO vector structures */
676         for (i = 0; i < req->data_iovs; i++) {
677                 req->iovs[i].offset = 0;
678                 INIT_LIST_HEAD(&req->iovs[i].list);
679                 memcpy(&req->iovs[i].iov,
680                        iovec + idx++,
681                        sizeof(req->iovs[i].iov));
682                 ret = pin_vector_pages(req, &req->iovs[i]);
683                 if (ret) {
684                         req->data_iovs = i;
685                         req->status = ret;
686                         goto free_req;
687                 }
688                 req->data_len += req->iovs[i].iov.iov_len;
689         }
690         SDMA_DBG(req, "total data length %u", req->data_len);
691
692         if (pcount > req->info.npkts)
693                 pcount = req->info.npkts;
694         /*
695          * Copy any TID info
696          * User space will provide the TID info only when the
697          * request type is EXPECTED. This is true even if there is
698          * only one packet in the request and the header is already
699          * setup. The reason for the singular TID case is that the
700          * driver needs to perform safety checks.
701          */
702         if (req_opcode(req->info.ctrl) == EXPECTED) {
703                 u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
704                 u32 *tmp;
705
706                 if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
707                         ret = -EINVAL;
708                         goto free_req;
709                 }
710
711                 /*
712                  * We have to copy all of the tids because they may vary
713                  * in size and, therefore, the TID count might not be
714                  * equal to the pkt count. However, there is no way to
715                  * tell at this point.
716                  */
717                 tmp = memdup_user(iovec[idx].iov_base,
718                                   ntids * sizeof(*req->tids));
719                 if (IS_ERR(tmp)) {
720                         ret = PTR_ERR(tmp);
721                         SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
722                                  ntids, ret);
723                         goto free_req;
724                 }
725                 req->tids = tmp;
726                 req->n_tids = ntids;
727                 req->tididx = 0;
728                 idx++;
729         }
730
731         dlid = be16_to_cpu(req->hdr.lrh[1]);
732         selector = dlid_to_selector(dlid);
733         selector += uctxt->ctxt + fd->subctxt;
734         req->sde = sdma_select_user_engine(dd, selector, vl);
735
736         if (!req->sde || !sdma_running(req->sde)) {
737                 ret = -ECOMM;
738                 goto free_req;
739         }
740
741         /* We don't need an AHG entry if the request contains only one packet */
742         if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG))
743                 req->ahg_idx = sdma_ahg_alloc(req->sde);
744
745         set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
746         atomic_inc(&pq->n_reqs);
747         req_queued = 1;
748         /* Send the first N packets in the request to buy us some time */
749         ret = user_sdma_send_pkts(req, pcount);
750         if (unlikely(ret < 0 && ret != -EBUSY)) {
751                 req->status = ret;
752                 goto free_req;
753         }
754
755         /*
756          * It is possible that the SDMA engine would have processed all the
757          * submitted packets by the time we get here. Therefore, only set
758          * packet queue state to ACTIVE if there are still uncompleted
759          * requests.
760          */
761         if (atomic_read(&pq->n_reqs))
762                 xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
763
764         /*
765          * This is a somewhat blocking send implementation.
766          * The driver will block the caller until all packets of the
767          * request have been submitted to the SDMA engine. However, it
768          * will not wait for send completions.
769          */
770         while (req->seqsubmitted != req->info.npkts) {
771                 ret = user_sdma_send_pkts(req, pcount);
772                 if (ret < 0) {
773                         if (ret != -EBUSY) {
774                                 req->status = ret;
775                                 WRITE_ONCE(req->has_error, 1);
776                                 if (ACCESS_ONCE(req->seqcomp) ==
777                                     req->seqsubmitted - 1)
778                                         goto free_req;
779                                 return ret;
780                         }
781                         wait_event_interruptible_timeout(
782                                 pq->busy.wait_dma,
783                                 (pq->state == SDMA_PKT_Q_ACTIVE),
784                                 msecs_to_jiffies(
785                                         SDMA_IOWAIT_TIMEOUT));
786                 }
787         }
788         *count += idx;
789         return 0;
790 free_req:
791         user_sdma_free_request(req, true);
792         if (req_queued)
793                 pq_update(pq);
794         set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
795         return ret;
796 }
797
798 static inline u32 compute_data_length(struct user_sdma_request *req,
799                                       struct user_sdma_txreq *tx)
800 {
801         /*
802          * Determine the proper size of the packet data.
803          * The size of the data of the first packet is in the header
804          * template. However, it includes the header and ICRC, which need
805          * to be subtracted.
806          * The minimum representable packet data length in a header is 4 bytes,
807          * therefore, when the data length request is less than 4 bytes, there's
808          * only one packet, and the packet data length is equal to that of the
809          * request data length.
810          * The size of the remaining packets is the minimum of the frag
811          * size (MTU) or remaining data in the request.
812          */
813         u32 len;
814
815         if (!req->seqnum) {
816                 if (req->data_len < sizeof(u32))
817                         len = req->data_len;
818                 else
819                         len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
820                                (sizeof(tx->hdr) - 4));
821         } else if (req_opcode(req->info.ctrl) == EXPECTED) {
822                 u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
823                         PAGE_SIZE;
824                 /*
825                  * Get the data length based on the remaining space in the
826                  * TID pair.
827                  */
828                 len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
829                 /* If we've filled up the TID pair, move to the next one. */
830                 if (unlikely(!len) && ++req->tididx < req->n_tids &&
831                     req->tids[req->tididx]) {
832                         tidlen = EXP_TID_GET(req->tids[req->tididx],
833                                              LEN) * PAGE_SIZE;
834                         req->tidoffset = 0;
835                         len = min_t(u32, tidlen, req->info.fragsize);
836                 }
837                 /*
838                  * Since the TID pairs map entire pages, make sure that we
839                  * are not going to try to send more data than we have
840                  * remaining.
841                  */
842                 len = min(len, req->data_len - req->sent);
843         } else {
844                 len = min(req->data_len - req->sent, (u32)req->info.fragsize);
845         }
846         SDMA_DBG(req, "Data Length = %u", len);
847         return len;
848 }
849
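/*
 * Round a payload length up to the next multiple of 4 bytes (dword
 * padding), e.g. pad_len(5) == 8 and pad_len(8) == 8.
 */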
850 static inline u32 pad_len(u32 len)
851 {
852         if (len & (sizeof(u32) - 1))
853                 len += sizeof(u32) - (len & (sizeof(u32) - 1));
854         return len;
855 }
856
857 static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
858 {
859         /* (Size of complete header - size of PBC) + 4B ICRC + data length */
860         return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
861 }
862
863 static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
864 {
865         int ret = 0, count;
866         unsigned npkts = 0;
867         struct user_sdma_txreq *tx = NULL;
868         struct hfi1_user_sdma_pkt_q *pq = NULL;
869         struct user_sdma_iovec *iovec = NULL;
870
871         if (!req->pq)
872                 return -EINVAL;
873
874         pq = req->pq;
875
876         /* If tx completion has reported an error, we are done. */
877         if (READ_ONCE(req->has_error))
878                 return -EFAULT;
879
880         /*
881          * Check if we might have sent the entire request already
882          */
883         if (unlikely(req->seqnum == req->info.npkts)) {
884                 if (!list_empty(&req->txps))
885                         goto dosend;
886                 return ret;
887         }
888
889         if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
890                 maxpkts = req->info.npkts - req->seqnum;
891
892         while (npkts < maxpkts) {
893                 u32 datalen = 0, queued = 0, data_sent = 0;
894                 u64 iov_offset = 0;
895
896                 /*
897                  * Check whether any of the completions have come back
898                  * with errors. If so, we are not going to process any
899                  * more packets from this request.
900                  */
901                 if (READ_ONCE(req->has_error))
902                         return -EFAULT;
903
904                 tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
905                 if (!tx)
906                         return -ENOMEM;
907
908                 tx->flags = 0;
909                 tx->req = req;
910                 tx->busycount = 0;
911                 INIT_LIST_HEAD(&tx->list);
912
913                 /*
914                  * For the last packet set the ACK request
915                  * and disable header suppression.
916                  */
917                 if (req->seqnum == req->info.npkts - 1)
918                         tx->flags |= (TXREQ_FLAGS_REQ_ACK |
919                                       TXREQ_FLAGS_REQ_DISABLE_SH);
920
921                 /*
922                  * Calculate the payload size - this is the minimum of the
923                  * fragment (MTU) size and the remaining bytes in the request,
924                  * but only if we have payload data.
925                  */
926                 if (req->data_len) {
927                         iovec = &req->iovs[req->iov_idx];
928                         if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
929                                 if (++req->iov_idx == req->data_iovs) {
930                                         ret = -EFAULT;
931                                         goto free_txreq;
932                                 }
933                                 iovec = &req->iovs[req->iov_idx];
934                                 WARN_ON(iovec->offset);
935                         }
936
937                         datalen = compute_data_length(req, tx);
938
939                         /*
940                          * Disable header suppression for payloads <= 8 DWs.
941                          * If there is an uncorrectable error in the receive
942                          * data FIFO when the received payload size is less than
943                          * or equal to 8 DWs, then RxDmaDataFifoRdUncErr is not
944                          * reported.  RHF.EccErr is set instead if the header
945                          * is not suppressed.
946                          */
947                         if (!datalen) {
948                                 SDMA_DBG(req,
949                                          "Request has data but pkt len is 0");
950                                 ret = -EFAULT;
951                                 goto free_tx;
952                         } else if (datalen <= 32) {
953                                 tx->flags |= TXREQ_FLAGS_REQ_DISABLE_SH;
954                         }
955                 }
956
957                 if (req->ahg_idx >= 0) {
958                         if (!req->seqnum) {
959                                 u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
960                                 u32 lrhlen = get_lrh_len(req->hdr,
961                                                          pad_len(datalen));
962                                 /*
963                                  * Copy the request header into the tx header
964                                  * because the HW needs a cacheline-aligned
965                                  * address.
966                                  * This copy could be optimized out if the hdr
967                                  * member of user_sdma_request were also
968                                  * cacheline aligned.
969                                  */
970                                 memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
971                                 if (PBC2LRH(pbclen) != lrhlen) {
972                                         pbclen = (pbclen & 0xf000) |
973                                                 LRH2PBC(lrhlen);
974                                         tx->hdr.pbc[0] = cpu_to_le16(pbclen);
975                                 }
976                                 ret = check_header_template(req, &tx->hdr,
977                                                             lrhlen, datalen);
978                                 if (ret)
979                                         goto free_tx;
980                                 ret = sdma_txinit_ahg(&tx->txreq,
981                                                       SDMA_TXREQ_F_AHG_COPY,
982                                                       sizeof(tx->hdr) + datalen,
983                                                       req->ahg_idx, 0, NULL, 0,
984                                                       user_sdma_txreq_cb);
985                                 if (ret)
986                                         goto free_tx;
987                                 ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq,
988                                                         &tx->hdr,
989                                                         sizeof(tx->hdr));
990                                 if (ret)
991                                         goto free_txreq;
992                         } else {
993                                 int changes;
994
995                                 changes = set_txreq_header_ahg(req, tx,
996                                                                datalen);
997                                 if (changes < 0)
998                                         goto free_tx;
999                         }
1000                 } else {
1001                         ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
1002                                           datalen, user_sdma_txreq_cb);
1003                         if (ret)
1004                                 goto free_tx;
1005                         /*
1006                          * Modify the header for this packet. This only needs
1007                          * to be done if we are not going to use AHG. Otherwise,
1008                          * the HW will do it based on the changes we gave it
1009                          * during sdma_txinit_ahg().
1010                          */
1011                         ret = set_txreq_header(req, tx, datalen);
1012                         if (ret)
1013                                 goto free_txreq;
1014                 }
1015
1016                 /*
1017                  * If the request contains any data vectors, add up to
1018                  * fragsize bytes to the descriptor.
1019                  */
1020                 while (queued < datalen &&
1021                        (req->sent + data_sent) < req->data_len) {
1022                         unsigned long base, offset;
1023                         unsigned pageidx, len;
1024
1025                         base = (unsigned long)iovec->iov.iov_base;
1026                         offset = offset_in_page(base + iovec->offset +
1027                                                 iov_offset);
1028                         pageidx = (((iovec->offset + iov_offset +
1029                                      base) - (base & PAGE_MASK)) >> PAGE_SHIFT);
1030                         len = offset + req->info.fragsize > PAGE_SIZE ?
1031                                 PAGE_SIZE - offset : req->info.fragsize;
1032                         len = min((datalen - queued), len);
1033                         ret = sdma_txadd_page(pq->dd, &tx->txreq,
1034                                               iovec->pages[pageidx],
1035                                               offset, len);
1036                         if (ret) {
1037                                 SDMA_DBG(req, "SDMA txreq add page failed %d\n",
1038                                          ret);
1039                                 goto free_txreq;
1040                         }
1041                         iov_offset += len;
1042                         queued += len;
1043                         data_sent += len;
1044                         if (unlikely(queued < datalen &&
1045                                      pageidx == iovec->npages &&
1046                                      req->iov_idx < req->data_iovs - 1)) {
1047                                 iovec->offset += iov_offset;
1048                                 iovec = &req->iovs[++req->iov_idx];
1049                                 iov_offset = 0;
1050                         }
1051                 }
1052                 /*
1053                  * The txreq was built successfully, so we can update the
1054                  * counters and queue it for submission.
1055                  */
1056                 req->koffset += datalen;
1057                 if (req_opcode(req->info.ctrl) == EXPECTED)
1058                         req->tidoffset += datalen;
1059                 req->sent += data_sent;
1060                 if (req->data_len)
1061                         iovec->offset += iov_offset;
1062                 list_add_tail(&tx->txreq.list, &req->txps);
1063                 /*
1064                  * It is important to increment this here as it is used to
1065                  * generate the BTH.PSN and, therefore, can't be bulk-updated
1066                  * outside of the loop.
1067                  */
1068                 tx->seqnum = req->seqnum++;
1069                 npkts++;
1070         }
1071 dosend:
1072         ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
1073         req->seqsubmitted += count;
1074         if (req->seqsubmitted == req->info.npkts) {
1075                 WRITE_ONCE(req->done, 1);
1076                 /*
1077                  * The txreq has already been submitted to the HW queue
1078                  * so we can free the AHG entry now. Corruption will not
1079                  * happen due to the sequential manner in which
1080                  * descriptors are processed.
1081                  */
1082                 if (req->ahg_idx >= 0)
1083                         sdma_ahg_free(req->sde, req->ahg_idx);
1084         }
1085         return ret;
1086
1087 free_txreq:
1088         sdma_txclean(pq->dd, &tx->txreq);
1089 free_tx:
1090         kmem_cache_free(pq->txreq_cache, tx);
1091         return ret;
1092 }
1093
1094 /*
1095  * How many pages in this iovec element?
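 * For example, with 4 KiB pages, iov_base == 0x1ffc and iov_len == 8
 * span two pages even though only 8 bytes are referenced.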
1096  */
1097 static inline int num_user_pages(const struct iovec *iov)
1098 {
1099         const unsigned long addr  = (unsigned long)iov->iov_base;
1100         const unsigned long len   = iov->iov_len;
1101         const unsigned long spage = addr & PAGE_MASK;
1102         const unsigned long epage = (addr + len - 1) & PAGE_MASK;
1103
1104         return 1 + ((epage - spage) >> PAGE_SHIFT);
1105 }
1106
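/*
 * Ask the MMU rb-tree layer to evict cached pinnings until roughly
 * npages pages have been released; returns the number of pages actually
 * cleared (accumulated in struct evict_data by the eviction callback).
 */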
1107 static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
1108 {
1109         struct evict_data evict_data;
1110
1111         evict_data.cleared = 0;
1112         evict_data.target = npages;
1113         hfi1_mmu_rb_evict(pq->handler, &evict_data);
1114         return evict_data.cleared;
1115 }
1116
1117 static int pin_vector_pages(struct user_sdma_request *req,
1118                             struct user_sdma_iovec *iovec)
1119 {
1120         int ret = 0, pinned, npages, cleared;
1121         struct page **pages;
1122         struct hfi1_user_sdma_pkt_q *pq = req->pq;
1123         struct sdma_mmu_node *node = NULL;
1124         struct mmu_rb_node *rb_node;
1125         bool extracted;
1126
1127         extracted =
1128                 hfi1_mmu_rb_remove_unless_exact(pq->handler,
1129                                                 (unsigned long)
1130                                                 iovec->iov.iov_base,
1131                                                 iovec->iov.iov_len, &rb_node);
1132         if (rb_node) {
1133                 node = container_of(rb_node, struct sdma_mmu_node, rb);
1134                 if (!extracted) {
1135                         atomic_inc(&node->refcount);
1136                         iovec->pages = node->pages;
1137                         iovec->npages = node->npages;
1138                         iovec->node = node;
1139                         return 0;
1140                 }
1141         }
1142
1143         if (!node) {
1144                 node = kzalloc(sizeof(*node), GFP_KERNEL);
1145                 if (!node)
1146                         return -ENOMEM;
1147
1148                 node->rb.addr = (unsigned long)iovec->iov.iov_base;
1149                 node->pq = pq;
1150                 atomic_set(&node->refcount, 0);
1151         }
1152
1153         npages = num_user_pages(&iovec->iov);
1154         if (node->npages < npages) {
1155                 pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1156                 if (!pages) {
1157                         SDMA_DBG(req, "Failed page array alloc");
1158                         ret = -ENOMEM;
1159                         goto bail;
1160                 }
1161                 memcpy(pages, node->pages, node->npages * sizeof(*pages));
1162
1163                 npages -= node->npages;
1164
1165 retry:
1166                 if (!hfi1_can_pin_pages(pq->dd, pq->mm,
1167                                         atomic_read(&pq->n_locked), npages)) {
1168                         cleared = sdma_cache_evict(pq, npages);
1169                         if (cleared >= npages)
1170                                 goto retry;
1171                 }
1172                 pinned = hfi1_acquire_user_pages(pq->mm,
1173                         ((unsigned long)iovec->iov.iov_base +
1174                          (node->npages * PAGE_SIZE)), npages, 0,
1175                         pages + node->npages);
1176                 if (pinned < 0) {
1177                         kfree(pages);
1178                         ret = pinned;
1179                         goto bail;
1180                 }
1181                 if (pinned != npages) {
1182                         unpin_vector_pages(pq->mm, pages, node->npages,
1183                                            pinned);
1184                         ret = -EFAULT;
1185                         goto bail;
1186                 }
1187                 kfree(node->pages);
1188                 node->rb.len = iovec->iov.iov_len;
1189                 node->pages = pages;
1190                 node->npages += pinned;
1191                 npages = node->npages;
1192                 atomic_add(pinned, &pq->n_locked);
1193         }
1194         iovec->pages = node->pages;
1195         iovec->npages = npages;
1196         iovec->node = node;
1197
1198         ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
1199         if (ret) {
1200                 atomic_sub(node->npages, &pq->n_locked);
1201                 iovec->node = NULL;
1202                 goto bail;
1203         }
1204         return 0;
1205 bail:
1206         if (rb_node)
1207                 unpin_vector_pages(pq->mm, node->pages, 0, node->npages);
1208         kfree(node);
1209         return ret;
1210 }
1211
1212 static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
1213                                unsigned start, unsigned npages)
1214 {
1215         hfi1_release_user_pages(mm, pages + start, npages, false);
1216         kfree(pages);
1217 }
1218
1219 static int check_header_template(struct user_sdma_request *req,
1220                                  struct hfi1_pkt_header *hdr, u32 lrhlen,
1221                                  u32 datalen)
1222 {
1223         /*
1224          * Perform safety checks for any type of packet:
1225          *    - transfer size is a multiple of 64 bytes
1226          *    - packet length is a multiple of 4 bytes
1227          *    - packet length is not larger than the MTU size
1228          *
1229          * These checks are only done for the first packet of the
1230          * transfer since the header is "given" to us by user space.
1231          * For the remainder of the packets we compute the values.
1232          */
1233         if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
1234             lrhlen > get_lrh_len(*hdr, req->info.fragsize))
1235                 return -EINVAL;
1236
1237         if (req_opcode(req->info.ctrl) == EXPECTED) {
1238                 /*
1239                  * The header is checked only on the first packet. Furthermore,
1240                  * we ensure that at least one TID entry is copied when the
1241                  * request is submitted. Therefore, we don't have to verify that
1242                  * tididx points to something sane.
1243                  */
1244                 u32 tidval = req->tids[req->tididx],
1245                         tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
1246                         tididx = EXP_TID_GET(tidval, IDX),
1247                         tidctrl = EXP_TID_GET(tidval, CTRL),
1248                         tidoff;
1249                 __le32 kval = hdr->kdeth.ver_tid_offset;
1250
1251                 tidoff = KDETH_GET(kval, OFFSET) *
1252                           (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
1253                            KDETH_OM_LARGE : KDETH_OM_SMALL);
1254                 /*
1255                  * Expected receive packets have the following
1256                  * additional checks:
1257                  *     - offset is not larger than the TID size
1258                  *     - TIDCtrl values match between header and TID array
1259                  *     - TID indexes match between header and TID array
1260                  */
1261                 if ((tidoff + datalen > tidlen) ||
1262                     KDETH_GET(kval, TIDCTRL) != tidctrl ||
1263                     KDETH_GET(kval, TID) != tididx)
1264                         return -EINVAL;
1265         }
1266         return 0;
1267 }
1268
1269 /*
1270  * Correctly set the BTH.PSN field based on type of
1271  * transfer - eager packets can just increment the PSN but
1272  * expected packets encode generation and sequence in the
1273  * BTH.PSN field so just incrementing will result in errors.
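 * For example, with BTH_SEQ_MASK == 0x7ff, a starting PSN of 0x127ff and
 * frags == 1: an expected transfer keeps the generation bits and wraps
 * the 11-bit sequence to 0x12000, while an eager transfer simply
 * advances to 0x12800.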
1274  */
1275 static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
1276 {
1277         u32 val = be32_to_cpu(bthpsn),
1278                 mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
1279                         0xffffffull),
1280                 psn = val & mask;
1281         if (expct)
1282                 psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
1283         else
1284                 psn = psn + frags;
1285         return psn & mask;
1286 }
1287
1288 static int set_txreq_header(struct user_sdma_request *req,
1289                             struct user_sdma_txreq *tx, u32 datalen)
1290 {
1291         struct hfi1_user_sdma_pkt_q *pq = req->pq;
1292         struct hfi1_pkt_header *hdr = &tx->hdr;
1293         u8 omfactor; /* KDETH.OM */
1294         u16 pbclen;
1295         int ret;
1296         u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
1297
1298         /* Copy the header template from the request before modification */
1299         memcpy(hdr, &req->hdr, sizeof(*hdr));
1300
1301         /*
1302          * Check if the PBC and LRH length are mismatched. If so
1303          * adjust both in the header.
1304          */
1305         pbclen = le16_to_cpu(hdr->pbc[0]);
1306         if (PBC2LRH(pbclen) != lrhlen) {
1307                 pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
1308                 hdr->pbc[0] = cpu_to_le16(pbclen);
1309                 hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
1310                 /*
1311                  * Third packet
1312                  * This is the first packet in the sequence that has
1313                  * a "static" size that can be used for the rest of
1314                  * the packets (besides the last one).
1315                  */
1316                 if (unlikely(req->seqnum == 2)) {
1317                         /*
1318                          * From this point on the lengths in both the
1319                          * PBC and LRH are the same until the last
1320                          * packet.
1321                          * Adjust the template so we don't have to update
1322                          * every packet
1323                          */
1324                         req->hdr.pbc[0] = hdr->pbc[0];
1325                         req->hdr.lrh[2] = hdr->lrh[2];
1326                 }
1327         }
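        /*
         * For illustration: only the low 12 bits of pbc[0] carry
         * PbcLengthDWs (the 0xf000 mask preserves the upper nibble),
         * and lrh[2] holds the packet length in 4-byte words, hence
         * lrhlen >> 2.  LRH2PBC() converts the byte length into PBC
         * dwords (typically including the PBC dword itself).
         */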
1328         /*
1329          * We only have to modify the header if this is not the
1330          * first packet in the request. Otherwise, we use the
1331          * header given to us.
1332          */
1333         if (unlikely(!req->seqnum)) {
1334                 ret = check_header_template(req, hdr, lrhlen, datalen);
1335                 if (ret)
1336                         return ret;
1337                 goto done;
1338         }
1339
1340         hdr->bth[2] = cpu_to_be32(
1341                 set_pkt_bth_psn(hdr->bth[2],
1342                                 (req_opcode(req->info.ctrl) == EXPECTED),
1343                                 req->seqnum));
1344
1345         /* Set ACK request on last packet */
1346         if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
1347                 hdr->bth[2] |= cpu_to_be32(1UL << 31);
1348
1349         /* Set the new offset */
1350         hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
1351         /* Expected packets have to fill in the new TID information */
1352         if (req_opcode(req->info.ctrl) == EXPECTED) {
1353                 tidval = req->tids[req->tididx];
1354                 /*
1355                  * If the offset puts us at the end of the current TID,
1356                  * advance everything.
1357                  */
1358                 if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
1359                                          PAGE_SIZE)) {
1360                         req->tidoffset = 0;
1361                         /*
1362                          * Since we don't copy all the TIDs all at once,
1363                          * we have to check again.
1364                          */
1365                         if (++req->tididx > req->n_tids - 1 ||
1366                             !req->tids[req->tididx]) {
1367                                 return -EINVAL;
1368                         }
1369                         tidval = req->tids[req->tididx];
1370                 }
1371                 omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
1372                         KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE_SHIFT :
1373                         KDETH_OM_SMALL_SHIFT;
1374                 /* Set KDETH.TIDCtrl based on value for this TID. */
1375                 KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
1376                           EXP_TID_GET(tidval, CTRL));
1377                 /* Set KDETH.TID based on value for this TID */
1378                 KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
1379                           EXP_TID_GET(tidval, IDX));
1380                 /* Clear KDETH.SH when DISABLE_SH flag is set */
1381                 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH))
1382                         KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
1383                 /*
1384                  * Set the KDETH.OFFSET and KDETH.OM based on the size of
1385                  * the transfer (see the worked example after this function).
1386                  */
1387                 SDMA_DBG(req, "TID offset %u bytes, %u units, om %u",
1388                          req->tidoffset, req->tidoffset >> omfactor,
1389                          omfactor != KDETH_OM_SMALL_SHIFT);
1390                 KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
1391                           req->tidoffset >> omfactor);
1392                 KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
1393                           omfactor != KDETH_OM_SMALL_SHIFT);
1394         }
1395 done:
1396         trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
1397                                     req->info.comp_idx, hdr, tidval);
1398         return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
1399 }
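/*
 * Worked example for the KDETH.OM/OFFSET selection in
 * set_txreq_header() (assuming the usual values
 * KDETH_OM_SMALL_SHIFT == 2, KDETH_OM_LARGE_SHIFT == 6 and
 * KDETH_OM_MAX_SIZE == 32768): a 16-page TID of 4 KiB pages spans
 * 65536 bytes, which is >= KDETH_OM_MAX_SIZE, so OM is "large" and a
 * tidoffset of 8192 bytes is encoded as 8192 >> 6 == 128 units of 64
 * bytes.  A 2-page TID stays in "small" mode, where an offset of 4096
 * bytes is encoded as 4096 >> 2 == 1024 units of 4 bytes.
 */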
1400
1401 static int set_txreq_header_ahg(struct user_sdma_request *req,
1402                                 struct user_sdma_txreq *tx, u32 datalen)
1403 {
1404         u32 ahg[AHG_KDETH_ARRAY_SIZE];
1405         int diff = 0;
1406         u8 omfactor; /* KDETH.OM */
1407         struct hfi1_user_sdma_pkt_q *pq = req->pq;
1408         struct hfi1_pkt_header *hdr = &req->hdr;
1409         u16 pbclen = le16_to_cpu(hdr->pbc[0]);
1410         u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
1411
1412         if (PBC2LRH(pbclen) != lrhlen) {
1413                 /* PBC.PbcLengthDWs */
1414                 AHG_HEADER_SET(ahg, diff, 0, 0, 12,
1415                                cpu_to_le16(LRH2PBC(lrhlen)));
1416                 /* LRH.PktLen (we need the full 16 bits due to byte swap) */
1417                 AHG_HEADER_SET(ahg, diff, 3, 0, 16,
1418                                cpu_to_be16(lrhlen >> 2));
1419         }
1420
1421         /*
1422          * Do the updates common to eager and expected packets
1423          */
1424         /* BTH.PSN and BTH.A */
1425         val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
1426                 (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
1427         if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
1428                 val32 |= 1UL << 31;
1429         AHG_HEADER_SET(ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
1430         AHG_HEADER_SET(ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
1431         /* KDETH.Offset */
1432         AHG_HEADER_SET(ahg, diff, 15, 0, 16,
1433                        cpu_to_le16(req->koffset & 0xffff));
1434         AHG_HEADER_SET(ahg, diff, 15, 16, 16, cpu_to_le16(req->koffset >> 16));
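        /*
         * The AHG_HEADER_SET() calls in this function queue "header
         * diff" updates of the form (header dword index, bit offset,
         * field width, value): the two BTH.PSN stores above rewrite
         * the high and low 16-bit halves of header dword 6, and the
         * KDETH.Offset stores do the same for dword 15.  Each call
         * appends an entry to ahg[] and advances diff; a negative
         * diff is treated as an error further down.
         */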
1435         if (req_opcode(req->info.ctrl) == EXPECTED) {
1436                 __le16 val;
1437
1438                 tidval = req->tids[req->tididx];
1439
1440                 /*
1441                  * If the offset puts us at the end of the current TID,
1442                  * advance everything.
1443                  */
1444                 if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
1445                                          PAGE_SIZE)) {
1446                         req->tidoffset = 0;
1447                         /*
1448                          * Since we don't copy all the TIDs all at once,
1449                          * we have to check again.
1450                          */
1451                         if (++req->tididx > req->n_tids - 1 ||
1452                             !req->tids[req->tididx])
1453                                 return -EINVAL;
1454                         tidval = req->tids[req->tididx];
1455                 }
1456                 omfactor = ((EXP_TID_GET(tidval, LEN) *
1457                                   PAGE_SIZE) >=
1458                                  KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
1459                                  KDETH_OM_SMALL_SHIFT;
1460                 /* KDETH.OM and KDETH.OFFSET (TID) */
1461                 AHG_HEADER_SET(ahg, diff, 7, 0, 16,
1462                                ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
1463                                 ((req->tidoffset >> omfactor)
1464                                  & 0x7fff)));
1465                 /* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
1466                 val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
1467                                    (EXP_TID_GET(tidval, IDX) & 0x3ff));
1468
1469                 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) {
1470                         val |= cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
1471                                                       INTR) <<
1472                                             AHG_KDETH_INTR_SHIFT));
1473                 } else {
1474                         val |= KDETH_GET(hdr->kdeth.ver_tid_offset, SH) ?
1475                                cpu_to_le16(0x1 << AHG_KDETH_SH_SHIFT) :
1476                                cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
1477                                                       INTR) <<
1478                                              AHG_KDETH_INTR_SHIFT));
1479                 }
1480
1481                 AHG_HEADER_SET(ahg, diff, 7, 16, 14, val);
1482         }
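        /*
         * In the expected-packet branch above, val packs EXP_TID CTRL
         * into bits 11:10 and IDX into bits 9:0 of the 14-bit field
         * written at header dword 7, bits 29:16; the SH and INTR bits
         * occupy the remaining positions via AHG_KDETH_SH_SHIFT and
         * AHG_KDETH_INTR_SHIFT.
         */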
1483         if (diff < 0)
1484                 return diff;
1485
1486         trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
1487                                         req->info.comp_idx, req->sde->this_idx,
1488                                         req->ahg_idx, ahg, diff, tidval);
1489         sdma_txinit_ahg(&tx->txreq,
1490                         SDMA_TXREQ_F_USE_AHG,
1491                         datalen, req->ahg_idx, diff,
1492                         ahg, sizeof(req->hdr),
1493                         user_sdma_txreq_cb);
1494
1495         return diff;
1496 }
1497
1498 /*
1499  * SDMA tx request completion callback. Called when the SDMA progress
1500  * state machine gets notification that the SDMA descriptors for this
1501  * tx request have been processed by the DMA engine. Called in
1502  * interrupt context.
1503  */
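/*
 * A note on the completion bookkeeping below: seqcomp records the last
 * tx retired by the DMA engine, while seqsubmitted counts the txs
 * handed to the SDMA engine.  A request is freed either when its final
 * packet (npkts - 1) completes successfully, or, on the error path,
 * once every submitted tx has completed and the submitter has marked
 * the request done or in error.
 */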
1504 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
1505 {
1506         struct user_sdma_txreq *tx =
1507                 container_of(txreq, struct user_sdma_txreq, txreq);
1508         struct user_sdma_request *req;
1509         struct hfi1_user_sdma_pkt_q *pq;
1510         struct hfi1_user_sdma_comp_q *cq;
1511         u16 idx;
1512
1513         if (!tx->req)
1514                 return;
1515
1516         req = tx->req;
1517         pq = req->pq;
1518         cq = req->cq;
1519
1520         if (status != SDMA_TXREQ_S_OK) {
1521                 SDMA_DBG(req, "SDMA completion with error %d",
1522                          status);
1523                 WRITE_ONCE(req->has_error, 1);
1524         }
1525
1526         req->seqcomp = tx->seqnum;
1527         kmem_cache_free(pq->txreq_cache, tx);
1528         tx = NULL;
1529
1530         idx = req->info.comp_idx;
1531         if (req->status == -1 && status == SDMA_TXREQ_S_OK) {
1532                 if (req->seqcomp == req->info.npkts - 1) {
1533                         req->status = 0;
1534                         user_sdma_free_request(req, false);
1535                         pq_update(pq);
1536                         set_comp_state(pq, cq, idx, COMPLETE, 0);
1537                 }
1538         } else {
1539                 if (status != SDMA_TXREQ_S_OK)
1540                         req->status = status;
1541                 if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) &&
1542                     (READ_ONCE(req->done) ||
1543                      READ_ONCE(req->has_error))) {
1544                         user_sdma_free_request(req, false);
1545                         pq_update(pq);
1546                         set_comp_state(pq, cq, idx, ERROR, req->status);
1547                 }
1548         }
1549 }
1550
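/*
 * Drop the packet queue's outstanding-request count.  When the last
 * in-flight request completes, mark the queue inactive and wake up
 * anyone waiting on pq->wait (for example, a caller waiting for the
 * queue to drain).
 */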
1551 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
1552 {
1553         if (atomic_dec_and_test(&pq->n_reqs)) {
1554                 xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
1555                 wake_up(&pq->wait);
1556         }
1557 }
1558
1559 static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
1560 {
1561         if (!list_empty(&req->txps)) {
1562                 struct sdma_txreq *t, *p;
1563
1564                 list_for_each_entry_safe(t, p, &req->txps, list) {
1565                         struct user_sdma_txreq *tx =
1566                                 container_of(t, struct user_sdma_txreq, txreq);
1567                         list_del_init(&t->list);
1568                         sdma_txclean(req->pq->dd, t);
1569                         kmem_cache_free(req->pq->txreq_cache, tx);
1570                 }
1571         }
1572         if (req->data_iovs) {
1573                 struct sdma_mmu_node *node;
1574                 int i;
1575
1576                 for (i = 0; i < req->data_iovs; i++) {
1577                         node = req->iovs[i].node;
1578                         if (!node)
1579                                 continue;
1580
1581                         if (unpin)
1582                                 hfi1_mmu_rb_remove(req->pq->handler,
1583                                                    &node->rb);
1584                         else
1585                                 atomic_dec(&node->refcount);
1586                 }
1587         }
1588         kfree(req->tids);
1589         clear_bit(req->info.comp_idx, req->pq->req_in_use);
1590 }
1591
1592 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
1593                                   struct hfi1_user_sdma_comp_q *cq,
1594                                   u16 idx, enum hfi1_sdma_comp_state state,
1595                                   int ret)
1596 {
1597         hfi1_cdbg(SDMA, "[%u:%u:%u:%u] Setting completion status %u %d",
1598                   pq->dd->unit, pq->ctxt, pq->subctxt, idx, state, ret);
1599         if (state == ERROR)
1600                 cq->comps[idx].errcode = -ret;
1601         smp_wmb(); /* make sure errcode is visible first */
1602         cq->comps[idx].status = state;
1603         trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
1604                                         idx, state, ret);
1605 }
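/*
 * The smp_wmb() in set_comp_state() orders the errcode store before
 * the status store.  A consumer of the completion ring is expected to
 * pair it in the opposite direction; a hypothetical reader-side sketch
 * (using kernel-style barrier names purely for illustration):
 *
 *      state = READ_ONCE(cq->comps[idx].status);
 *      if (state == ERROR) {
 *              smp_rmb();      // pairs with the smp_wmb() above
 *              err = cq->comps[idx].errcode;
 *      }
 */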
1606
1607 static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
1608                            unsigned long len)
1609 {
1610         return (bool)(node->addr == addr);
1611 }
1612
1613 static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
1614 {
1615         struct sdma_mmu_node *node =
1616                 container_of(mnode, struct sdma_mmu_node, rb);
1617
1618         atomic_inc(&node->refcount);
1619         return 0;
1620 }
1621
1622 /*
1623  * Return 1 to remove the node from the rb tree and call the remove op.
1624  *
1625  * Called with the rb tree lock held.
1626  */
1627 static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
1628                          void *evict_arg, bool *stop)
1629 {
1630         struct sdma_mmu_node *node =
1631                 container_of(mnode, struct sdma_mmu_node, rb);
1632         struct evict_data *evict_data = evict_arg;
1633
1634         /* is this node still being used? */
1635         if (atomic_read(&node->refcount))
1636                 return 0; /* keep this node */
1637
1638         /* this node will be evicted; add its pages to our count */
1639         evict_data->cleared += node->npages;
1640
1641         /* have enough pages been cleared? */
1642         if (evict_data->cleared >= evict_data->target)
1643                 *stop = true;
1644
1645         return 1; /* remove this node */
1646 }
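/*
 * Eviction example: with an eviction target of 256 pages, nodes that
 * are still referenced are kept, unreferenced nodes are removed and
 * their page counts added to evict_data->cleared, and the walk stops
 * (*stop = true) as soon as at least 256 pages have been cleared.
 */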
1647
1648 static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
1649 {
1650         struct sdma_mmu_node *node =
1651                 container_of(mnode, struct sdma_mmu_node, rb);
1652
1653         atomic_sub(node->npages, &node->pq->n_locked);
1654
1655         unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
1656
1657         kfree(node);
1658 }
1659
1660 static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
1661 {
1662         struct sdma_mmu_node *node =
1663                 container_of(mnode, struct sdma_mmu_node, rb);
1664
1665         if (!atomic_read(&node->refcount))
1666                 return 1;
1667         return 0;
1668 }