2 * Copyright(c) 2015 - 2017 Intel Corporation.
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 #include <linux/types.h>
49 #include <linux/device.h>
50 #include <linux/dmapool.h>
51 #include <linux/slab.h>
52 #include <linux/list.h>
53 #include <linux/highmem.h>
55 #include <linux/uio.h>
56 #include <linux/rbtree.h>
57 #include <linux/spinlock.h>
58 #include <linux/delay.h>
59 #include <linux/kthread.h>
60 #include <linux/mmu_context.h>
61 #include <linux/module.h>
62 #include <linux/vmalloc.h>
63 #include <linux/string.h>
67 #include "user_sdma.h"
68 #include "verbs.h" /* for the headers */
69 #include "common.h" /* for struct hfi1_tid_info */
73 static uint hfi1_sdma_comp_ring_size = 128;
74 module_param_named(sdma_comp_size, hfi1_sdma_comp_ring_size, uint, S_IRUGO);
75 MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 128");
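/*
 * For illustration only: assuming the module is loaded as "hfi1", the ring
 * size could be overridden at load time with something like
 * "modprobe hfi1 sdma_comp_size=256"; the S_IRUGO permission above exposes
 * the value read-only via /sys/module/hfi1/parameters/sdma_comp_size.
 */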
77 /* The maximum number of data I/O vectors per message/request */
78 #define MAX_VECTORS_PER_REQ 8
80 * Maximum number of packets to send from each message/request
81 * before moving to the next one.
83 #define MAX_PKTS_PER_QUEUE 16
85 #define num_pages(x) (1 + ((((x) - 1) & PAGE_MASK) >> PAGE_SHIFT))
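/*
 * Rough worked example (assuming the usual 4 KiB PAGE_SIZE): num_pages(1)
 * and num_pages(4096) both evaluate to 1, while num_pages(4097) evaluates
 * to 2, i.e. the macro rounds a byte count up to whole pages.
 */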
87 #define req_opcode(x) \
88 (((x) >> HFI1_SDMA_REQ_OPCODE_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
89 #define req_version(x) \
90 (((x) >> HFI1_SDMA_REQ_VERSION_SHIFT) & HFI1_SDMA_REQ_OPCODE_MASK)
91 #define req_iovcnt(x) \
92 (((x) >> HFI1_SDMA_REQ_IOVCNT_SHIFT) & HFI1_SDMA_REQ_IOVCNT_MASK)
94 /* Number of BTH.PSN bits used for sequence number in expected rcvs */
95 #define BTH_SEQ_MASK 0x7ffull
97 #define AHG_KDETH_INTR_SHIFT 12
98 #define AHG_KDETH_SH_SHIFT 13
99 #define AHG_KDETH_ARRAY_SIZE 9
101 #define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
102 #define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)
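/*
 * These two macros are inverses of each other; a rough worked example:
 * an LRH length of 96 bytes maps to LRH2PBC(96) = (96 >> 2) + 1 = 25
 * PBC dwords, and PBC2LRH(25) = (25 << 2) - 4 = 96 bytes again.
 */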
104 #define AHG_HEADER_SET(arr, idx, dw, bit, width, value) \
106 if ((idx) < ARRAY_SIZE((arr))) \
107 (arr)[(idx++)] = sdma_build_ahg_descriptor( \
108 (__force u16)(value), (dw), (bit), \
114 /* Tx request flag bits */
115 #define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */
116 #define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
118 #define SDMA_PKT_Q_INACTIVE BIT(0)
119 #define SDMA_PKT_Q_ACTIVE BIT(1)
120 #define SDMA_PKT_Q_DEFERRED BIT(2)
123 * Maximum retry attempts to submit a TX request
124 * before putting the process to sleep.
126 #define MAX_DEFER_RETRY_COUNT 1
128 static unsigned initial_pkt_count = 8;
130 #define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
132 struct sdma_mmu_node;
134 struct user_sdma_iovec {
135 struct list_head list;
137 /* number of pages in this vector */
139 /* array of pinned pages for this vector */
142 * offset into the virtual address space of the vector at
143 * which we last left off.
146 struct sdma_mmu_node *node;
149 struct sdma_mmu_node {
150 struct mmu_rb_node rb;
151 struct hfi1_user_sdma_pkt_q *pq;
157 /* evict operation argument */
159 u32 cleared; /* count evicted so far */
160 u32 target; /* target count to evict */
163 struct user_sdma_request {
164 /* This is the original header from user space */
165 struct hfi1_pkt_header hdr;
167 /* Read mostly fields */
168 struct hfi1_user_sdma_pkt_q *pq ____cacheline_aligned_in_smp;
169 struct hfi1_user_sdma_comp_q *cq;
171 * Pointer to the SDMA engine for this request.
172 * Since different requests could be on different VLs,
173 * each request will need its own engine pointer.
175 struct sdma_engine *sde;
176 struct sdma_req_info info;
177 /* TID array values copied from the tid_iov vector */
179 /* total length of the data in the request */
181 /* number of elements copied to the tids array */
184 * We copy the iovs for this request (based on
185 * info.iovcnt). These are only the data vectors
190 /* Writeable fields shared with interrupt */
191 u64 seqcomp ____cacheline_aligned_in_smp;
193 /* status of the last txreq completed */
196 /* Send side fields */
197 struct list_head txps ____cacheline_aligned_in_smp;
200 * KDETH.OFFSET (TID) field
201 * The offset can cover multiple packets, depending on the
202 * size of the TID entry.
206 * KDETH.Offset (Eager) field
207 * We need to remember the initial value so the headers
208 * can be updated properly.
212 /* TID index copied from the tid_iov vector */
214 /* progress index moving along the iovs array */
219 struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
220 } ____cacheline_aligned_in_smp;
223 * A single txreq could span up to 3 physical pages when the MTU
224 * is sufficiently large (> 4K). Each of the IOV pointers also
225 * needs its own set of flags so the vectors can be handled
226 * independently of each other.
228 struct user_sdma_txreq {
229 /* Packet header for the txreq */
230 struct hfi1_pkt_header hdr;
231 struct sdma_txreq txreq;
232 struct list_head list;
233 struct user_sdma_request *req;
239 #define SDMA_DBG(req, fmt, ...) \
240 hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
241 (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
243 #define SDMA_Q_DBG(pq, fmt, ...) \
244 hfi1_cdbg(SDMA, "[%u:%u:%u] " fmt, (pq)->dd->unit, (pq)->ctxt, \
245 (pq)->subctxt, ##__VA_ARGS__)
247 static int user_sdma_send_pkts(struct user_sdma_request *req,
249 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
250 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
251 static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
252 static int pin_vector_pages(struct user_sdma_request *req,
253 struct user_sdma_iovec *iovec);
254 static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
255 unsigned start, unsigned npages);
256 static int check_header_template(struct user_sdma_request *req,
257 struct hfi1_pkt_header *hdr, u32 lrhlen,
259 static int set_txreq_header(struct user_sdma_request *req,
260 struct user_sdma_txreq *tx, u32 datalen);
261 static int set_txreq_header_ahg(struct user_sdma_request *req,
262 struct user_sdma_txreq *tx, u32 len);
263 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
264 struct hfi1_user_sdma_comp_q *cq,
265 u16 idx, enum hfi1_sdma_comp_state state,
267 static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags);
268 static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);
270 static int defer_packet_queue(
271 struct sdma_engine *sde,
273 struct sdma_txreq *txreq,
276 static void activate_packet_queue(struct iowait *wait, int reason);
277 static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
279 static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode);
280 static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
281 void *arg2, bool *stop);
282 static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
283 static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
285 static struct mmu_rb_ops sdma_rb_ops = {
286 .filter = sdma_rb_filter,
287 .insert = sdma_rb_insert,
288 .evict = sdma_rb_evict,
289 .remove = sdma_rb_remove,
290 .invalidate = sdma_rb_invalidate
293 static int defer_packet_queue(
294 struct sdma_engine *sde,
296 struct sdma_txreq *txreq,
300 struct hfi1_user_sdma_pkt_q *pq =
301 container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
302 struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
303 struct user_sdma_txreq *tx =
304 container_of(txreq, struct user_sdma_txreq, txreq);
306 if (sdma_progress(sde, seq, txreq)) {
307 if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
311 * We are assuming that if the list is enqueued somewhere, it
312 * is on the dmawait list, since that is the only place where
313 * it is supposed to be enqueued.
315 xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
316 write_seqlock(&dev->iowait_lock);
317 if (list_empty(&pq->busy.list))
318 iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
319 write_sequnlock(&dev->iowait_lock);
325 static void activate_packet_queue(struct iowait *wait, int reason)
327 struct hfi1_user_sdma_pkt_q *pq =
328 container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
329 xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
330 wake_up(&wait->wait_dma);
333 int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
334 struct hfi1_filedata *fd)
338 struct hfi1_devdata *dd;
339 struct hfi1_user_sdma_comp_q *cq;
340 struct hfi1_user_sdma_pkt_q *pq;
345 if (!hfi1_sdma_comp_ring_size)
350 pq = kzalloc(sizeof(*pq), GFP_KERNEL);
355 pq->ctxt = uctxt->ctxt;
356 pq->subctxt = fd->subctxt;
357 pq->n_max_reqs = hfi1_sdma_comp_ring_size;
358 pq->state = SDMA_PKT_Q_INACTIVE;
359 atomic_set(&pq->n_reqs, 0);
360 init_waitqueue_head(&pq->wait);
361 atomic_set(&pq->n_locked, 0);
364 iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
365 activate_packet_queue, NULL);
368 pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
374 pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
375 sizeof(*pq->req_in_use),
378 goto pq_reqs_no_in_use;
380 snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,
382 pq->txreq_cache = kmem_cache_create(buf,
383 sizeof(struct user_sdma_txreq),
387 if (!pq->txreq_cache) {
388 dd_dev_err(dd, "[%u] Failed to allocate TxReq cache\n",
393 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
397 cq->comps = vmalloc_user(PAGE_ALIGN(sizeof(*cq->comps)
398 * hfi1_sdma_comp_ring_size));
402 cq->nentries = hfi1_sdma_comp_ring_size;
404 ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
407 dd_dev_err(dd, "Failed to register with MMU %d", ret);
421 kmem_cache_destroy(pq->txreq_cache);
423 kfree(pq->req_in_use);
432 int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
433 struct hfi1_ctxtdata *uctxt)
435 struct hfi1_user_sdma_pkt_q *pq;
437 hfi1_cdbg(SDMA, "[%u:%u:%u] Freeing user SDMA queues", uctxt->dd->unit,
438 uctxt->ctxt, fd->subctxt);
442 hfi1_mmu_rb_unregister(pq->handler);
443 iowait_sdma_drain(&pq->busy);
444 /* Wait until all requests have been freed. */
445 wait_event_interruptible(
447 (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
449 kfree(pq->req_in_use);
450 kmem_cache_destroy(pq->txreq_cache);
455 vfree(fd->cq->comps);
462 static u8 dlid_to_selector(u16 dlid)
464 static u8 mapping[256];
465 static int initialized;
470 memset(mapping, 0xFF, 256);
474 hash = ((dlid >> 8) ^ dlid) & 0xFF;
475 if (mapping[hash] == 0xFF) {
476 mapping[hash] = next;
477 next = (next + 1) & 0x7F;
480 return mapping[hash];
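/*
 * Note on the mapping above (an informal sketch of the intent): the DLID's
 * two bytes are folded together with ((dlid >> 8) ^ dlid) & 0xFF, and the
 * first DLID that lands in a given bucket claims the next selector value
 * (0..127, wrapping); later DLIDs with the same hash reuse that selector,
 * so requests toward the same destination tend to get a consistent
 * selector contribution.
 */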
483 int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
484 struct iovec *iovec, unsigned long dim,
485 unsigned long *count)
488 struct hfi1_ctxtdata *uctxt = fd->uctxt;
489 struct hfi1_user_sdma_pkt_q *pq = fd->pq;
490 struct hfi1_user_sdma_comp_q *cq = fd->cq;
491 struct hfi1_devdata *dd = pq->dd;
492 unsigned long idx = 0;
493 u8 pcount = initial_pkt_count;
494 struct sdma_req_info info;
495 struct user_sdma_request *req;
503 if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
506 "[%u:%u:%u] First vector not big enough for header %lu/%lu",
507 dd->unit, uctxt->ctxt, fd->subctxt,
508 iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
511 ret = copy_from_user(&info, iovec[idx].iov_base, sizeof(info));
513 hfi1_cdbg(SDMA, "[%u:%u:%u] Failed to copy info QW (%d)",
514 dd->unit, uctxt->ctxt, fd->subctxt, ret);
518 trace_hfi1_sdma_user_reqinfo(dd, uctxt->ctxt, fd->subctxt,
521 if (info.comp_idx >= hfi1_sdma_comp_ring_size) {
523 "[%u:%u:%u:%u] Invalid comp index",
524 dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
529 * Sanity check the header io vector count. Need at least 1 vector
530 * (header) and cannot be larger than the actual io vector count.
532 if (req_iovcnt(info.ctrl) < 1 || req_iovcnt(info.ctrl) > dim) {
534 "[%u:%u:%u:%u] Invalid iov count %d, dim %ld",
535 dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx,
536 req_iovcnt(info.ctrl), dim);
540 if (!info.fragsize) {
542 "[%u:%u:%u:%u] Request does not specify fragsize",
543 dd->unit, uctxt->ctxt, fd->subctxt, info.comp_idx);
547 /* Try to claim the request. */
548 if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
549 hfi1_cdbg(SDMA, "[%u:%u:%u] Entry %u is in use",
550 dd->unit, uctxt->ctxt, fd->subctxt,
555 * All safety checks have been done and this request has been claimed.
557 hfi1_cdbg(SDMA, "[%u:%u:%u] Using req/comp entry %u\n", dd->unit,
558 uctxt->ctxt, fd->subctxt, info.comp_idx);
559 req = pq->reqs + info.comp_idx;
560 req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
570 req->seqsubmitted = 0;
574 INIT_LIST_HEAD(&req->txps);
576 memcpy(&req->info, &info, sizeof(info));
578 if (req_opcode(info.ctrl) == EXPECTED) {
579 /* an expected request must have TID info and at least one data vector */
580 if (req->data_iovs < 2) {
582 "Not enough vectors for expected request");
589 if (!info.npkts || req->data_iovs > MAX_VECTORS_PER_REQ) {
590 SDMA_DBG(req, "Too many vectors (%u/%u)", req->data_iovs,
591 MAX_VECTORS_PER_REQ);
595 /* Copy the header from the user buffer */
596 ret = copy_from_user(&req->hdr, iovec[idx].iov_base + sizeof(info),
599 SDMA_DBG(req, "Failed to copy header template (%d)", ret);
604 /* If Static rate control is not enabled, sanitize the header. */
605 if (!HFI1_CAP_IS_USET(STATIC_RATE_CTRL))
608 /* Validate the opcode. Do not trust packets from user space blindly. */
609 opcode = (be32_to_cpu(req->hdr.bth[0]) >> 24) & 0xff;
610 if ((opcode & USER_OPCODE_CHECK_MASK) !=
611 USER_OPCODE_CHECK_VAL) {
612 SDMA_DBG(req, "Invalid opcode (%d)", opcode);
617 * Validate the VL. Do not trust packets from user space blindly.
618 * VL comes from PBC, SC comes from LRH, and the VL needs to
619 * match the SC lookup.
621 vl = (le16_to_cpu(req->hdr.pbc[0]) >> 12) & 0xF;
622 sc = (((be16_to_cpu(req->hdr.lrh[0]) >> 12) & 0xF) |
623 (((le16_to_cpu(req->hdr.pbc[1]) >> 14) & 0x1) << 4));
624 if (vl >= dd->pport->vls_operational ||
625 vl != sc_to_vlt(dd, sc)) {
626 SDMA_DBG(req, "Invalid SC(%u)/VL(%u)", sc, vl);
631 /* Check the P_KEY for requests from user space */
632 pkey = (u16)be32_to_cpu(req->hdr.bth[0]);
633 slid = be16_to_cpu(req->hdr.lrh[3]);
634 if (egress_pkey_check(dd->pport, slid, pkey, sc, PKEY_CHECK_INVALID)) {
640 * We should also check BTH.lnh. If it says the next header is a GRH, then
641 * the RXE parsing will be off and will land in the middle of the KDETH
642 * or miss it entirely.
644 if ((be16_to_cpu(req->hdr.lrh[0]) & 0x3) == HFI1_LRH_GRH) {
645 SDMA_DBG(req, "User tried to pass in a GRH");
650 req->koffset = le32_to_cpu(req->hdr.kdeth.swdata[6]);
652 * Calculate the initial TID offset based on the values of
653 * KDETH.OFFSET and KDETH.OM that are passed in.
655 req->tidoffset = KDETH_GET(req->hdr.kdeth.ver_tid_offset, OFFSET) *
656 (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
657 KDETH_OM_LARGE : KDETH_OM_SMALL);
658 SDMA_DBG(req, "Initial TID offset %u", req->tidoffset);
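/*
 * Rough worked example, assuming KDETH_OM_SMALL is 4 bytes and
 * KDETH_OM_LARGE is 64 bytes: with KDETH.OFFSET == 32 and KDETH.OM == 1,
 * the initial tidoffset above would be 32 * 64 = 2048 bytes.
 */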
661 /* Save all the IO vector structures */
662 for (i = 0; i < req->data_iovs; i++) {
663 req->iovs[i].offset = 0;
664 INIT_LIST_HEAD(&req->iovs[i].list);
665 memcpy(&req->iovs[i].iov,
667 sizeof(req->iovs[i].iov));
668 ret = pin_vector_pages(req, &req->iovs[i]);
674 req->data_len += req->iovs[i].iov.iov_len;
676 SDMA_DBG(req, "total data length %u", req->data_len);
678 if (pcount > req->info.npkts)
679 pcount = req->info.npkts;
682 * User space will provide the TID info only when the
683 * request type is EXPECTED. This is true even if there is
684 * only one packet in the request and the header is already
685 * set up. The reason for the singular TID case is that the
686 * driver needs to perform safety checks.
688 if (req_opcode(req->info.ctrl) == EXPECTED) {
689 u16 ntids = iovec[idx].iov_len / sizeof(*req->tids);
692 if (!ntids || ntids > MAX_TID_PAIR_ENTRIES) {
698 * We have to copy all of the tids because they may vary
699 * in size and, therefore, the TID count might not be
700 * equal to the pkt count. However, there is no way to
701 * tell at this point.
703 tmp = memdup_user(iovec[idx].iov_base,
704 ntids * sizeof(*req->tids));
707 SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
717 dlid = be16_to_cpu(req->hdr.lrh[1]);
718 selector = dlid_to_selector(dlid);
719 selector += uctxt->ctxt + fd->subctxt;
720 req->sde = sdma_select_user_engine(dd, selector, vl);
722 if (!req->sde || !sdma_running(req->sde)) {
727 /* We don't need an AHG entry if the request contains only one packet */
728 if (req->info.npkts > 1 && HFI1_CAP_IS_USET(SDMA_AHG))
729 req->ahg_idx = sdma_ahg_alloc(req->sde);
731 set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
732 atomic_inc(&pq->n_reqs);
734 /* Send the first N packets in the request to buy us some time */
735 ret = user_sdma_send_pkts(req, pcount);
736 if (unlikely(ret < 0 && ret != -EBUSY)) {
742 * It is possible that the SDMA engine would have processed all the
743 * submitted packets by the time we get here. Therefore, only set
744 * packet queue state to ACTIVE if there are still uncompleted requests.
747 if (atomic_read(&pq->n_reqs))
748 xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
751 * This is a somewhat blocking send implementation.
752 * The driver will block the caller until all packets of the
753 * request have been submitted to the SDMA engine. However, it
754 * will not wait for send completions.
756 while (req->seqsubmitted != req->info.npkts) {
757 ret = user_sdma_send_pkts(req, pcount);
761 WRITE_ONCE(req->has_error, 1);
762 if (ACCESS_ONCE(req->seqcomp) ==
763 req->seqsubmitted - 1)
767 wait_event_interruptible_timeout(
769 (pq->state == SDMA_PKT_Q_ACTIVE),
771 SDMA_IOWAIT_TIMEOUT));
777 user_sdma_free_request(req, true);
780 set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
784 static inline u32 compute_data_length(struct user_sdma_request *req,
785 struct user_sdma_txreq *tx)
788 * Determine the proper size of the packet data.
789 * The size of the data of the first packet is in the header
790 * template. However, it includes the header and ICRC, which need to be subtracted.
792 * The minimum representable packet data length in a header is 4 bytes;
793 * therefore, when the requested data length is less than 4 bytes, there is
794 * only one packet, and the packet data length is equal to the
795 * request data length.
796 * The size of the remaining packets is the minimum of the frag
797 * size (MTU) and the remaining data in the request.
802 if (req->data_len < sizeof(u32))
805 len = ((be16_to_cpu(req->hdr.lrh[2]) << 2) -
806 (sizeof(tx->hdr) - 4));
807 } else if (req_opcode(req->info.ctrl) == EXPECTED) {
808 u32 tidlen = EXP_TID_GET(req->tids[req->tididx], LEN) *
811 * Get the data length based on the remaining space in the TID pair.
814 len = min(tidlen - req->tidoffset, (u32)req->info.fragsize);
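/*
 * Informal example of the clamp above: if the current TID entry covers
 * two pages (tidlen == 8192), tidoffset is 4096 and fragsize is 8192,
 * then len becomes min(4096, 8192) == 4096, i.e. the packet is limited
 * by what is left in the TID.
 */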
815 /* If we've filled up the TID pair, move to the next one. */
816 if (unlikely(!len) && ++req->tididx < req->n_tids &&
817 req->tids[req->tididx]) {
818 tidlen = EXP_TID_GET(req->tids[req->tididx],
821 len = min_t(u32, tidlen, req->info.fragsize);
824 * Since the TID pairs map entire pages, make sure that we
825 * are not going to try to send more data than we have remaining.
828 len = min(len, req->data_len - req->sent);
830 len = min(req->data_len - req->sent, (u32)req->info.fragsize);
832 SDMA_DBG(req, "Data Length = %u", len);
836 static inline u32 pad_len(u32 len)
838 if (len & (sizeof(u32) - 1))
839 len += sizeof(u32) - (len & (sizeof(u32) - 1));
843 static inline u32 get_lrh_len(struct hfi1_pkt_header hdr, u32 len)
845 /* (Size of complete header - size of PBC) + 4B ICRC + data length */
846 return ((sizeof(hdr) - sizeof(hdr.pbc)) + 4 + len);
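/*
 * Worked example for the two helpers above: pad_len(9) rounds up to 12 and
 * pad_len(8) stays 8 (dword padding); with a padded payload of 12 bytes,
 * get_lrh_len() then returns (sizeof(hdr) - sizeof(hdr.pbc)) + 4 + 12, i.e.
 * the header after the PBC, the 4-byte ICRC and the padded data.
 */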
849 static int user_sdma_txadd_ahg(struct user_sdma_request *req,
850 struct user_sdma_txreq *tx,
854 u16 pbclen = le16_to_cpu(req->hdr.pbc[0]);
855 u32 lrhlen = get_lrh_len(req->hdr, pad_len(datalen));
856 struct hfi1_user_sdma_pkt_q *pq = req->pq;
859 * Copy the request header into the tx header
860 * because the HW needs a cacheline-aligned address.
862 * This copy can be optimized out if the hdr
863 * member of user_sdma_request were also cacheline aligned.
866 memcpy(&tx->hdr, &req->hdr, sizeof(tx->hdr));
867 if (PBC2LRH(pbclen) != lrhlen) {
868 pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
869 tx->hdr.pbc[0] = cpu_to_le16(pbclen);
871 ret = check_header_template(req, &tx->hdr, lrhlen, datalen);
874 ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY,
875 sizeof(tx->hdr) + datalen, req->ahg_idx,
876 0, NULL, 0, user_sdma_txreq_cb);
879 ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
881 sdma_txclean(pq->dd, &tx->txreq);
885 static int user_sdma_txadd(struct user_sdma_request *req,
886 struct user_sdma_txreq *tx,
887 struct user_sdma_iovec *iovec, u32 datalen,
888 u32 *queued_ptr, u32 *data_sent_ptr,
892 unsigned int pageidx, len;
893 unsigned long base, offset;
894 u64 iov_offset = *iov_offset_ptr;
895 u32 queued = *queued_ptr, data_sent = *data_sent_ptr;
896 struct hfi1_user_sdma_pkt_q *pq = req->pq;
898 base = (unsigned long)iovec->iov.iov_base;
899 offset = offset_in_page(base + iovec->offset + iov_offset);
900 pageidx = (((iovec->offset + iov_offset + base) - (base & PAGE_MASK)) >>
902 len = offset + req->info.fragsize > PAGE_SIZE ?
903 PAGE_SIZE - offset : req->info.fragsize;
904 len = min((datalen - queued), len);
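/*
 * Sketch of the split above: with a 4 KiB page, if the vector data starts
 * 0x200 bytes into a page and fragsize is 4096, then offset is 0x200 and
 * len is first clamped to PAGE_SIZE - 0x200 == 3584, then to whatever is
 * still missing from this packet (datalen - queued).
 */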
905 ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],
908 SDMA_DBG(req, "SDMA txreq add page failed %d\n", ret);
914 if (unlikely(queued < datalen && pageidx == iovec->npages &&
915 req->iov_idx < req->data_iovs - 1)) {
916 iovec->offset += iov_offset;
917 iovec = &req->iovs[++req->iov_idx];
921 *queued_ptr = queued;
922 *data_sent_ptr = data_sent;
923 *iov_offset_ptr = iov_offset;
927 static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
931 struct user_sdma_txreq *tx = NULL;
932 struct hfi1_user_sdma_pkt_q *pq = NULL;
933 struct user_sdma_iovec *iovec = NULL;
940 /* If tx completion has reported an error, we are done. */
941 if (READ_ONCE(req->has_error))
945 * Check if we might have sent the entire request already
947 if (unlikely(req->seqnum == req->info.npkts)) {
948 if (!list_empty(&req->txps))
953 if (!maxpkts || maxpkts > req->info.npkts - req->seqnum)
954 maxpkts = req->info.npkts - req->seqnum;
956 while (npkts < maxpkts) {
957 u32 datalen = 0, queued = 0, data_sent = 0;
961 * Check whether any of the completions have come back
962 * with errors. If so, we are not going to process any
963 * more packets from this request.
965 if (READ_ONCE(req->has_error))
968 tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
975 INIT_LIST_HEAD(&tx->list);
978 * For the last packet set the ACK request
979 * and disable header suppression.
981 if (req->seqnum == req->info.npkts - 1)
982 tx->flags |= (TXREQ_FLAGS_REQ_ACK |
983 TXREQ_FLAGS_REQ_DISABLE_SH);
986 * Calculate the payload size - this is the minimum of the fragment
987 * (MTU) size and the remaining bytes in the request, but only
988 * if we have payload data.
991 iovec = &req->iovs[req->iov_idx];
992 if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
993 if (++req->iov_idx == req->data_iovs) {
997 iovec = &req->iovs[req->iov_idx];
998 WARN_ON(iovec->offset);
1001 datalen = compute_data_length(req, tx);
1004 * Disable header suppression when the payload is <= 8 DWs.
1005 * If there is an uncorrectable error in the receive
1006 * data FIFO when the received payload size is less than
1007 * or equal to 8 DWs, then the RxDmaDataFifoRdUncErr is
1008 * not reported. RHF.EccErr is set instead if the header
1009 * is not suppressed.
1013 "Request has data but pkt len is 0");
1016 } else if (datalen <= 32) {
1017 tx->flags |= TXREQ_FLAGS_REQ_DISABLE_SH;
1021 if (req->ahg_idx >= 0) {
1023 ret = user_sdma_txadd_ahg(req, tx, datalen);
1029 changes = set_txreq_header_ahg(req, tx,
1035 ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
1036 datalen, user_sdma_txreq_cb);
1040 * Modify the header for this packet. This only needs
1041 * to be done if we are not going to use AHG. Otherwise,
1042 * the HW will do it based on the changes we gave it
1043 * during sdma_txinit_ahg().
1045 ret = set_txreq_header(req, tx, datalen);
1051 * If the request contains any data vectors, add up to
1052 * fragsize bytes to the descriptor.
1054 while (queued < datalen &&
1055 (req->sent + data_sent) < req->data_len) {
1056 ret = user_sdma_txadd(req, tx, iovec, datalen,
1057 &queued, &data_sent, &iov_offset);
1062 * The txreq was submitted successfully so we can update the counters.
1065 req->koffset += datalen;
1066 if (req_opcode(req->info.ctrl) == EXPECTED)
1067 req->tidoffset += datalen;
1068 req->sent += data_sent;
1070 iovec->offset += iov_offset;
1071 list_add_tail(&tx->txreq.list, &req->txps);
1073 * It is important to increment this here as it is used to
1074 * generate the BTH.PSN and, therefore, can't be bulk-updated
1075 * outside of the loop.
1077 tx->seqnum = req->seqnum++;
1081 ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
1082 req->seqsubmitted += count;
1083 if (req->seqsubmitted == req->info.npkts) {
1084 WRITE_ONCE(req->done, 1);
1086 * The txreq has already been submitted to the HW queue
1087 * so we can free the AHG entry now. Corruption will not
1088 * happen due to the sequential manner in which
1089 * descriptors are processed.
1091 if (req->ahg_idx >= 0)
1092 sdma_ahg_free(req->sde, req->ahg_idx);
1097 sdma_txclean(pq->dd, &tx->txreq);
1099 kmem_cache_free(pq->txreq_cache, tx);
1103 static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
1105 struct evict_data evict_data;
1107 evict_data.cleared = 0;
1108 evict_data.target = npages;
1109 hfi1_mmu_rb_evict(pq->handler, &evict_data);
1110 return evict_data.cleared;
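/*
 * sdma_cache_evict() is the memory-pressure path: pin_sdma_pages() below
 * calls it when hfi1_can_pin_pages() indicates the pinned-page limit would
 * be exceeded, asking it to reclaim npages from cached, unreferenced nodes.
 */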
1113 static int pin_sdma_pages(struct user_sdma_request *req,
1114 struct user_sdma_iovec *iovec,
1115 struct sdma_mmu_node *node,
1118 int pinned, cleared;
1119 struct page **pages;
1120 struct hfi1_user_sdma_pkt_q *pq = req->pq;
1122 pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
1124 SDMA_DBG(req, "Failed page array alloc");
1127 memcpy(pages, node->pages, node->npages * sizeof(*pages));
1129 npages -= node->npages;
1131 if (!hfi1_can_pin_pages(pq->dd, pq->mm,
1132 atomic_read(&pq->n_locked), npages)) {
1133 cleared = sdma_cache_evict(pq, npages);
1134 if (cleared >= npages)
1137 pinned = hfi1_acquire_user_pages(pq->mm,
1138 ((unsigned long)iovec->iov.iov_base +
1139 (node->npages * PAGE_SIZE)), npages, 0,
1140 pages + node->npages);
1145 if (pinned != npages) {
1146 unpin_vector_pages(pq->mm, pages, node->npages, pinned);
1150 node->rb.len = iovec->iov.iov_len;
1151 node->pages = pages;
1152 atomic_add(pinned, &pq->n_locked);
1156 static void unpin_sdma_pages(struct sdma_mmu_node *node)
1159 unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
1160 atomic_sub(node->npages, &node->pq->n_locked);
1164 static int pin_vector_pages(struct user_sdma_request *req,
1165 struct user_sdma_iovec *iovec)
1167 int ret = 0, pinned, npages;
1168 struct hfi1_user_sdma_pkt_q *pq = req->pq;
1169 struct sdma_mmu_node *node = NULL;
1170 struct mmu_rb_node *rb_node;
1175 hfi1_mmu_rb_remove_unless_exact(pq->handler,
1177 iovec->iov.iov_base,
1178 iovec->iov.iov_len, &rb_node);
1180 node = container_of(rb_node, struct sdma_mmu_node, rb);
1182 atomic_inc(&node->refcount);
1183 iovec->pages = node->pages;
1184 iovec->npages = node->npages;
1191 node = kzalloc(sizeof(*node), GFP_KERNEL);
1195 node->rb.addr = (unsigned long)iovec->iov.iov_base;
1197 atomic_set(&node->refcount, 0);
1201 npages = num_user_pages((unsigned long)iov->iov_base, iov->iov_len);
1202 if (node->npages < npages) {
1203 pinned = pin_sdma_pages(req, iovec, node, npages);
1208 node->npages += pinned;
1209 npages = node->npages;
1211 iovec->pages = node->pages;
1212 iovec->npages = npages;
1215 ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
1222 unpin_sdma_pages(node);
1227 static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
1228 unsigned start, unsigned npages)
1230 hfi1_release_user_pages(mm, pages + start, npages, false);
1234 static int check_header_template(struct user_sdma_request *req,
1235 struct hfi1_pkt_header *hdr, u32 lrhlen,
1239 * Perform safety checks for any type of packet:
1240 * - transfer size is a multiple of 64 bytes
1241 * - packet length is a multiple of 4 bytes
1242 * - packet length is not larger than MTU size
1244 * These checks are only done for the first packet of the
1245 * transfer since the header is "given" to us by user space.
1246 * For the remainder of the packets we compute the values.
1248 if (req->info.fragsize % PIO_BLOCK_SIZE || lrhlen & 0x3 ||
1249 lrhlen > get_lrh_len(*hdr, req->info.fragsize))
1252 if (req_opcode(req->info.ctrl) == EXPECTED) {
1254 * The header is checked only on the first packet. Furthermore,
1255 * we ensure that at least one TID entry is copied when the
1256 * request is submitted. Therefore, we don't have to verify that
1257 * tididx points to something sane.
1259 u32 tidval = req->tids[req->tididx],
1260 tidlen = EXP_TID_GET(tidval, LEN) * PAGE_SIZE,
1261 tididx = EXP_TID_GET(tidval, IDX),
1262 tidctrl = EXP_TID_GET(tidval, CTRL),
1264 __le32 kval = hdr->kdeth.ver_tid_offset;
1266 tidoff = KDETH_GET(kval, OFFSET) *
1267 (KDETH_GET(req->hdr.kdeth.ver_tid_offset, OM) ?
1268 KDETH_OM_LARGE : KDETH_OM_SMALL);
1270 * Expected receive packets have the following
1271 * additional checks:
1272 * - offset is not larger than the TID size
1273 * - TIDCtrl values match between header and TID array
1274 * - TID indexes match between header and TID array
1276 if ((tidoff + datalen > tidlen) ||
1277 KDETH_GET(kval, TIDCTRL) != tidctrl ||
1278 KDETH_GET(kval, TID) != tididx)
1285 * Correctly set the BTH.PSN field based on type of
1286 * transfer - eager packets can just increment the PSN but
1287 * expected packets encode generation and sequence in the
1288 * BTH.PSN field so just incrementing will result in errors.
1290 static inline u32 set_pkt_bth_psn(__be32 bthpsn, u8 expct, u32 frags)
1292 u32 val = be32_to_cpu(bthpsn),
1293 mask = (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffffull :
1297 psn = (psn & ~BTH_SEQ_MASK) | ((psn + frags) & BTH_SEQ_MASK);
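/*
 * Rough worked example: with BTH_SEQ_MASK == 0x7ff, psn == 0x1237fe and
 * frags == 5, the low 11 sequence bits wrap to 0x003 while the generation
 * bits above them are preserved, giving 0x123003.
 */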
1303 static int set_txreq_header(struct user_sdma_request *req,
1304 struct user_sdma_txreq *tx, u32 datalen)
1306 struct hfi1_user_sdma_pkt_q *pq = req->pq;
1307 struct hfi1_pkt_header *hdr = &tx->hdr;
1308 u8 omfactor; /* KDETH.OM */
1311 u32 tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
1313 /* Copy the header template from the request into the tx header before modification */
1314 memcpy(hdr, &req->hdr, sizeof(*hdr));
1317 * Check if the PBC and LRH length are mismatched. If so
1318 * adjust both in the header.
1320 pbclen = le16_to_cpu(hdr->pbc[0]);
1321 if (PBC2LRH(pbclen) != lrhlen) {
1322 pbclen = (pbclen & 0xf000) | LRH2PBC(lrhlen);
1323 hdr->pbc[0] = cpu_to_le16(pbclen);
1324 hdr->lrh[2] = cpu_to_be16(lrhlen >> 2);
1327 * This is the first packet in the sequence that has
1328 * a "static" size that can be used for the rest of
1329 * the packets (besides the last one).
1331 if (unlikely(req->seqnum == 2)) {
1333 * From this point on the lengths in both the
1334 * PBC and LRH are the same until the last packet.
1336 * Adjust the template so we don't have to update it every time.
1339 req->hdr.pbc[0] = hdr->pbc[0];
1340 req->hdr.lrh[2] = hdr->lrh[2];
1344 * We only have to modify the header if this is not the
1345 * first packet in the request. Otherwise, we use the
1346 * header given to us.
1348 if (unlikely(!req->seqnum)) {
1349 ret = check_header_template(req, hdr, lrhlen, datalen);
1355 hdr->bth[2] = cpu_to_be32(
1356 set_pkt_bth_psn(hdr->bth[2],
1357 (req_opcode(req->info.ctrl) == EXPECTED),
1360 /* Set ACK request on last packet */
1361 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
1362 hdr->bth[2] |= cpu_to_be32(1UL << 31);
1364 /* Set the new offset */
1365 hdr->kdeth.swdata[6] = cpu_to_le32(req->koffset);
1366 /* Expected packets have to fill in the new TID information */
1367 if (req_opcode(req->info.ctrl) == EXPECTED) {
1368 tidval = req->tids[req->tididx];
1370 * If the offset puts us at the end of the current TID,
1371 * advance everything.
1373 if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
1377 * Since we don't copy all the TIDs all at once,
1378 * we have to check again.
1380 if (++req->tididx > req->n_tids - 1 ||
1381 !req->tids[req->tididx]) {
1384 tidval = req->tids[req->tididx];
1386 omfactor = EXP_TID_GET(tidval, LEN) * PAGE_SIZE >=
1387 KDETH_OM_MAX_SIZE ? KDETH_OM_LARGE_SHIFT :
1388 KDETH_OM_SMALL_SHIFT;
1389 /* Set KDETH.TIDCtrl based on value for this TID. */
1390 KDETH_SET(hdr->kdeth.ver_tid_offset, TIDCTRL,
1391 EXP_TID_GET(tidval, CTRL));
1392 /* Set KDETH.TID based on value for this TID */
1393 KDETH_SET(hdr->kdeth.ver_tid_offset, TID,
1394 EXP_TID_GET(tidval, IDX));
1395 /* Clear KDETH.SH when DISABLE_SH flag is set */
1396 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH))
1397 KDETH_SET(hdr->kdeth.ver_tid_offset, SH, 0);
1399 * Set the KDETH.OFFSET and KDETH.OM based on the size of the transfer.
1402 SDMA_DBG(req, "TID offset %ubytes %uunits om%u",
1403 req->tidoffset, req->tidoffset >> omfactor,
1404 omfactor != KDETH_OM_SMALL_SHIFT);
1405 KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET,
1406 req->tidoffset >> omfactor);
1407 KDETH_SET(hdr->kdeth.ver_tid_offset, OM,
1408 omfactor != KDETH_OM_SMALL_SHIFT);
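/*
 * Illustration only, assuming KDETH_OM_SMALL_SHIFT == 2 and
 * KDETH_OM_LARGE_SHIFT == 6: a tidoffset of 8192 bytes in a large-OM TID is
 * encoded as OFFSET = 8192 >> 6 = 128 with OM = 1, while the same offset in
 * a small-OM TID would be OFFSET = 8192 >> 2 = 2048 with OM = 0.
 */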
1411 trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
1412 req->info.comp_idx, hdr, tidval);
1413 return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
1416 static int set_txreq_header_ahg(struct user_sdma_request *req,
1417 struct user_sdma_txreq *tx, u32 datalen)
1419 u32 ahg[AHG_KDETH_ARRAY_SIZE];
1421 u8 omfactor; /* KDETH.OM */
1422 struct hfi1_user_sdma_pkt_q *pq = req->pq;
1423 struct hfi1_pkt_header *hdr = &req->hdr;
1424 u16 pbclen = le16_to_cpu(hdr->pbc[0]);
1425 u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
1427 if (PBC2LRH(pbclen) != lrhlen) {
1428 /* PBC.PbcLengthDWs */
1429 AHG_HEADER_SET(ahg, diff, 0, 0, 12,
1430 cpu_to_le16(LRH2PBC(lrhlen)));
1431 /* LRH.PktLen (we need the full 16 bits due to byte swap) */
1432 AHG_HEADER_SET(ahg, diff, 3, 0, 16,
1433 cpu_to_be16(lrhlen >> 2));
1437 * Do the common updates
1439 /* BTH.PSN and BTH.A */
1440 val32 = (be32_to_cpu(hdr->bth[2]) + req->seqnum) &
1441 (HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
1442 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
1444 AHG_HEADER_SET(ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
1445 AHG_HEADER_SET(ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
1447 AHG_HEADER_SET(ahg, diff, 15, 0, 16,
1448 cpu_to_le16(req->koffset & 0xffff));
1449 AHG_HEADER_SET(ahg, diff, 15, 16, 16, cpu_to_le16(req->koffset >> 16));
1450 if (req_opcode(req->info.ctrl) == EXPECTED) {
1453 tidval = req->tids[req->tididx];
1456 * If the offset puts us at the end of the current TID,
1457 * advance everything.
1459 if ((req->tidoffset) == (EXP_TID_GET(tidval, LEN) *
1463 * Since we don't copy all the TIDs all at once,
1464 * we have to check again.
1466 if (++req->tididx > req->n_tids - 1 ||
1467 !req->tids[req->tididx])
1469 tidval = req->tids[req->tididx];
1471 omfactor = ((EXP_TID_GET(tidval, LEN) *
1473 KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
1474 KDETH_OM_SMALL_SHIFT;
1475 /* KDETH.OM and KDETH.OFFSET (TID) */
1476 AHG_HEADER_SET(ahg, diff, 7, 0, 16,
1477 ((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
1478 ((req->tidoffset >> omfactor)
1480 /* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
1481 val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
1482 (EXP_TID_GET(tidval, IDX) & 0x3ff));
1484 if (unlikely(tx->flags & TXREQ_FLAGS_REQ_DISABLE_SH)) {
1485 val |= cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
1487 AHG_KDETH_INTR_SHIFT));
1489 val |= KDETH_GET(hdr->kdeth.ver_tid_offset, SH) ?
1490 cpu_to_le16(0x1 << AHG_KDETH_SH_SHIFT) :
1491 cpu_to_le16((KDETH_GET(hdr->kdeth.ver_tid_offset,
1493 AHG_KDETH_INTR_SHIFT));
1496 AHG_HEADER_SET(ahg, diff, 7, 16, 14, val);
1501 trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
1502 req->info.comp_idx, req->sde->this_idx,
1503 req->ahg_idx, ahg, diff, tidval);
1504 sdma_txinit_ahg(&tx->txreq,
1505 SDMA_TXREQ_F_USE_AHG,
1506 datalen, req->ahg_idx, diff,
1507 ahg, sizeof(req->hdr),
1508 user_sdma_txreq_cb);
1514 * SDMA tx request completion callback. Called when the SDMA progress
1515 * state machine gets notification that the SDMA descriptors for this
1516 * tx request have been processed by the DMA engine. Called in
1517 * interrupt context.
1519 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
1521 struct user_sdma_txreq *tx =
1522 container_of(txreq, struct user_sdma_txreq, txreq);
1523 struct user_sdma_request *req;
1524 struct hfi1_user_sdma_pkt_q *pq;
1525 struct hfi1_user_sdma_comp_q *cq;
1535 if (status != SDMA_TXREQ_S_OK) {
1536 SDMA_DBG(req, "SDMA completion with error %d",
1538 WRITE_ONCE(req->has_error, 1);
1541 req->seqcomp = tx->seqnum;
1542 kmem_cache_free(pq->txreq_cache, tx);
1545 idx = req->info.comp_idx;
1546 if (req->status == -1 && status == SDMA_TXREQ_S_OK) {
1547 if (req->seqcomp == req->info.npkts - 1) {
1549 user_sdma_free_request(req, false);
1551 set_comp_state(pq, cq, idx, COMPLETE, 0);
1554 if (status != SDMA_TXREQ_S_OK)
1555 req->status = status;
1556 if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) &&
1557 (READ_ONCE(req->done) ||
1558 READ_ONCE(req->has_error))) {
1559 user_sdma_free_request(req, false);
1561 set_comp_state(pq, cq, idx, ERROR, req->status);
1566 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
1568 if (atomic_dec_and_test(&pq->n_reqs)) {
1569 xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
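/*
 * Dropping to SDMA_PKT_Q_INACTIVE here is the condition that the
 * wait_event_interruptible() in hfi1_user_sdma_free_queues() is waiting
 * for, so queue teardown can make progress once the last request completes.
 */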
1574 static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
1576 if (!list_empty(&req->txps)) {
1577 struct sdma_txreq *t, *p;
1579 list_for_each_entry_safe(t, p, &req->txps, list) {
1580 struct user_sdma_txreq *tx =
1581 container_of(t, struct user_sdma_txreq, txreq);
1582 list_del_init(&t->list);
1583 sdma_txclean(req->pq->dd, t);
1584 kmem_cache_free(req->pq->txreq_cache, tx);
1587 if (req->data_iovs) {
1588 struct sdma_mmu_node *node;
1591 for (i = 0; i < req->data_iovs; i++) {
1592 node = req->iovs[i].node;
1597 hfi1_mmu_rb_remove(req->pq->handler,
1600 atomic_dec(&node->refcount);
1604 clear_bit(req->info.comp_idx, req->pq->req_in_use);
1607 static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
1608 struct hfi1_user_sdma_comp_q *cq,
1609 u16 idx, enum hfi1_sdma_comp_state state,
1612 hfi1_cdbg(SDMA, "[%u:%u:%u:%u] Setting completion status %u %d",
1613 pq->dd->unit, pq->ctxt, pq->subctxt, idx, state, ret);
1615 cq->comps[idx].errcode = -ret;
1616 smp_wmb(); /* make sure errcode is visible first */
1617 cq->comps[idx].status = state;
1618 trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
1622 static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
1625 return (bool)(node->addr == addr);
1628 static int sdma_rb_insert(void *arg, struct mmu_rb_node *mnode)
1630 struct sdma_mmu_node *node =
1631 container_of(mnode, struct sdma_mmu_node, rb);
1633 atomic_inc(&node->refcount);
1638 * Return 1 to remove the node from the rb tree and call the remove op.
1640 * Called with the rb tree lock held.
1642 static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
1643 void *evict_arg, bool *stop)
1645 struct sdma_mmu_node *node =
1646 container_of(mnode, struct sdma_mmu_node, rb);
1647 struct evict_data *evict_data = evict_arg;
1649 /* is this node still being used? */
1650 if (atomic_read(&node->refcount))
1651 return 0; /* keep this node */
1653 /* this node will be evicted, add its pages to our count */
1654 evict_data->cleared += node->npages;
1656 /* have enough pages been cleared? */
1657 if (evict_data->cleared >= evict_data->target)
1660 return 1; /* remove this node */
1663 static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
1665 struct sdma_mmu_node *node =
1666 container_of(mnode, struct sdma_mmu_node, rb);
1668 unpin_sdma_pages(node);
1672 static int sdma_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
1674 struct sdma_mmu_node *node =
1675 container_of(mnode, struct sdma_mmu_node, rb);
1677 if (!atomic_read(&node->refcount))