/*
 * Copyright(c) 2015-2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <asm/page.h>
#include <linux/string.h>

#include "user_exp_rcv.h"
#include "trace.h"
struct tid_rb_node {
	struct mmu_rb_node mmu;
	unsigned long phys;
	struct tid_group *grp;
	u32 rcventry;
	dma_addr_t dma_addr;
	bool freed;
	unsigned int npages;
	struct page *pages[0];
};

#define EXP_TID_SET_EMPTY(set) (set.count == 0 && list_empty(&set.list))
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd);
static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages);
static int tid_rb_insert(void *arg, struct mmu_rb_node *node);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode);
static void tid_rb_remove(void *arg, struct mmu_rb_node *node);
static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode);
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped);
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
static struct mmu_rb_ops tid_rb_ops = {
	.insert = tid_rb_insert,
	.remove = tid_rb_remove,
	.invalidate = tid_rb_invalidate
};
/*
 * Initialize context and file private data needed for Expected
 * receive caching. This needs to be done after the context has
 * been configured with the eager/expected RcvEntry counts.
 */
int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
			   struct hfi1_ctxtdata *uctxt)
{
	struct hfi1_devdata *dd = uctxt->dd;
	int ret = 0;

	spin_lock_init(&fd->tid_lock);
	spin_lock_init(&fd->invalid_lock);

	fd->entry_to_rb = kcalloc(uctxt->expected_count,
				  sizeof(struct rb_node *),
				  GFP_KERNEL);
	if (!fd->entry_to_rb)
		return -ENOMEM;

	if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
		fd->invalid_tid_idx = 0;
		fd->invalid_tids = kcalloc(uctxt->expected_count,
					   sizeof(*fd->invalid_tids),
					   GFP_KERNEL);
		if (!fd->invalid_tids) {
			kfree(fd->entry_to_rb);
			fd->entry_to_rb = NULL;
			return -ENOMEM;
		}

		/*
		 * Register MMU notifier callbacks. If the registration
		 * fails, continue without TID caching for this context.
		 */
		ret = hfi1_mmu_rb_register(fd, fd->mm, &tid_rb_ops,
					   dd->pport->hfi1_wq,
					   &fd->handler);
		if (ret) {
			dd_dev_info(dd,
				    "Failed MMU notifier registration %d\n",
				    ret);
			ret = 0;
		}
	}
	/*
	 * PSM does not have a good way to separate, count, and
	 * effectively enforce a limit on RcvArray entries used by
	 * subctxts (when context sharing is used) when TID caching
	 * is enabled. To help with that, we calculate a per-process
	 * RcvArray entry share and enforce that.
	 * If TID caching is not in use, PSM deals with usage on its
	 * own. In that case, we allow any subctxt to take all of the
	 * entries.
	 *
	 * Make sure that we set the tid counts only after successful
	 * init.
	 */
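	/*
	 * Illustrative example (not from the original source): with
	 * expected_count = 2048 and subctxt_cnt = 3, each process gets
	 * 2048 / 3 = 682 entries, and the remainder of 2 is spread over
	 * the first two subctxts, giving limits of 683, 683, and 682.
	 */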
	spin_lock(&fd->tid_lock);
	if (uctxt->subctxt_cnt && fd->handler) {
		u16 remainder;

		fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
		remainder = uctxt->expected_count % uctxt->subctxt_cnt;
		if (remainder && fd->subctxt < remainder)
			fd->tid_limit++;
	} else {
		fd->tid_limit = uctxt->expected_count;
	}
	spin_unlock(&fd->tid_lock);

	return ret;
}
void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;

	/*
	 * The notifier would have been removed when the process's mm
	 * was freed.
	 */
	if (fd->handler) {
		hfi1_mmu_rb_unregister(fd->handler);
	} else {
		if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
			unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
		if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
			unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
	}

	kfree(fd->invalid_tids);
	fd->invalid_tids = NULL;

	kfree(fd->entry_to_rb);
	fd->entry_to_rb = NULL;
}
/**
 * Release pinned receive buffer pages.
 *
 * @mapped - true if the pages have been DMA mapped, false otherwise.
 * @idx - index of the first page to unpin.
 * @npages - number of pages to unpin.
 *
 * If the pages have been DMA mapped (indicated by the mapped parameter), their
 * info will be passed via a struct tid_rb_node. If they haven't been mapped,
 * their info will be passed via a struct tid_user_buf.
 */
static void unpin_rcv_pages(struct hfi1_filedata *fd,
			    struct tid_user_buf *tidbuf,
			    struct tid_rb_node *node,
			    unsigned int idx,
			    unsigned int npages,
			    bool mapped)
{
	struct page **pages;
	struct hfi1_devdata *dd = fd->uctxt->dd;

	if (mapped) {
		pci_unmap_single(dd->pcidev, node->dma_addr,
				 node->mmu.len, PCI_DMA_FROMDEVICE);
		pages = &node->pages[idx];
	} else {
		pages = &tidbuf->pages[idx];
	}
	hfi1_release_user_pages(fd->mm, pages, npages, mapped);
	fd->tid_n_pinned -= npages;
}
/**
 * Pin receive buffer pages.
 */
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
	int pinned;
	unsigned int npages;
	unsigned long vaddr = tidbuf->vaddr;
	struct page **pages = NULL;
	struct hfi1_devdata *dd = fd->uctxt->dd;

	/* Get the number of pages the user buffer spans */
	npages = num_user_pages(vaddr, tidbuf->length);
	if (!npages)
		return -EINVAL;

	if (npages > fd->uctxt->expected_count) {
		dd_dev_err(dd, "Expected buffer too big\n");
		return -EINVAL;
	}

	/* Verify that access is OK for the user buffer */
	if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
		       npages * PAGE_SIZE)) {
		dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
			   (void *)vaddr, npages);
		return -EFAULT;
	}

	/* Allocate the array of struct page pointers needed for pinning */
	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/*
	 * Pin all the pages of the user buffer. If we can't pin all the
	 * pages, accept the amount pinned so far and program only that.
	 * User space knows how to deal with partially programmed buffers.
	 */
	if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
		kfree(pages);
		return -ENOMEM;
	}

	pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
	if (pinned <= 0) {
		kfree(pages);
		return pinned;
	}
	tidbuf->pages = pages;
	tidbuf->npages = npages;
	fd->tid_n_pinned += pinned;
	return pinned;
}
/*
 * RcvArray entry allocation for Expected Receives is done by the
 * following algorithm:
 *
 * The context keeps 3 lists of groups of RcvArray entries:
 *   1. List of empty groups - tid_group_list
 *      This list is created during user context creation and
 *      contains elements which describe sets (of 8) of empty
 *      RcvArray entries.
 *   2. List of partially used groups - tid_used_list
 *      This list contains sets of RcvArray entries which are
 *      not completely used up. Another mapping request could
 *      use some or all of the remaining entries.
 *   3. List of full groups - tid_full_list
 *      This is the list where sets that are completely used
 *      up go.
 *
 * An attempt to optimize the usage of RcvArray entries is
 * made by finding all sets of physically contiguous pages in a
 * user's buffer.
 * These physically contiguous sets are further split into
 * sizes supported by the receive engine of the HFI. The
 * resulting sets of pages are stored in struct tid_pageset,
 * which describes the sets as:
 *    * .count - number of pages in this set
 *    * .idx - starting index into struct page ** array
 *             of this set
 *
 * From this point on, the algorithm deals with the page sets
 * described above. The number of pagesets is divided by the
 * RcvArray group size to produce the number of full groups
 * needed.
 *
 * Groups from the 3 lists are manipulated using the following
 * rules:
 *   1. For each set of 8 pagesets, a complete group from
 *      tid_group_list is taken, programmed, and moved to
 *      the tid_full_list list.
 *   2. For all remaining pagesets:
 *      2.1 If the tid_used_list is empty and the tid_group_list
 *          is empty, stop processing pagesets and return only
 *          what has been programmed up to this point.
 *      2.2 If the tid_used_list is empty and the tid_group_list
 *          is not empty, move a group from tid_group_list to
 *          tid_used_list.
 *      2.3 For each group in tid_used_list, program as much as
 *          can fit into the group. If the group becomes fully
 *          used, move it to tid_full_list.
 */
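/*
 * Illustrative walk-through (not from the original source): with a group
 * size of 8, a request that produces 20 pagesets needs 20 / 8 = 2 full
 * groups. Rule 1 programs 16 pagesets into two groups pulled from
 * tid_group_list and moves them to tid_full_list; rule 2 then places the
 * remaining 4 pagesets into a partially used group from tid_used_list
 * (or a fresh group from tid_group_list if none exists), moving any
 * group that fills up onto tid_full_list.
 */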
int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0, need_group = 0, pinned;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	unsigned int ngroups, pageidx = 0, pageset_count,
		tididx = 0, mapped, mapped_pages = 0;
	u32 *tidlist = NULL;
	struct tid_user_buf *tidbuf;

	tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
	if (!tidbuf)
		return -ENOMEM;

	tidbuf->vaddr = tinfo->vaddr;
	tidbuf->length = tinfo->length;
	tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
				GFP_KERNEL);
	if (!tidbuf->psets) {
		kfree(tidbuf);
		return -ENOMEM;
	}

	pinned = pin_rcv_pages(fd, tidbuf);
	if (pinned <= 0) {
		kfree(tidbuf->psets);
		kfree(tidbuf);
		return pinned;
	}

	/* Find sets of physically contiguous pages */
	tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);

	/*
	 * We don't need to access this under a lock since tid_used is per
	 * process and the same process cannot be in hfi1_user_exp_rcv_clear()
	 * and hfi1_user_exp_rcv_setup() at the same time.
	 */
	spin_lock(&fd->tid_lock);
	if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
		pageset_count = fd->tid_limit - fd->tid_used;
	else
		pageset_count = tidbuf->n_psets;
	spin_unlock(&fd->tid_lock);

	if (!pageset_count)
		goto bail;

	ngroups = pageset_count / dd->rcv_entries.group_size;
	tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
	if (!tidlist) {
		ret = -ENOMEM;
		goto nomem;
	}

	tididx = 0;
	/*
	 * From this point on, we are going to be using shared (between master
	 * and subcontexts) context resources. We need to take the lock.
	 */
	mutex_lock(&uctxt->exp_lock);
	/*
	 * The first step is to program the RcvArray entries which are complete
	 * groups.
	 */
	while (ngroups && uctxt->tid_group_list.count) {
		struct tid_group *grp =
			tid_group_pop(&uctxt->tid_group_list);

		ret = program_rcvarray(fd, tidbuf, grp,
				       pageidx, dd->rcv_entries.group_size,
				       tidlist, &tididx, &mapped);
		/*
		 * If there was a failure to program the RcvArray
		 * entries for the entire group, reset the grp fields
		 * and add the grp back to the free group list.
		 */
		if (ret <= 0) {
			tid_group_add_tail(grp, &uctxt->tid_group_list);
			hfi1_cdbg(TID,
				  "Failed to program RcvArray group %d", ret);
			goto unlock;
		}

		tid_group_add_tail(grp, &uctxt->tid_full_list);
		ngroups--;
		pageidx += ret;
		mapped_pages += mapped;
	}
	while (pageidx < pageset_count) {
		struct tid_group *grp, *ptr;
		/*
		 * If we don't have any partially used tid groups, check
		 * if we have empty groups. If so, take one from there and
		 * put it in the partially used list.
		 */
		if (!uctxt->tid_used_list.count || need_group) {
			if (!uctxt->tid_group_list.count)
				goto unlock;

			grp = tid_group_pop(&uctxt->tid_group_list);
			tid_group_add_tail(grp, &uctxt->tid_used_list);
			need_group = 0;
		}
		/*
		 * There is an optimization opportunity here - instead of
		 * fitting as many page sets as we can, check for a group
		 * later on in the list that could fit all of them.
		 */
		list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
					 list) {
			unsigned use = min_t(unsigned, pageset_count - pageidx,
					     grp->size - grp->used);

			ret = program_rcvarray(fd, tidbuf, grp,
					       pageidx, use, tidlist,
					       &tididx, &mapped);
			if (ret < 0) {
				hfi1_cdbg(TID,
					  "Failed to program RcvArray entries %d",
					  ret);
				ret = -EFAULT;
				goto unlock;
			} else if (ret > 0) {
				if (grp->used == grp->size)
					tid_group_move(grp,
						       &uctxt->tid_used_list,
						       &uctxt->tid_full_list);
				pageidx += ret;
				mapped_pages += mapped;
				need_group = 0;
				/* Check if we are done so we break out early */
				if (pageidx >= pageset_count)
					break;
			} else if (WARN_ON(ret == 0)) {
				/*
				 * If ret is 0, we did not program any entries
				 * into this group, which can only happen if
				 * we've screwed up the accounting somewhere.
				 * Warn and try to continue.
				 */
				need_group = 1;
			}
		}
	}
unlock:
	mutex_unlock(&uctxt->exp_lock);
nomem:
	hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
		  mapped_pages, ret);
	if (tididx) {
		spin_lock(&fd->tid_lock);
		fd->tid_used += tididx;
		spin_unlock(&fd->tid_lock);
		tinfo->tidcnt = tididx;
		tinfo->length = mapped_pages * PAGE_SIZE;

		if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
				 tidlist, sizeof(tidlist[0]) * tididx)) {
			/*
			 * On failure to copy to the user level, we need to undo
			 * everything done so far so we don't leak resources.
			 */
			tinfo->tidlist = (unsigned long)&tidlist;
			hfi1_user_exp_rcv_clear(fd, tinfo);
			tinfo->tidlist = 0;
			ret = -EFAULT;
			goto bail;
		}
	}

	/*
	 * If not everything was mapped (due to insufficient RcvArray entries,
	 * for example), unpin all unmapped pages so we can pin them next time.
	 */
	if (mapped_pages != pinned)
		unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
				(pinned - mapped_pages), false);
bail:
	kfree(tidbuf->psets);
	kfree(tidlist);
	kfree(tidbuf->pages);
	kfree(tidbuf);
	return ret > 0 ? 0 : ret;
}
int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
			    struct hfi1_tid_info *tinfo)
{
	int ret = 0;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	u32 *tidinfo;
	unsigned int tididx;

	if (unlikely(tinfo->tidcnt > fd->tid_used))
		return -EINVAL;

	tidinfo = memdup_user((void __user *)(unsigned long)tinfo->tidlist,
			      sizeof(tidinfo[0]) * tinfo->tidcnt);
	if (IS_ERR(tidinfo))
		return PTR_ERR(tidinfo);

	mutex_lock(&uctxt->exp_lock);
	for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
		ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
		if (ret) {
			hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
				  ret);
			break;
		}
	}
	spin_lock(&fd->tid_lock);
	fd->tid_used -= tididx;
	spin_unlock(&fd->tid_lock);
	tinfo->tidcnt = tididx;
	mutex_unlock(&uctxt->exp_lock);

	kfree(tidinfo);
	return ret;
}
int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
			      struct hfi1_tid_info *tinfo)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	unsigned long *ev = uctxt->dd->events +
		(((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
		  HFI1_MAX_SHARED_CTXTS) + fd->subctxt);
	u32 *array;
	int ret = 0;

	if (!fd->invalid_tids)
		return -EINVAL;

	/*
	 * copy_to_user() can sleep, which will leave the invalid_lock
	 * locked and cause the MMU notifier to be blocked on the lock
	 * for a long time.
	 * Copy the data to a local buffer so we can release the lock.
	 */
	array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -EFAULT;

	spin_lock(&fd->invalid_lock);
	if (fd->invalid_tid_idx) {
		memcpy(array, fd->invalid_tids, sizeof(*array) *
		       fd->invalid_tid_idx);
		memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
		       fd->invalid_tid_idx);
		tinfo->tidcnt = fd->invalid_tid_idx;
		fd->invalid_tid_idx = 0;
		/*
		 * Reset the user flag while still holding the lock.
		 * Otherwise, PSM can miss events.
		 */
		clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
	} else {
		tinfo->tidcnt = 0;
	}
	spin_unlock(&fd->invalid_lock);

	if (copy_to_user((void __user *)tinfo->tidlist,
			 array, sizeof(*array) * tinfo->tidcnt))
		ret = -EFAULT;

	kfree(array);
	return ret;
}
static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
{
	unsigned pagecount, pageidx, setcount = 0, i;
	unsigned long pfn, this_pfn;
	struct page **pages = tidbuf->pages;
	struct tid_pageset *list = tidbuf->psets;

	if (!npages)
		return 0;

	/*
	 * Look for sets of physically contiguous pages in the user buffer.
	 * This will allow us to optimize Expected RcvArray entry usage by
	 * using the bigger supported sizes.
	 */
	pfn = page_to_pfn(pages[0]);
	for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
		this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;

		/*
		 * If the pfn's are not sequential, pages are not physically
		 * contiguous.
		 */
		if (this_pfn != ++pfn) {
			/*
			 * At this point we have to loop over the set of
			 * physically contiguous pages and break them down into
			 * sizes supported by the HW.
			 * There are two main constraints:
			 * 1. The max buffer size is MAX_EXPECTED_BUFFER.
			 *    If the total set size is bigger than that,
			 *    program only a MAX_EXPECTED_BUFFER chunk.
			 * 2. The buffer size has to be a power of two. If
			 *    it is not, round down to the closest power of
			 *    2 and program that size.
			 */
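			/*
			 * Illustrative example (not from the original
			 * source): a run of 13 contiguous 4 KiB pages,
			 * with a hypothetical MAX_EXPECTED_BUFFER of
			 * 32 KiB, is split into pagesets of 8, 4, and 1
			 * pages (32 KiB, 16 KiB, and 4 KiB).
			 */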
			while (pagecount) {
				int maxpages = pagecount;
				u32 bufsize = pagecount * PAGE_SIZE;

				if (bufsize > MAX_EXPECTED_BUFFER)
					maxpages =
						MAX_EXPECTED_BUFFER >>
						PAGE_SHIFT;
				else if (!is_power_of_2(bufsize))
					maxpages =
						rounddown_pow_of_two(bufsize) >>
						PAGE_SHIFT;

				list[setcount].idx = pageidx;
				list[setcount].count = maxpages;
				pagecount -= maxpages;
				pageidx += maxpages;
				setcount++;
			}
			pageidx = i;
			pagecount = 1;
			pfn = this_pfn;
		} else {
			pagecount++;
		}
	}
	return setcount;
}
/**
 * program_rcvarray() - program an RcvArray group with receive buffers
 * @fd: filedata pointer
 * @tbuf: pointer to struct tid_user_buf that has the user buffer starting
 *	  virtual address, buffer length, page pointers, pagesets (array of
 *	  struct tid_pageset holding information on physically contiguous
 *	  chunks from the user buffer), and other fields.
 * @grp: RcvArray group
 * @start: starting index into sets array
 * @count: number of struct tid_pageset's to program
 * @tidlist: the array of u32 elements where the information about the
 *	     programmed RcvArray entries is to be encoded.
 * @tididx: starting offset into tidlist
 * @pmapped: (output parameter) number of pages programmed into the RcvArray
 *	     entries.
 *
 * This function will program up to 'count' number of RcvArray entries from the
 * group 'grp'. To make best use of write-combining writes, the function will
 * perform writes to the unused RcvArray entries which will be ignored by the
 * HW. Each RcvArray entry will be programmed with a physically contiguous
 * buffer chunk from the user's virtual buffer.
 *
 * Return:
 * -EINVAL if the requested count is larger than the size of the group,
 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
 * number of RcvArray entries programmed.
 */
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
			    struct tid_group *grp,
			    unsigned int start, u16 count,
			    u32 *tidlist, unsigned int *tididx,
			    unsigned int *pmapped)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	u16 idx;
	u32 tidinfo = 0, rcventry, useidx = 0;
	int mapped = 0;

	/* Count should never be larger than the group size */
	if (count > grp->size)
		return -EINVAL;

	/* Find the first unused entry in the group */
	for (idx = 0; idx < grp->size; idx++) {
		if (!(grp->map & (1 << idx))) {
			useidx = idx;
			break;
		}
		rcv_array_wc_fill(dd, grp->base + idx);
	}
	idx = 0;
	while (idx < count) {
		u16 npages, pageidx, setidx = start + idx;
		int ret = 0;

		/*
		 * If this entry in the group is used, move to the next one.
		 * If we go past the end of the group, exit the loop.
		 */
		if (useidx >= grp->size) {
			break;
		} else if (grp->map & (1 << useidx)) {
			rcv_array_wc_fill(dd, grp->base + useidx);
			useidx++;
			continue;
		}

		rcventry = grp->base + useidx;
		npages = tbuf->psets[setidx].count;
		pageidx = tbuf->psets[setidx].idx;

		ret = set_rcvarray_entry(fd, tbuf,
					 rcventry, grp, pageidx,
					 npages);
		if (ret)
			return ret;
		mapped += npages;

		tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
			EXP_TID_SET(LEN, npages);
		tidlist[(*tididx)++] = tidinfo;
		grp->used++;
		grp->map |= 1 << useidx++;
		idx++;
	}

	/* Fill the rest of the group with "blank" writes */
	for (; useidx < grp->size; useidx++)
		rcv_array_wc_fill(dd, grp->base + useidx);
	*pmapped = mapped;
	return idx;
}
static int set_rcvarray_entry(struct hfi1_filedata *fd,
			      struct tid_user_buf *tbuf,
			      u32 rcventry, struct tid_group *grp,
			      u16 pageidx, unsigned int npages)
{
	int ret;
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct tid_rb_node *node;
	struct hfi1_devdata *dd = uctxt->dd;
	dma_addr_t phys;
	struct page **pages = tbuf->pages + pageidx;

	/*
	 * Allocate the node first so we can handle a potential
	 * failure before we've programmed anything.
	 */
	node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
		       GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	phys = pci_map_single(dd->pcidev,
			      __va(page_to_phys(pages[0])),
			      npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, phys)) {
		dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
			   phys);
		kfree(node);
		return -EFAULT;
	}

	node->mmu.addr = tbuf->vaddr + (pageidx * PAGE_SIZE);
	node->mmu.len = npages * PAGE_SIZE;
	node->phys = page_to_phys(pages[0]);
	node->npages = npages;
	node->rcventry = rcventry;
	node->dma_addr = phys;
	node->grp = grp;
	node->freed = false;
	memcpy(node->pages, pages, sizeof(struct page *) * npages);

	if (!fd->handler)
		ret = tid_rb_insert(fd, &node->mmu);
	else
		ret = hfi1_mmu_rb_insert(fd->handler, &node->mmu);

	if (ret) {
		hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
			  node->rcventry, node->mmu.addr, node->phys, ret);
		pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
				 PCI_DMA_FROMDEVICE);
		kfree(node);
		return -EFAULT;
	}
	hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
	trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
			       node->mmu.addr, node->phys, phys);
	return 0;
}
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
			      struct tid_group **grp)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;
	struct tid_rb_node *node;
	u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
	u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;

	if (tididx >= uctxt->expected_count) {
		dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
			   tididx, uctxt->ctxt);
		return -EINVAL;
	}

	if (tidctrl == 0x3)
		return -EINVAL;

	rcventry = tididx + (tidctrl - 1);
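	/*
	 * Note on the decode above (illustrative, assuming the EXP_TID
	 * field layout used elsewhere in this driver): IDX addresses a
	 * pair of RcvArray entries and tidctrl selects one of the two,
	 * 1 for the even entry and 2 for the odd one (3 is rejected).
	 * For example, IDX = 5 with tidctrl = 2 decodes to
	 * rcventry = (5 << 1) + (2 - 1) = 11.
	 */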
	node = fd->entry_to_rb[rcventry];
	if (!node || node->rcventry != (uctxt->expected_base + rcventry))
		return -EBADF;

	if (grp)
		*grp = node->grp;

	if (!fd->handler)
		cacheless_tid_rb_remove(fd, node);
	else
		hfi1_mmu_rb_remove(fd->handler, &node->mmu);

	return 0;
}
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
	struct hfi1_ctxtdata *uctxt = fd->uctxt;
	struct hfi1_devdata *dd = uctxt->dd;

	trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
				 node->npages, node->mmu.addr, node->phys,
				 node->dma_addr);

	/*
	 * Make sure device has seen the write before we unpin the
	 * pages.
	 */
	hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);

	unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);

	node->grp->used--;
	node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

	if (node->grp->used == node->grp->size - 1)
		tid_group_move(node->grp, &uctxt->tid_full_list,
			       &uctxt->tid_used_list);
	else if (!node->grp->used)
		tid_group_move(node->grp, &uctxt->tid_used_list,
			       &uctxt->tid_group_list);
	kfree(node);
}
/*
 * As a simple helper for hfi1_user_exp_rcv_free, this function deals with
 * clearing nodes in the non-cached case.
 */
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
			    struct exp_tid_set *set,
			    struct hfi1_filedata *fd)
{
	struct tid_group *grp, *ptr;
	int i;

	list_for_each_entry_safe(grp, ptr, &set->list, list) {
		list_del_init(&grp->list);

		for (i = 0; i < grp->size; i++) {
			if (grp->map & (1 << i)) {
				u16 rcventry = grp->base + i;
				struct tid_rb_node *node;

				node = fd->entry_to_rb[rcventry -
						       uctxt->expected_base];
				if (!node || node->rcventry != rcventry)
					continue;

				cacheless_tid_rb_remove(fd, node);
			}
		}
	}
}
/*
 * Always return 0 from this function. A non-zero return indicates that the
 * remove operation will be called and that memory should be unpinned.
 * However, the driver cannot unpin out from under PSM. Instead, retain the
 * memory (by returning 0) and inform PSM that the memory is going away. PSM
 * will call back later when it has removed the memory from its list.
 */
static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
{
	struct hfi1_filedata *fdata = arg;
	struct hfi1_ctxtdata *uctxt = fdata->uctxt;
	struct tid_rb_node *node =
		container_of(mnode, struct tid_rb_node, mmu);

	if (node->freed)
		return 0;

	trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt, node->mmu.addr,
				 node->rcventry, node->npages, node->dma_addr);
	node->freed = true;

	spin_lock(&fdata->invalid_lock);
	if (fdata->invalid_tid_idx < uctxt->expected_count) {
		fdata->invalid_tids[fdata->invalid_tid_idx] =
			rcventry2tidinfo(node->rcventry - uctxt->expected_base);
		fdata->invalid_tids[fdata->invalid_tid_idx] |=
			EXP_TID_SET(LEN, node->npages);
		if (!fdata->invalid_tid_idx) {
			unsigned long *ev;

			/*
			 * hfi1_set_uevent_bits() sets a user event flag
			 * for all processes. Because calling into the
			 * driver to process TID cache invalidations is
			 * expensive and TID cache invalidations are
			 * handled on a per-process basis, we can
			 * optimize this to set the flag only for the
			 * process in question.
			 */
			ev = uctxt->dd->events +
				(((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
				  HFI1_MAX_SHARED_CTXTS) + fdata->subctxt);
			set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
		}
		fdata->invalid_tid_idx++;
	}
	spin_unlock(&fdata->invalid_lock);
	return 0;
}
static int tid_rb_insert(void *arg, struct mmu_rb_node *node)
{
	struct hfi1_filedata *fdata = arg;
	struct tid_rb_node *tnode =
		container_of(node, struct tid_rb_node, mmu);
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = tnode;
	return 0;
}

static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
				    struct tid_rb_node *tnode)
{
	u32 base = fdata->uctxt->expected_base;

	fdata->entry_to_rb[tnode->rcventry - base] = NULL;
	clear_tid_node(fdata, tnode);
}

static void tid_rb_remove(void *arg, struct mmu_rb_node *node)
{
	struct hfi1_filedata *fdata = arg;
	struct tid_rb_node *tnode =
		container_of(node, struct tid_rb_node, mmu);

	cacheless_tid_rb_remove(fdata, tnode);
}