// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2015-2018 Intel Corporation.
 */
#include <asm/page.h>
#include <linux/string.h>

#include "mmu_rb.h"
#include "user_exp_rcv.h"
#include "trace.h"
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
                            struct exp_tid_set *set,
                            struct hfi1_filedata *fd);
static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
static int set_rcvarray_entry(struct hfi1_filedata *fd,
                              struct tid_user_buf *tbuf,
                              u32 rcventry, struct tid_group *grp,
                              u16 pageidx, unsigned int npages);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
                                    struct tid_rb_node *tnode);
static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
                              const struct mmu_notifier_range *range,
                              unsigned long cur_seq);
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
                            struct tid_group *grp,
                            unsigned int start, u16 count,
                            u32 *tidlist, unsigned int *tididx,
                            unsigned int *pmapped);
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
                              struct tid_group **grp);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);

static const struct mmu_interval_notifier_ops tid_mn_ops = {
        .invalidate = tid_rb_invalidate,
};
/*
 * Initialize context and file private data needed for Expected
 * receive caching. This needs to be done after the context has
 * been configured with the eager/expected RcvEntry counts.
 */
int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
                           struct hfi1_ctxtdata *uctxt)
{
        int ret = 0;

        fd->entry_to_rb = kcalloc(uctxt->expected_count,
                                  sizeof(struct rb_node *),
                                  GFP_KERNEL);
        if (!fd->entry_to_rb)
                return -ENOMEM;

        if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
                fd->invalid_tid_idx = 0;
                fd->invalid_tids = kcalloc(uctxt->expected_count,
                                           sizeof(*fd->invalid_tids),
                                           GFP_KERNEL);
                if (!fd->invalid_tids) {
                        kfree(fd->entry_to_rb);
                        fd->entry_to_rb = NULL;
                        return -ENOMEM;
                }
                fd->use_mn = true;
        }

        /*
         * PSM does not have a good way to separate, count, and
         * effectively enforce a limit on RcvArray entries used by
         * subctxts (when context sharing is used) when TID caching
         * is enabled. To help with that, we calculate a per-process
         * RcvArray entry share and enforce that.
         * If TID caching is not in use, PSM deals with usage on its
         * own. In that case, we allow any subctxt to take all of the
         * entries.
         *
         * Make sure that we set the tid counts only after successful
         * init.
         */
        spin_lock(&fd->tid_lock);
        if (uctxt->subctxt_cnt && fd->use_mn) {
                u16 remainder;

                fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
                remainder = uctxt->expected_count % uctxt->subctxt_cnt;
                if (remainder && fd->subctxt < remainder)
                        fd->tid_limit++;
        } else {
                fd->tid_limit = uctxt->expected_count;
        }
        spin_unlock(&fd->tid_lock);

        return ret;
}
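/*
 * Worked example for the tid_limit split in hfi1_user_exp_rcv_init() above
 * (illustrative, not from the original source): with expected_count = 2048
 * and subctxt_cnt = 3, each subcontext gets 2048 / 3 = 682 entries and the
 * remainder of 2 is handed to the first two subcontexts, giving per-process
 * limits of 683, 683, and 682.
 */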
void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
        struct hfi1_ctxtdata *uctxt = fd->uctxt;

        mutex_lock(&uctxt->exp_mutex);
        if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
                unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
        if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
                unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
        mutex_unlock(&uctxt->exp_mutex);

        kfree(fd->invalid_tids);
        fd->invalid_tids = NULL;

        kfree(fd->entry_to_rb);
        fd->entry_to_rb = NULL;
}
/**
 * unpin_rcv_pages - Release pinned receive buffer pages.
 * @fd: per-file private data
 * @tidbuf: unmapped buffer descriptor (used when @mapped is false)
 * @node: RcvArray node describing the mapped pages (used when @mapped is true)
 * @idx: Index of the first page to unpin.
 * @npages: Number of pages to unpin.
 * @mapped: true if the pages have been DMA mapped, false otherwise.
 *
 * If the pages have been DMA mapped (indicated by the mapped parameter), their
 * info will be passed via a struct tid_rb_node. If they haven't been mapped,
 * their info will be passed via a struct tid_user_buf.
 */
static void unpin_rcv_pages(struct hfi1_filedata *fd,
                            struct tid_user_buf *tidbuf,
                            struct tid_rb_node *node,
                            unsigned int idx,
                            unsigned int npages,
                            bool mapped)
{
        struct page **pages;
        struct hfi1_devdata *dd = fd->uctxt->dd;
        struct mm_struct *mm;

        if (mapped) {
                dma_unmap_single(&dd->pcidev->dev, node->dma_addr,
                                 node->npages * PAGE_SIZE, DMA_FROM_DEVICE);
                pages = &node->pages[idx];
                mm = mm_from_tid_node(node);
        } else {
                pages = &tidbuf->pages[idx];
                mm = current->mm;
        }

        hfi1_release_user_pages(mm, pages, npages, mapped);
        fd->tid_n_pinned -= npages;
}
/*
 * Pin receive buffer pages.
 */
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
        int pinned;
        unsigned int npages;
        unsigned long vaddr = tidbuf->vaddr;
        struct page **pages = NULL;
        struct hfi1_devdata *dd = fd->uctxt->dd;

        /* Get the number of pages the user buffer spans */
        npages = num_user_pages(vaddr, tidbuf->length);
        if (!npages)
                return -EINVAL;

        if (npages > fd->uctxt->expected_count) {
                dd_dev_err(dd, "Expected buffer too big\n");
                return -EINVAL;
        }

        /* Allocate the array of struct page pointers needed for pinning */
        pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        /*
         * Pin all the pages of the user buffer. If we can't pin all the
         * pages, accept the amount pinned so far and program only that.
         * User space knows how to deal with partially programmed buffers.
         */
        if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
                kfree(pages);
                return -ENOMEM;
        }

        pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
        if (pinned <= 0) {
                kfree(pages);
                return pinned;
        }

        tidbuf->pages = pages;
        tidbuf->npages = npages;
        fd->tid_n_pinned += pinned;
        return pinned;
}
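/*
 * Illustrative note on pin_rcv_pages() (not from the original source):
 * num_user_pages() counts every page the virtual range touches, so a
 * page-aligned 5000-byte buffer spans two 4 KiB pages (4096 + 904 bytes)
 * and pins two struct page pointers even though its length is barely over
 * one page.
 */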
/*
 * RcvArray entry allocation for Expected Receives is done by the
 * following algorithm:
 *
 * The context keeps 3 lists of groups of RcvArray entries:
 *   1. List of empty groups - tid_group_list
 *      This list is created during user context creation and
 *      contains elements which describe sets (of 8) of empty
 *      RcvArray entries.
 *   2. List of partially used groups - tid_used_list
 *      This list contains sets of RcvArray entries which are
 *      not completely used up. Another mapping request could
 *      use some or all of the remaining entries.
 *   3. List of full groups - tid_full_list
 *      This is the list where sets that are completely used
 *      up go.
 *
 * An attempt to optimize the usage of RcvArray entries is
 * made by finding all sets of physically contiguous pages in a
 * user's buffer.
 * These physically contiguous sets are further split into
 * sizes supported by the receive engine of the HFI. The
 * resulting sets of pages are stored in struct tid_pageset,
 * which describes the sets as:
 *    * .count - number of pages in this set
 *    * .idx - starting index into struct page ** array
 *             of this set
 *
 * From this point on, the algorithm deals with the page sets
 * described above. The number of pagesets is divided by the
 * RcvArray group size to produce the number of full groups
 * needed.
 *
 * Groups from the 3 lists are manipulated using the following
 * rules (a worked example follows this comment):
 *   1. For each set of 8 pagesets, a complete group from
 *      tid_group_list is taken, programmed, and moved to
 *      the tid_full_list list.
 *   2. For all remaining pagesets:
 *      2.1 If the tid_used_list is empty and the tid_group_list
 *          is empty, stop processing pagesets and return only
 *          what has been programmed up to this point.
 *      2.2 If the tid_used_list is empty and the tid_group_list
 *          is not empty, move a group from tid_group_list to
 *          the tid_used_list.
 *      2.3 For each group in the tid_used_list, program as many
 *          pagesets as can fit into the group. If the group
 *          becomes fully used, move it to tid_full_list.
 */
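/*
 * Worked example (illustrative, not from the original source): with a group
 * size of 8, a request that yields 21 pagesets programs two complete groups
 * (16 pagesets) taken from tid_group_list and moves them to tid_full_list,
 * then places the remaining 5 pagesets into a group taken from (or already
 * on) tid_used_list.
 */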
int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
                            struct hfi1_tid_info *tinfo)
{
        int ret = 0, need_group = 0, pinned;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
        unsigned int ngroups, pageidx = 0, pageset_count,
                tididx = 0, mapped, mapped_pages = 0;
        u32 *tidlist = NULL;
        struct tid_user_buf *tidbuf;

        if (!PAGE_ALIGNED(tinfo->vaddr))
                return -EINVAL;
        if (tinfo->length == 0)
                return -EINVAL;

        tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
        if (!tidbuf)
                return -ENOMEM;

        tidbuf->vaddr = tinfo->vaddr;
        tidbuf->length = tinfo->length;
        tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
                                GFP_KERNEL);
        if (!tidbuf->psets) {
                kfree(tidbuf);
                return -ENOMEM;
        }

        pinned = pin_rcv_pages(fd, tidbuf);
        if (pinned <= 0) {
                kfree(tidbuf->psets);
                kfree(tidbuf);
                return pinned;
        }

        /* Find sets of physically contiguous pages */
        tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);

        /*
         * We don't need to access this under a lock since tid_used is per
         * process and the same process cannot be in hfi1_user_exp_rcv_clear()
         * and hfi1_user_exp_rcv_setup() at the same time.
         */
        spin_lock(&fd->tid_lock);
        if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
                pageset_count = fd->tid_limit - fd->tid_used;
        else
                pageset_count = tidbuf->n_psets;
        spin_unlock(&fd->tid_lock);
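        /*
         * Illustrative example of the clamp above (not from the original
         * source): if this process has tid_limit = 100, has already used 90
         * entries, and the new buffer yields 25 pagesets, only the first 10
         * pagesets are programmed here; user space is expected to resubmit
         * the rest of the buffer later.
         */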
        if (!pageset_count)
                goto bail;

        ngroups = pageset_count / dd->rcv_entries.group_size;
        tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
        if (!tidlist) {
                ret = -ENOMEM;
                goto nomem;
        }

        /*
         * From this point on, we are going to be using shared (between master
         * and subcontexts) context resources. We need to take the lock.
         */
        mutex_lock(&uctxt->exp_mutex);
        /*
         * The first step is to program the RcvArray entries which are complete
         * groups.
         */
        while (ngroups && uctxt->tid_group_list.count) {
                struct tid_group *grp =
                        tid_group_pop(&uctxt->tid_group_list);

                ret = program_rcvarray(fd, tidbuf, grp,
                                       pageidx, dd->rcv_entries.group_size,
                                       tidlist, &tididx, &mapped);
                /*
                 * If there was a failure to program the RcvArray
                 * entries for the entire group, reset the grp fields
                 * and add the grp back to the free group list.
                 */
                if (ret <= 0) {
                        tid_group_add_tail(grp, &uctxt->tid_group_list);
                        hfi1_cdbg(TID,
                                  "Failed to program RcvArray group %d", ret);
                        goto unlock;
                }

                tid_group_add_tail(grp, &uctxt->tid_full_list);
                ngroups--;
                pageidx += ret;
                mapped_pages += mapped;
        }
        while (pageidx < pageset_count) {
                struct tid_group *grp, *ptr;
                /*
                 * If we don't have any partially used tid groups, check
                 * if we have empty groups. If so, take one from there and
                 * put it in the partially used list.
                 */
                if (!uctxt->tid_used_list.count || need_group) {
                        if (!uctxt->tid_group_list.count)
                                goto unlock;

                        grp = tid_group_pop(&uctxt->tid_group_list);
                        tid_group_add_tail(grp, &uctxt->tid_used_list);
                        need_group = 0;
                }
                /*
                 * There is an optimization opportunity here - instead of
                 * fitting as many page sets as we can, check for a group
                 * later on in the list that could fit all of them.
                 */
                list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
                                         list) {
                        unsigned int use = min_t(unsigned int,
                                                 pageset_count - pageidx,
                                                 grp->size - grp->used);

                        ret = program_rcvarray(fd, tidbuf, grp,
                                               pageidx, use, tidlist,
                                               &tididx, &mapped);
                        if (ret < 0) {
                                hfi1_cdbg(TID,
                                          "Failed to program RcvArray entries %d",
                                          ret);
                                goto unlock;
                        } else if (ret > 0) {
                                if (grp->used == grp->size)
                                        tid_group_move(grp,
                                                       &uctxt->tid_used_list,
                                                       &uctxt->tid_full_list);
                                pageidx += ret;
                                mapped_pages += mapped;
                                need_group = 0;
                                /* Check if we are done so we break out early */
                                if (pageidx >= pageset_count)
                                        break;
                        } else if (WARN_ON(ret == 0)) {
                                /*
                                 * If ret is 0, we did not program any entries
                                 * into this group, which can only happen if
                                 * we've screwed up the accounting somewhere.
                                 * Warn and try to continue.
                                 */
                                need_group = 1;
                        }
                }
        }
unlock:
        mutex_unlock(&uctxt->exp_mutex);
nomem:
        hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
                  mapped_pages, ret);
        if (tididx) {
                spin_lock(&fd->tid_lock);
                fd->tid_used += tididx;
                spin_unlock(&fd->tid_lock);
                tinfo->tidcnt = tididx;
                tinfo->length = mapped_pages * PAGE_SIZE;

                if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
                                 tidlist, sizeof(tidlist[0]) * tididx)) {
                        /*
                         * On failure to copy to the user level, we need to undo
                         * everything done so far so we don't leak resources.
                         */
                        tinfo->tidlist = (unsigned long)&tidlist;
                        hfi1_user_exp_rcv_clear(fd, tinfo);
                        tinfo->tidlist = 0;
                        ret = -EFAULT;
                        goto bail;
                }
        }

        /*
         * If not everything was mapped (due to insufficient RcvArray entries,
         * for example), unpin all unmapped pages so we can pin them next time.
         */
        if (mapped_pages != pinned)
                unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
                                (pinned - mapped_pages), false);
bail:
        kfree(tidbuf->psets);
        kfree(tidlist);
        kfree(tidbuf->pages);
        kfree(tidbuf);
        return ret > 0 ? 0 : ret;
}
int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
                            struct hfi1_tid_info *tinfo)
{
        int ret = 0;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        u32 *tidinfo;
        unsigned int tididx;

        if (unlikely(tinfo->tidcnt > fd->tid_used))
                return -EINVAL;

        tidinfo = memdup_user(u64_to_user_ptr(tinfo->tidlist),
                              sizeof(tidinfo[0]) * tinfo->tidcnt);
        if (IS_ERR(tidinfo))
                return PTR_ERR(tidinfo);

        mutex_lock(&uctxt->exp_mutex);
        for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
                ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
                if (ret) {
                        hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
                                  ret);
                        break;
                }
        }
        spin_lock(&fd->tid_lock);
        fd->tid_used -= tididx;
        spin_unlock(&fd->tid_lock);
        tinfo->tidcnt = tididx;
        mutex_unlock(&uctxt->exp_mutex);

        kfree(tidinfo);
        return ret;
}
int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
                              struct hfi1_tid_info *tinfo)
{
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        unsigned long *ev = uctxt->dd->events +
                (uctxt_offset(uctxt) + fd->subctxt);
        u32 *array;
        int ret = 0;

        /*
         * copy_to_user() can sleep, which will leave the invalid_lock
         * locked and cause the MMU notifier to be blocked on the lock
         * for a long time.
         * Copy the data to a local buffer so we can release the lock.
         */
        array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
        if (!array)
                return -EFAULT;

        spin_lock(&fd->invalid_lock);
        if (fd->invalid_tid_idx) {
                memcpy(array, fd->invalid_tids, sizeof(*array) *
                       fd->invalid_tid_idx);
                memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
                       fd->invalid_tid_idx);
                tinfo->tidcnt = fd->invalid_tid_idx;
                fd->invalid_tid_idx = 0;
                /*
                 * Reset the user flag while still holding the lock.
                 * Otherwise, PSM can miss events.
                 */
                clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
        } else {
                tinfo->tidcnt = 0;
        }
        spin_unlock(&fd->invalid_lock);

        if (copy_to_user((void __user *)tinfo->tidlist,
                         array, sizeof(*array) * tinfo->tidcnt))
                ret = -EFAULT;

        kfree(array);
        return ret;
}
static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
{
        unsigned int pagecount, pageidx, setcount = 0, i;
        unsigned long pfn, this_pfn;
        struct page **pages = tidbuf->pages;
        struct tid_pageset *list = tidbuf->psets;

        if (!npages)
                return 0;

        /*
         * Look for sets of physically contiguous pages in the user buffer.
         * This will allow us to optimize Expected RcvArray entry usage by
         * using the bigger supported sizes.
         */
        pfn = page_to_pfn(pages[0]);
        for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
                this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;

                /*
                 * If the pfn's are not sequential, pages are not physically
                 * contiguous.
                 */
                if (this_pfn != ++pfn) {
                        /*
                         * At this point we have to loop over the set of
                         * physically contiguous pages and break them down into
                         * sizes supported by the HW.
                         * There are two main constraints:
                         * 1. The max buffer size is MAX_EXPECTED_BUFFER.
                         *    If the total set size is bigger than that
                         *    program only a MAX_EXPECTED_BUFFER chunk.
                         * 2. The buffer size has to be a power of two. If
                         *    it is not, round down to the closest power of
                         *    2 and program that size.
                         */
                        while (pagecount) {
                                int maxpages = pagecount;
                                u32 bufsize = pagecount * PAGE_SIZE;

                                if (bufsize > MAX_EXPECTED_BUFFER)
                                        maxpages =
                                                MAX_EXPECTED_BUFFER >>
                                                PAGE_SHIFT;
                                else if (!is_power_of_2(bufsize))
                                        maxpages =
                                                rounddown_pow_of_two(bufsize) >>
                                                PAGE_SHIFT;

                                list[setcount].idx = pageidx;
                                list[setcount].count = maxpages;
                                pagecount -= maxpages;
                                pageidx += maxpages;
                                setcount++;
                        }

                        pageidx = i;
                        pagecount = 1;
                        pfn = this_pfn;
                } else {
                        pagecount++;
                }
        }

        return setcount;
}
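/*
 * Worked example for find_phys_blocks() (illustrative, not from the original
 * source, and assuming MAX_EXPECTED_BUFFER is larger than the run): a run of
 * 7 physically contiguous 4 KiB pages is not a power-of-two buffer size, so
 * it is emitted as three pagesets of 4, 2, and 1 pages. Each pageset later
 * becomes one RcvArray entry.
 */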
/**
 * program_rcvarray() - program an RcvArray group with receive buffers
 * @fd: filedata pointer
 * @tbuf: pointer to struct tid_user_buf that has the user buffer starting
 *        virtual address, buffer length, page pointers, pagesets (array of
 *        struct tid_pageset holding information on physically contiguous
 *        chunks from the user buffer), and other fields.
 * @grp: RcvArray group
 * @start: starting index into sets array
 * @count: number of struct tid_pageset's to program
 * @tidlist: the array of u32 elements where the information about the
 *           programmed RcvArray entries is to be encoded.
 * @tididx: starting offset into tidlist
 * @pmapped: (output parameter) number of pages programmed into the RcvArray
 *           entries.
 *
 * This function will program up to 'count' number of RcvArray entries from the
 * group 'grp'. To make best use of write-combining writes, the function will
 * perform writes to the unused RcvArray entries which will be ignored by the
 * HW. Each RcvArray entry will be programmed with a physically contiguous
 * buffer chunk from the user's virtual buffer.
 *
 * Return:
 * -EINVAL if the requested count is larger than the size of the group,
 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
 * number of RcvArray entries programmed.
 */
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
                            struct tid_group *grp,
                            unsigned int start, u16 count,
                            u32 *tidlist, unsigned int *tididx,
                            unsigned int *pmapped)
{
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
        u16 idx;
        u32 tidinfo = 0, rcventry, useidx = 0;
        int mapped = 0;

        /* Count should never be larger than the group size */
        if (count > grp->size)
                return -EINVAL;

        /* Find the first unused entry in the group */
        for (idx = 0; idx < grp->size; idx++) {
                if (!(grp->map & (1 << idx))) {
                        useidx = idx;
                        break;
                }
                rcv_array_wc_fill(dd, grp->base + idx);
        }

        idx = 0;
        while (idx < count) {
                u16 npages, pageidx, setidx = start + idx;
                int ret = 0;

                /*
                 * If this entry in the group is used, move to the next one.
                 * If we go past the end of the group, exit the loop.
                 */
                if (useidx >= grp->size) {
                        break;
                } else if (grp->map & (1 << useidx)) {
                        rcv_array_wc_fill(dd, grp->base + useidx);
                        useidx++;
                        continue;
                }

                rcventry = grp->base + useidx;
                npages = tbuf->psets[setidx].count;
                pageidx = tbuf->psets[setidx].idx;

                ret = set_rcvarray_entry(fd, tbuf,
                                         rcventry, grp, pageidx,
                                         npages);
                if (ret)
                        return ret;
                mapped += npages;

                tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
                        EXP_TID_SET(LEN, npages);
                tidlist[(*tididx)++] = tidinfo;
                grp->used++;
                grp->map |= 1 << useidx++;
                idx++;
        }

        /* Fill the rest of the group with "blank" writes */
        for (; useidx < grp->size; useidx++)
                rcv_array_wc_fill(dd, grp->base + useidx);
        *pmapped = mapped;
        return idx;
}
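/*
 * Note on the tidlist encoding above (derived from the code; wording not from
 * the original source): each u32 handed back to user space packs the RcvArray
 * entry location (via rcventry2tidinfo()) together with the buffer length in
 * pages (EXP_TID_SET(LEN, npages)). User space passes the same values back to
 * hfi1_user_exp_rcv_clear(), which decodes them in unprogram_rcvarray().
 */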
static int set_rcvarray_entry(struct hfi1_filedata *fd,
                              struct tid_user_buf *tbuf,
                              u32 rcventry, struct tid_group *grp,
                              u16 pageidx, unsigned int npages)
{
        int ret;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct tid_rb_node *node;
        struct hfi1_devdata *dd = uctxt->dd;
        dma_addr_t phys;
        struct page **pages = tbuf->pages + pageidx;

        /*
         * Allocate the node first so we can handle a potential
         * failure before we've programmed anything.
         */
        node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
                       GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        phys = dma_map_single(&dd->pcidev->dev, __va(page_to_phys(pages[0])),
                              npages * PAGE_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(&dd->pcidev->dev, phys)) {
                dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
                           phys);
                kfree(node);
                return -EFAULT;
        }

        node->fdata = fd;
        node->phys = page_to_phys(pages[0]);
        node->npages = npages;
        node->rcventry = rcventry;
        node->dma_addr = phys;
        node->grp = grp;
        node->freed = false;
        memcpy(node->pages, pages, sizeof(struct page *) * npages);

        if (fd->use_mn) {
                ret = mmu_interval_notifier_insert(
                        &node->notifier, current->mm,
                        tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
                        &tid_mn_ops);
                if (ret)
                        goto out_unmap;
                /*
                 * FIXME: This is in the wrong order, the notifier should be
                 * established before the pages are pinned by pin_rcv_pages.
                 */
                mmu_interval_read_begin(&node->notifier);
        }
        fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;

        hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
        trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
                               node->notifier.interval_tree.start, node->phys,
                               phys);
        return 0;

out_unmap:
        hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
                  node->rcventry, node->notifier.interval_tree.start,
                  node->phys, ret);
        dma_unmap_single(&dd->pcidev->dev, phys, npages * PAGE_SIZE,
                         DMA_FROM_DEVICE);
        kfree(node);
        return -EFAULT;
}
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
                              struct tid_group **grp)
{
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
        struct tid_rb_node *node;
        u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
        u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;

        if (tididx >= uctxt->expected_count) {
                dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
                           tididx, uctxt->ctxt);
                return -EINVAL;
        }

        if (tidctrl == 0x3)
                return -EINVAL;

        rcventry = tididx + (tidctrl - 1);

        node = fd->entry_to_rb[rcventry];
        if (!node || node->rcventry != (uctxt->expected_base + rcventry))
                return -EBADF;

        if (grp)
                *grp = node->grp;

        if (fd->use_mn)
                mmu_interval_notifier_remove(&node->notifier);
        cacheless_tid_rb_remove(fd, node);

        return 0;
}
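/*
 * Decoding note (derived from the code above; wording not from the original
 * source): the IDX field of a tidinfo names a pair of RcvArray entries, so it
 * is shifted left by one to get the base entry, and the CTRL field (1 or 2)
 * selects which entry of the pair was used, hence
 * rcventry = tididx + (tidctrl - 1). A CTRL value of 3 would name both
 * entries of the pair at once, which this path rejects.
 */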
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;

        trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
                                 node->npages,
                                 node->notifier.interval_tree.start, node->phys,
                                 node->dma_addr);

        /*
         * Make sure device has seen the write before we unpin the
         * pages.
         */
        hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);

        unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);

        node->grp->used--;
        node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

        if (node->grp->used == node->grp->size - 1)
                tid_group_move(node->grp, &uctxt->tid_full_list,
                               &uctxt->tid_used_list);
        else if (!node->grp->used)
                tid_group_move(node->grp, &uctxt->tid_used_list,
                               &uctxt->tid_group_list);

        kfree(node);
}
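/*
 * Group bookkeeping note (derived from the code above; wording not from the
 * original source): clearing one entry from a completely full group moves the
 * group from tid_full_list back to tid_used_list, and clearing the last used
 * entry of a group returns it to the free tid_group_list, keeping the three
 * lists described before hfi1_user_exp_rcv_setup() consistent.
 */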
/*
 * As a simple helper for hfi1_user_exp_rcv_free, this function deals with
 * clearing nodes in the non-cached case.
 */
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
                            struct exp_tid_set *set,
                            struct hfi1_filedata *fd)
{
        struct tid_group *grp, *ptr;
        int i;

        list_for_each_entry_safe(grp, ptr, &set->list, list) {
                list_del_init(&grp->list);

                for (i = 0; i < grp->size; i++) {
                        if (grp->map & (1 << i)) {
                                u16 rcventry = grp->base + i;
                                struct tid_rb_node *node;

                                node = fd->entry_to_rb[rcventry -
                                                       uctxt->expected_base];
                                if (!node || node->rcventry != rcventry)
                                        continue;

                                if (fd->use_mn)
                                        mmu_interval_notifier_remove(
                                                &node->notifier);
                                cacheless_tid_rb_remove(fd, node);
                        }
                }
        }
}
static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
                              const struct mmu_notifier_range *range,
                              unsigned long cur_seq)
{
        struct tid_rb_node *node =
                container_of(mni, struct tid_rb_node, notifier);
        struct hfi1_filedata *fdata = node->fdata;
        struct hfi1_ctxtdata *uctxt = fdata->uctxt;

        if (node->freed)
                return true;

        trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
                                 node->notifier.interval_tree.start,
                                 node->rcventry, node->npages, node->dma_addr);
        node->freed = true;

        spin_lock(&fdata->invalid_lock);
        if (fdata->invalid_tid_idx < uctxt->expected_count) {
                fdata->invalid_tids[fdata->invalid_tid_idx] =
                        rcventry2tidinfo(node->rcventry - uctxt->expected_base);
                fdata->invalid_tids[fdata->invalid_tid_idx] |=
                        EXP_TID_SET(LEN, node->npages);
                if (!fdata->invalid_tid_idx) {
                        unsigned long *ev;

                        /*
                         * hfi1_set_uevent_bits() sets a user event flag
                         * for all processes. Because calling into the
                         * driver to process TID cache invalidations is
                         * expensive and TID cache invalidations are
                         * handled on a per-process basis, we can
                         * optimize this to set the flag only for the
                         * process in question.
                         */
                        ev = uctxt->dd->events +
                                (uctxt_offset(uctxt) + fdata->subctxt);
                        set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
                }
                fdata->invalid_tid_idx++;
        }
        spin_unlock(&fdata->invalid_lock);
        return true;
}
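/*
 * Flow note (derived from the code; wording not from the original source):
 * when user memory backing a cached TID is invalidated, the entry is only
 * queued in fd->invalid_tids here, and the per-process event bit is set on
 * the first queued entry. The queued entries are reported to PSM when it
 * calls back into hfi1_user_exp_rcv_invalid(), and the RcvArray entry itself
 * is torn down when PSM frees the TID via hfi1_user_exp_rcv_clear().
 */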
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
                                    struct tid_rb_node *tnode)
{
        u32 base = fdata->uctxt->expected_base;

        fdata->entry_to_rb[tnode->rcventry - base] = NULL;
        clear_tid_node(fdata, tnode);
}