/*
 * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "qib.h"

/**
 * qib_alloc_lkey - allocate an lkey
 * @mr: memory region that this lkey protects
 * @dma_region: 0->normal key, 1->restricted DMA key
 *
 * Returns 0 if successful, otherwise returns -errno.
 *
 * Increments mr reference count as required.
 *
 * Sets the lkey field of mr for non-dma regions.
 */
int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret = 0;
	struct qib_ibdev *dev = to_idev(mr->pd->device);
	struct qib_lkey_table *rkt = &dev->lk_table;

	spin_lock_irqsave(&rkt->lock, flags);

	/* special case for dma_mr lkey == 0 */
	if (dma_region) {
		struct qib_mregion *tmr;

		tmr = rcu_dereference(dev->dma_mr);
		if (!tmr) {
			atomic_inc(&mr->refcount);	/* published key holds a reference */
			rcu_assign_pointer(dev->dma_mr, mr);
			mr->lkey_published = 1;
		}
		goto success;
	}
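
	/*
	 * Note that the unrestricted DMA key (lkey == 0) is published only
	 * through dev->dma_mr and never takes a slot in rkt->table; this is
	 * why qib_free_lkey() clears dev->dma_mr instead of a table entry
	 * when lkey is zero.
	 */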
	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (rkt->table[r] == NULL)
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n)
			goto bail;
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	if (mr->lkey == 0) {
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
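	/*
	 * Layout of the key built above: the table index r occupies the top
	 * ib_qib_lkey_table_size bits, the generation counter fills
	 * bits 8..(31 - ib_qib_lkey_table_size), and the low 8 bits are left
	 * clear; the generation keeps a recycled table slot from handing out
	 * the same key value twice in a row.
	 */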
	atomic_inc(&mr->refcount);	/* published key holds a reference */
	rcu_assign_pointer(rkt->table[r], mr);
	mr->lkey_published = 1;
success:
	spin_unlock_irqrestore(&rkt->lock, flags);
out:
	return ret;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = -ENOMEM;
	goto out;
}

/**
 * qib_free_lkey - free an lkey
 * @mr: mr to free from tables
 */
void qib_free_lkey(struct qib_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	struct qib_ibdev *dev = to_idev(mr->pd->device);
	struct qib_lkey_table *rkt = &dev->lk_table;

	spin_lock_irqsave(&rkt->lock, flags);
	if (!mr->lkey_published)
		goto out;
	if (lkey == 0)
		rcu_assign_pointer(dev->dma_mr, NULL);
	else {
		r = lkey >> (32 - ib_qib_lkey_table_size);
		rcu_assign_pointer(rkt->table[r], NULL);
	}
	mr->lkey_published = 0;
out:
	spin_unlock_irqrestore(&rkt->lock, flags);
}
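
/*
 * The readers in qib_lkey_ok() and qib_rkey_ok() below look a region up
 * under rcu_read_lock() and then take a reference with
 * atomic_inc_not_zero(), so clearing the RCU pointers above is enough to
 * keep a region whose refcount has already reached zero from being
 * acquired again.
 */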

/**
 * qib_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain
 * @isge: outgoing internal SGE
 * @sge: SGE to check
 * @acc: access flags
 *
 * Return 1 if valid and successful, otherwise returns 0.
 *
 * increments the reference count upon success
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 */
int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc)
{
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	rcu_read_lock();
	if (sge->lkey == 0) {
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
			goto bail;
		rcu_read_unlock();

		isge->mr = mr;
		isge->vaddr = (void *) sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	mr = rcu_dereference(
		rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
		goto bail;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail;
	if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
		goto bail;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		page sizes are uniform power of 2 so no loop is necessary
		entries_spanned_by_off is the number of times the loop below
		would have executed.
		*/
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off/QIB_SEGSZ;
		n = entries_spanned_by_off%QIB_SEGSZ;
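		/*
		 * Example (illustrative values, not from the driver): with
		 * page_shift = 12 (4 KiB pages) and off = 0x6800,
		 * entries_spanned_by_off = 6, off becomes 0x800, and the
		 * target segment is map[6 / QIB_SEGSZ]->segs[6 % QIB_SEGSZ].
		 */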
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}

/**
 * qib_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: qp for validation
 * @sge: SGE state
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Return 1 if successful, otherwise 0.
 *
 * increments the reference count upon success
 */
int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	rcu_read_lock();
	if (rkey == 0) {
		struct qib_pd *pd = to_ipd(qp->ibqp.pd);
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
			goto bail;
		rcu_read_unlock();

		sge->mr = mr;
		sge->vaddr = (void *) vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}

	mr = rcu_dereference(
		rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail;
	if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
		goto bail;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		page sizes are uniform power of 2 so no loop is necessary
		entries_spanned_by_off is the number of times the loop below
		would have executed.
		*/
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off/QIB_SEGSZ;
		n = entries_spanned_by_off%QIB_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}
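
/*
 * qib_lkey_ok() and qib_rkey_ok() follow the same pattern: the lkey path
 * checks sge->addr against the region's user_base and the caller's pd,
 * while the rkey path checks the remote vaddr against mr->iova and the
 * QP's pd.  Both take a reference with atomic_inc_not_zero() before
 * dropping the RCU read lock.
 */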

/*
 * Initialize the memory region specified by the work request.
 */
int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
{
	struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
	struct qib_pd *pd = to_ipd(qp->ibqp.pd);
	struct qib_mregion *mr;
	u32 rkey = wr->wr.fast_reg.rkey;
	unsigned i, n, m;
	int ret = -EINVAL;
	unsigned long flags;
	u64 *page_list;
	size_t ps;

	spin_lock_irqsave(&rkt->lock, flags);
	if (pd->user || rkey == 0)
		goto bail;

	mr = rcu_dereference_protected(
		rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))],
		lockdep_is_held(&rkt->lock));
	if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
		goto bail;

	if (wr->wr.fast_reg.page_list_len > mr->max_segs)
		goto bail;

	ps = 1UL << wr->wr.fast_reg.page_shift;
	if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len)
		goto bail;

	mr->user_base = wr->wr.fast_reg.iova_start;
	mr->iova = wr->wr.fast_reg.iova_start;
	mr->lkey = rkey;
	mr->length = wr->wr.fast_reg.length;
	mr->access_flags = wr->wr.fast_reg.access_flags;
	page_list = wr->wr.fast_reg.page_list->page_list;
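	/*
	 * The loop below scatters the work request's page list into the
	 * region's two-level map: QIB_SEGSZ segments per map chunk, each
	 * segment covering one page of ps bytes.
	 */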
	m = 0;
	n = 0;
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
		mr->map[m]->segs[n].vaddr = (void *) page_list[i];
		mr->map[m]->segs[n].length = ps;
		if (++n == QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}

	ret = 0;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return ret;
}