int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{
- struct rxe_map_set *set = mr->cur_map_set;
-
switch (mr->type) {
case IB_MR_TYPE_DMA:
	return 0;

case IB_MR_TYPE_USER:
case IB_MR_TYPE_MEM_REG:
- if (iova < set->iova || length > set->length ||
- iova > set->iova + set->length - length)
+ if (iova < mr->iova || length > mr->length ||
+ iova > mr->iova + mr->length - length)
return -EFAULT;
return 0;
}
-static void rxe_mr_free_map_set(int num_map, struct rxe_map_set *set)
-{
- int i;
-
- for (i = 0; i < num_map; i++)
- kfree(set->map[i]);
-
- kfree(set->map);
- kfree(set);
-}
-
-static int rxe_mr_alloc_map_set(int num_map, struct rxe_map_set **setp)
+static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
{
int i;
- struct rxe_map_set *set;
+ int num_map;
+ struct rxe_map **map = mr->map;
- set = kmalloc(sizeof(*set), GFP_KERNEL);
- if (!set)
- goto err_out;
+ num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
- set->map = kmalloc_array(num_map, sizeof(struct rxe_map *), GFP_KERNEL);
- if (!set->map)
- goto err_free_set;
+ mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
+ if (!mr->map)
+ goto err1;
for (i = 0; i < num_map; i++) {
- set->map[i] = kmalloc(sizeof(struct rxe_map), GFP_KERNEL);
- if (!set->map[i])
- goto err_free_map;
+ mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
+ if (!mr->map[i])
+ goto err2;
}
- *setp = set;
-
- return 0;
-
-err_free_map:
- for (i--; i >= 0; i--)
- kfree(set->map[i]);
-
- kfree(set->map);
-err_free_set:
- kfree(set);
-err_out:
- return -ENOMEM;
-}
-
-/**
- * rxe_mr_alloc() - Allocate memory map array(s) for MR
- * @mr: Memory region
- * @num_buf: Number of buffer descriptors to support
- * @both: If non zero allocate both mr->map and mr->next_map
- * else just allocate mr->map. Used for fast MRs
- *
- * Return: 0 on success else an error
- */
-static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both)
-{
- int ret;
- int num_map;
-
BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
- num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
mr->map_shift = ilog2(RXE_BUF_PER_MAP);
mr->map_mask = RXE_BUF_PER_MAP - 1;
+
mr->num_buf = num_buf;
- mr->max_buf = num_map * RXE_BUF_PER_MAP;
mr->num_map = num_map;
-
- ret = rxe_mr_alloc_map_set(num_map, &mr->cur_map_set);
- if (ret)
- return -ENOMEM;
-
- if (both) {
- ret = rxe_mr_alloc_map_set(num_map, &mr->next_map_set);
- if (ret)
- goto err_free;
- }
+ mr->max_buf = num_map * RXE_BUF_PER_MAP;
return 0;
-err_free:
- rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
- mr->cur_map_set = NULL;
+err2:
+ for (i--; i >= 0; i--)
+ kfree(mr->map[i]);
+
+ kfree(mr->map);
+err1:
return -ENOMEM;
}
int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr)
{
- struct rxe_map_set *set;
struct rxe_map **map;
struct rxe_phys_buf *buf = NULL;
struct ib_umem *umem;
int num_buf;
void *vaddr;
int err;
+ int i;
umem = ib_umem_get(pd->ibpd.device, start, length, access);
if (IS_ERR(umem)) {
rxe_mr_init(access, mr);
- err = rxe_mr_alloc(mr, num_buf, 0);
+ err = rxe_mr_alloc(mr, num_buf);
if (err) {
pr_warn("%s: Unable to allocate memory for map\n",
__func__);
goto err_release_umem;
}
- set = mr->cur_map_set;
- set->page_shift = PAGE_SHIFT;
- set->page_mask = PAGE_SIZE - 1;
-
- num_buf = 0;
- map = set->map;
+ mr->page_shift = PAGE_SHIFT;
+ mr->page_mask = PAGE_SIZE - 1;
+ num_buf = 0;
+ map = mr->map;
if (length > 0) {
buf = map[0]->buf;
pr_warn("%s: Unable to get virtual address\n",
__func__);
err = -ENOMEM;
- goto err_release_umem;
+ goto err_cleanup_map;
}
buf->addr = (uintptr_t)vaddr;
buf->size = PAGE_SIZE;
num_buf++;
buf++;
+
}
}
mr->ibmr.pd = &pd->ibpd;
mr->umem = umem;
mr->access = access;
+ mr->length = length;
+ mr->iova = iova;
+ mr->va = start;
+ mr->offset = ib_umem_offset(umem);
mr->state = RXE_MR_STATE_VALID;
mr->type = IB_MR_TYPE_USER;
- set->length = length;
- set->iova = iova;
- set->va = start;
- set->offset = ib_umem_offset(umem);
-
return 0;
+err_cleanup_map:
+ for (i = 0; i < mr->num_map; i++)
+ kfree(mr->map[i]);
+ kfree(mr->map);
err_release_umem:
ib_umem_release(umem);
err_out:
/* always allow remote access for FMRs */
rxe_mr_init(IB_ACCESS_REMOTE, mr);
- err = rxe_mr_alloc(mr, max_pages, 1);
+ err = rxe_mr_alloc(mr, max_pages);
if (err)
goto err1;
static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
size_t *offset_out)
{
- struct rxe_map_set *set = mr->cur_map_set;
- size_t offset = iova - set->iova + set->offset;
+ size_t offset = iova - mr->iova + mr->offset;
int map_index;
int buf_index;
u64 length;
- struct rxe_map *map;
- if (likely(set->page_shift)) {
- *offset_out = offset & set->page_mask;
- offset >>= set->page_shift;
+ if (likely(mr->page_shift)) {
+ *offset_out = offset & mr->page_mask;
+ offset >>= mr->page_shift;
*n_out = offset & mr->map_mask;
*m_out = offset >> mr->map_shift;
} else {
map_index = 0;
buf_index = 0;
- map = set->map[map_index];
- length = map->buf[buf_index].size;
+ length = mr->map[map_index]->buf[buf_index].size;
while (offset >= length) {
offset -= length;
map_index++;
buf_index = 0;
}
- map = set->map[map_index];
- length = map->buf[buf_index].size;
+ length = mr->map[map_index]->buf[buf_index].size;
}
*m_out = map_index;
goto out;
}
- if (!mr->cur_map_set) {
+ if (!mr->map) {
addr = (void *)(uintptr_t)iova;
goto out;
}
lookup_iova(mr, iova, &m, &n, &offset);
- if (offset + length > mr->cur_map_set->map[m]->buf[n].size) {
+ if (offset + length > mr->map[m]->buf[n].size) {
pr_warn("crosses page boundary\n");
addr = NULL;
goto out;
}
- addr = (void *)(uintptr_t)mr->cur_map_set->map[m]->buf[n].addr + offset;
+ addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
out:
return addr;
return 0;
}
- WARN_ON_ONCE(!mr->cur_map_set);
+ WARN_ON_ONCE(!mr->map);
err = mr_check_range(mr, iova, length);
if (err) {
lookup_iova(mr, iova, &m, &i, &offset);
- map = mr->cur_map_set->map + m;
+ map = mr->map + m;
buf = map[0]->buf + i;
while (length > 0) {
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr);
- u32 key = wqe->wr.wr.reg.key & 0xff;
+ u32 key = wqe->wr.wr.reg.key;
u32 access = wqe->wr.wr.reg.access;
- struct rxe_map_set *set;
/* user can only register MR in free state */
if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
return -EINVAL;
}
+ /* user is only allowed to change key portion of l/rkey */
+ if (unlikely((mr->lkey & ~0xff) != (key & ~0xff))) {
+ pr_warn("%s: key = 0x%x has wrong index mr->lkey = 0x%x\n",
+ __func__, key, mr->lkey);
+ return -EINVAL;
+ }
+
mr->access = access;
- mr->lkey = (mr->lkey & ~0xff) | key;
- mr->rkey = (access & IB_ACCESS_REMOTE) ? mr->lkey : 0;
+ mr->lkey = key;
+ mr->rkey = (access & IB_ACCESS_REMOTE) ? key : 0;
+ mr->iova = wqe->wr.wr.reg.mr->iova;
mr->state = RXE_MR_STATE_VALID;
- set = mr->cur_map_set;
- mr->cur_map_set = mr->next_map_set;
- mr->cur_map_set->iova = wqe->wr.wr.reg.mr->iova;
- mr->next_map_set = set;
-
- return 0;
-}
-
-int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr)
-{
- struct rxe_mr *mr = to_rmr(ibmr);
- struct rxe_map_set *set = mr->next_map_set;
- struct rxe_map *map;
- struct rxe_phys_buf *buf;
-
- if (unlikely(set->nbuf == mr->num_buf))
- return -ENOMEM;
-
- map = set->map[set->nbuf / RXE_BUF_PER_MAP];
- buf = &map->buf[set->nbuf % RXE_BUF_PER_MAP];
-
- buf->addr = addr;
- buf->size = ibmr->page_size;
- set->nbuf++;
-
return 0;
}
void rxe_mr_cleanup(struct rxe_pool_elem *elem)
{
struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
+ int i;
rxe_put(mr_pd(mr));
-
ib_umem_release(mr->umem);
- if (mr->cur_map_set)
- rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
+ if (mr->map) {
+ for (i = 0; i < mr->num_map; i++)
+ kfree(mr->map[i]);
- if (mr->next_map_set)
- rxe_mr_free_map_set(mr->num_map, mr->next_map_set);
+ kfree(mr->map);
+ }
}
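The page-aligned fast path in lookup_iova() reduces to a few shifts and masks once rxe_mr_alloc() and rxe_mr_init_user() have set mr->map_shift/map_mask and mr->page_shift/page_mask: an iova is split into a map[] index, a buf[] index within that map, and a byte offset within the page. A minimal userspace sketch of that arithmetic follows; BUF_PER_MAP, PAGE_SHIFT_ and the lookup() helper are illustrative stand-ins, not code from this patch.

```c
/* Sketch of the two-level map/buf index math used by lookup_iova(). */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define BUF_PER_MAP  256                       /* stand-in for RXE_BUF_PER_MAP */
#define MAP_SHIFT    8                         /* ilog2(BUF_PER_MAP)           */
#define MAP_MASK     (BUF_PER_MAP - 1)
#define PAGE_SHIFT_  12                        /* 4 KiB pages                  */
#define PAGE_MASK_   ((1u << PAGE_SHIFT_) - 1)

/* mimics the page_shift != 0 branch of lookup_iova() */
static void lookup(uint64_t iova, uint64_t mr_iova, uint64_t mr_offset,
		   int *m, int *n, size_t *off)
{
	uint64_t offset = iova - mr_iova + mr_offset;

	*off = offset & PAGE_MASK_;    /* byte offset inside the page      */
	offset >>= PAGE_SHIFT_;        /* page index within the region     */
	*n = offset & MAP_MASK;        /* buf[] slot inside one map chunk  */
	*m = offset >> MAP_SHIFT;      /* which map[] chunk                */
}

int main(void)
{
	int m, n;
	size_t off;

	/* byte 17 of page 300 in an MR registered at iova 0x10000 */
	lookup(0x10000 + 300 * 4096 + 17, 0x10000, 0, &m, &n, &off);
	printf("map=%d buf=%d offset=%zu\n", m, n, off); /* map=1 buf=44 offset=17 */
	return 0;
}
```

With a single mr->map[] array this lookup is all the fast-register path needs, which is why the duplicate cur/next map-set bookkeeping removed above can go away.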