int *order)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct ib_umem *u;
int err;
- *umem = ib_umem_get(pd->uobject->context, start, length,
- access_flags, 0);
- err = PTR_ERR_OR_ZERO(*umem);
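+ /* Work on a local umem and clear the output pointer first, so callers never see a stale or half-initialized value on failure. */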
+ *umem = NULL;
+
+ u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
+ err = PTR_ERR_OR_ZERO(u);
if (err) {
- *umem = NULL;
- mlx5_ib_err(dev, "umem get failed (%d)\n", err);
+ mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
return err;
}
- mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
+ mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
page_shift, ncont, order);
if (!*npages) {
mlx5_ib_warn(dev, "avoid zero region\n");
- ib_umem_release(*umem);
+ ib_umem_release(u);
return -EINVAL;
}
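+ /* Everything checked out; publish the umem to the caller. */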
+ *umem = u;
+
mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
*npages, *ncont, *order, *page_shift);
int access_flags = flags & IB_MR_REREG_ACCESS ?
new_access_flags :
mr->access_flags;
- u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
- u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
int page_shift = 0;
int upd_flags = 0;
int npages = 0;
int ncont = 0;
int order = 0;
+ u64 addr, len;
int err;
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
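+ /* mr->umem can be NULL (e.g. left that way by an earlier failed rereg); don't dereference it. */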
+ if (!mr->umem)
+ return -EINVAL;
+
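+ /* Use the new translation when requested, otherwise keep the MR's current range. */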
+ if (flags & IB_MR_REREG_TRANS) {
+ addr = virt_addr;
+ len = length;
+ } else {
+ addr = mr->umem->address;
+ len = mr->umem->length;
+ }
+
if (flags != IB_MR_REREG_PD) {
/*
* Replace umem. This needs to be done whether or not UMR is
* used.
*/
flags |= IB_MR_REREG_TRANS;
ib_umem_release(mr->umem);
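+ /* Clear the now-freed pointer so nothing can dereference or double-release it. */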
+ mr->umem = NULL;
err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
&npages, &page_shift, &ncont, &order);
if (err < 0) {