 	struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
 	struct irdma_hmc_pble *hmc_p;
 	u64 *arr = iwmr->pgaddrmem;
-	u32 pg_size;
+	u32 pg_size, total;
 	int err = 0;
-	int total;
 	bool ret = true;

-	total = req->sq_pages + req->rq_pages + req->cq_pages;
 	pg_size = iwmr->page_size;
 	err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles);
 	if (err)
 		return err;
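
The hunk above retypes total as u32 and drops the up-front sum of all three user-supplied page counts; each case of the switch below recomputes only the counts that belong to its memreg type. A minimal model of the out-of-bounds read this reworking prevents, with simplified, hypothetical names (arr[] stands in for the driver's per-page address array; the real bounds check lives in the registration path further down):

#include <stdint.h>

struct fake_req {
	uint16_t sq_pages;
	uint16_t rq_pages;
	uint16_t cq_pages;	/* all three arrive from userspace */
};

/* Shadow-page address for a QP region, or 0 for a bogus request. */
static uint64_t qp_shadow_addr(const uint64_t *arr, uint32_t page_cnt,
			       const struct fake_req *req)
{
	/* Only the counts that belong to this memreg type... */
	uint32_t total = (uint32_t)req->sq_pages + req->rq_pages;

	/* ...and never past what was actually pinned (+1 shadow page).
	 * Without this bound, arr[total] is an out-of-bounds read. */
	if (total + 1 > page_cnt)
		return 0;	/* the driver returns -EINVAL */

	return arr[total];	/* shadow page follows the SQ/RQ pages */
}
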
 	switch (iwmr->type) {
 	case IRDMA_MEMREG_TYPE_QP:
+		total = req->sq_pages + req->rq_pages;
 		hmc_p = &qpmr->sq_pbl;
 		qpmr->shadow = (dma_addr_t)arr[total];
 		break;
 	case IRDMA_MEMREG_TYPE_CQ:
 		hmc_p = &cqmr->cq_pbl;
 		if (!cqmr->split)
-			cqmr->shadow = (dma_addr_t)arr[total];
+			cqmr->shadow = (dma_addr_t)arr[req->cq_pages];

 		if (use_pbles)
 			ret = irdma_check_mem_contiguous(arr, req->cq_pages,
 							 pg_size);
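
The indexing above encodes what the two shadow assignments assume about the user-registered region: for a QP the shadow page sits after the SQ and RQ pages, for a CQ it sits right after the CQ pages. The removed arr[total] in the CQ case used the old three-way sum, which is only correct when the counts for the other type happen to be zero, and all three values come from userspace. As a sketch (hypothetical helper, kernel-style types; nothing like it exists in the driver):

/*
 * Assumed layout, inferred from the indexing above:
 *
 *   QP: [ sq_pages | rq_pages | shadow ]  ->  shadow index = sq + rq
 *   CQ: [ cq_pages | shadow ]             ->  shadow index = cq_pages
 */
static u32 shadow_index(u16 reg_type, u16 sq_pages, u16 rq_pages,
			u16 cq_pages)
{
	return reg_type == IRDMA_MEMREG_TYPE_QP ?
	       (u32)sq_pages + rq_pages : cq_pages;
}

The user-MR registration path below adds the bounds checks that make this indexing safe.
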
 	struct irdma_mr *iwmr;
 	struct ib_umem *region;
 	struct irdma_mem_reg_req req;
-	u32 stag = 0;
+	u32 total, stag = 0;
+	u8 shadow_pgcnt = 1;
 	bool use_pbles = false;
 	unsigned long flags;
 	int err = -EINVAL;
 	switch (req.reg_type) {
 	case IRDMA_MEMREG_TYPE_QP:
-		use_pbles = ((req.sq_pages + req.rq_pages) > 2);
+		total = req.sq_pages + req.rq_pages + shadow_pgcnt;
+		if (total > iwmr->page_cnt) {
+			err = -EINVAL;
+			goto error;
+		}
+		total = req.sq_pages + req.rq_pages;
+		use_pbles = (total > 2);
 		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
 		if (err)
 			goto error;
 		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
 		break;
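
The added QP-case check follows a two-step pattern: first size the worst case, the queue pages plus the one shadow page, against iwmr->page_cnt, which reflects the pages the kernel actually pinned for this MR, and only then recompute total without the shadow page for the existing use_pbles heuristic. In isolation (hypothetical helper; the driver open-codes this per case, kernel types and errno assumed):

static int check_q_pages(u32 sq_pages, u32 rq_pages, u32 shadow_pgcnt,
			 u32 page_cnt)
{
	/* Counts are u16s from userspace, so this u32 sum cannot wrap. */
	u32 total = sq_pages + rq_pages + shadow_pgcnt;

	return total > page_cnt ? -EINVAL : 0;
}
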
 	case IRDMA_MEMREG_TYPE_CQ:
+		if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
+			shadow_pgcnt = 0;
+		total = req.cq_pages + shadow_pgcnt;
+		if (total > iwmr->page_cnt) {
+			err = -EINVAL;
+			goto error;
+		}
+
 		use_pbles = (req.cq_pages > 1);
 		err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
 		if (err)
 			goto error;
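
For CQs the shadow page is only expected on devices without CQ-resize support, hence shadow_pgcnt drops to 0 behind the IRDMA_FEATURE_CQ_RESIZE feature flag before the same bounds check runs. The minimum region a CQ registration must cover, as a sketch (hypothetical helper):

/* Pages a CQ registration must cover; devices advertising
 * IRDMA_FEATURE_CQ_RESIZE keep no shadow area for the CQ. */
static u32 cq_pages_needed(u32 cq_pages, bool has_cq_resize)
{
	return cq_pages + (has_cq_resize ? 0 : 1);
}
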