void *cpu_base;
u64 phy_base;
int ret = 0;
+ int ba_num;
int offset;
int total;
int step;
if (root_hem)
return 0;
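+ /* count the root BA entries needed to cover all regions */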
+ ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
+ if (ba_num < 1)
+ return -ENOMEM;
+
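+ /* a single root BT item covers all regions, from the first offset to the end of the last */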
INIT_LIST_HEAD(&temp_root);
- total = r->offset;
+ offset = r->offset;
/* point to the last region */
r = &regions[region_cnt - 1];
- root_hem = hem_list_alloc_item(hr_dev, total, r->offset + r->count - 1,
- unit, true, 0);
+ root_hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
+ ba_num, true, 0);
if (!root_hem)
return -ENOMEM;
list_add(&root_hem->list, &temp_root);
INIT_LIST_HEAD(&temp_list[i]);
total = 0;
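+ /* fill the root BT from each region, up to ba_num entries in total */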
- for (i = 0; i < region_cnt && total < unit; i++) {
+ for (i = 0; i < region_cnt && total < ba_num; i++) {
r = &regions[i];
if (!r->count)
continue;
/* if a mid BT exists, link L1 to L0 */
list_for_each_entry_safe(hem, temp_hem,
&hem_list->mid_bt[i][1], list) {
- offset = hem->start / step * BA_BYTE_LEN;
+ offset = (hem->start - r->offset) / step *
+ BA_BYTE_LEN;
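+ /* write the mid BT's dma address into its slot in the root BT */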
hem_list_link_bt(hr_dev, cpu_base + offset,
hem->dma_addr);
total++;
return region_cnt;
}
-static int calc_wqe_bt_page_shift(struct hns_roce_dev *hr_dev,
- struct hns_roce_buf_region *regions,
- int region_cnt)
-{
- int bt_pg_shift;
- int ba_num;
- int ret;
-
- bt_pg_shift = PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz;
-
- /* all root ba entries must in one bt page */
- do {
- ba_num = (1 << bt_pg_shift) / BA_BYTE_LEN;
- ret = hns_roce_hem_list_calc_root_ba(regions, region_cnt,
- ba_num);
- if (ret <= ba_num)
- break;
-
- bt_pg_shift++;
- } while (ret > ba_num);
-
- return bt_pg_shift - PAGE_SHIFT;
-}
-
static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp)
{
static int map_wqe_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
u32 page_shift, bool is_user)
{
- dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL };
+/* WQE buffer includes 3 parts: SQ, extend SGE and RQ. */
+#define HNS_ROCE_WQE_REGION_MAX 3
+ struct hns_roce_buf_region regions[HNS_ROCE_WQE_REGION_MAX] = {};
+ dma_addr_t *buf_list[HNS_ROCE_WQE_REGION_MAX] = {};
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_buf_region *r;
int region_count;
int ret;
int i;
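+ /* split the WQE buffer into SQ, extend SGE and RQ regions */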
- region_count = split_wqe_buf_region(hr_dev, hr_qp, hr_qp->regions,
- ARRAY_SIZE(hr_qp->regions), page_shift);
+ region_count = split_wqe_buf_region(hr_dev, hr_qp, regions,
+ ARRAY_SIZE(regions), page_shift);
/* alloc a tmp list to store WQE buffer addresses */
- ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list, region_count);
+ ret = hns_roce_alloc_buf_list(regions, buf_list, region_count);
if (ret) {
ibdev_err(ibdev, "Failed to alloc WQE buffer list\n");
return ret;
}
for (i = 0; i < region_count; i++) {
- r = &hr_qp->regions[i];
+ r = &regions[i];
if (is_user)
buf_count = hns_roce_get_umem_bufs(hr_dev, buf_list[i],
r->count, r->offset, hr_qp->umem,
}
}
- hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions,
- region_count);
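+ /* the root BT now holds all BA entries itself, so the default BA page shift is enough */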
+ hr_qp->wqe_bt_pg_shift = hr_dev->caps.mtt_ba_pg_sz;
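+ /* init the MTR with the BA table page shift and the buf page shift */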
hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift,
page_shift);
- ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, hr_qp->regions,
+ ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list, regions,
region_count);
if (ret)
ibdev_err(ibdev, "Failed to attach WQE's mtr\n");