/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */

#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"

static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_chip_ctx *cctx,
				      struct bnxt_qplib_stats *stats);

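/* PBL
 *
 * A bnxt_qplib_pbl tracks one level of a hardware page table: pg_arr
 * holds the kernel virtual addresses of driver-allocated pages (NULL for
 * user memory, which is only DMA-mapped), and pg_map_arr holds the
 * matching DMA addresses that are programmed into the chip's page tables.
 */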
static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	struct pci_dev *pdev = res->pdev;
	int i;

	if (!is_umem) {
		for (i = 0; i < pbl->pg_count; i++) {
			if (pbl->pg_arr[i])
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  (void *)((unsigned long)
							   pbl->pg_arr[i] &
							   PAGE_MASK),
						  pbl->pg_map_arr[i]);
			else
				dev_warn(&pdev->dev,
					 "PBL free pg_arr[%d] empty?!\n", i);
			pbl->pg_arr[i] = NULL;
		}
	}
	vfree(pbl->pg_arr);
	pbl->pg_arr = NULL;
	vfree(pbl->pg_map_arr);
	pbl->pg_map_arr = NULL;
	pbl->pg_count = 0;
	pbl->pg_size = 0;
}

static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
					   struct bnxt_qplib_sg_info *sginfo)
{
	struct ib_block_iter biter;
	int i = 0;

	rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
		/* User memory: record only the DMA address of each block */
		pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
		pbl->pg_arr[i] = NULL;
		pbl->pg_count++;
		i++;
	}
}

static int __alloc_pbl(struct bnxt_qplib_res *res,
		       struct bnxt_qplib_pbl *pbl,
		       struct bnxt_qplib_sg_info *sginfo)
{
	struct pci_dev *pdev = res->pdev;
	bool is_umem = false;
	u32 pages;
	int i;

	if (sginfo->nopte)
		return 0;
	if (sginfo->umem)
		pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
	else
		pages = sginfo->npages;
	/* page ptr arrays */
	pbl->pg_arr = vmalloc(pages * sizeof(void *));
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
	if (!pbl->pg_map_arr) {
		vfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = sginfo->pgsize;

	if (!sginfo->umem) {
		for (i = 0; i < pages; i++) {
			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
							    pbl->pg_size,
							    &pbl->pg_map_arr[i],
							    GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	} else {
		is_umem = true;
		bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
	}

	return 0;
fail:
	__free_pbl(res, pbl, is_umem);
	return -ENOMEM;
}

/* HWQ */
void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_hwq *hwq)
{
	int i;

	if (!hwq->max_elements)
		return;
	if (hwq->level >= PBL_LVL_MAX)
		return;

	for (i = 0; i < hwq->level + 1; i++) {
		if (i == hwq->level)
			__free_pbl(res, &hwq->pbl[i], hwq->is_user);
		else
			__free_pbl(res, &hwq->pbl[i], false);
	}

	hwq->level = PBL_LVL_MAX;
	hwq->max_elements = 0;
	hwq->element_size = 0;
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->cp_bit = 0;
}

/* All HWQs are power of 2 in size */
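/*
 * Illustrative sizing (assuming 4 KiB pages): a queue of depth 1024 with
 * a 16 byte stride needs 16 KiB, i.e. 4 PTE pages, so one level of
 * indirection (PBL_LVL_1) is used. Only a single-page queue can stay at
 * PBL_LVL_0, and queues needing more than 512 PTE pages go to PBL_LVL_2.
 */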
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
			      struct bnxt_qplib_hwq_attr *hwq_attr)
{
	u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
	struct bnxt_qplib_sg_info sginfo = {};
	u32 depth, stride, npbl, npde;
	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
	struct bnxt_qplib_res *res;
	struct pci_dev *pdev;
	int i, rc, lvl;

	res = hwq_attr->res;
	pdev = res->pdev;
	pg_size = hwq_attr->sginfo->pgsize;
	hwq->level = PBL_LVL_MAX;

	depth = roundup_pow_of_two(hwq_attr->depth);
	stride = roundup_pow_of_two(hwq_attr->stride);
	if (hwq_attr->aux_depth) {
		aux_slots = hwq_attr->aux_depth;
		aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
		aux_pages = (aux_slots * aux_size) / pg_size;
		if ((aux_slots * aux_size) % pg_size)
			aux_pages++;
	}

	if (!hwq_attr->sginfo->umem) {
		hwq->is_user = false;
		npages = (depth * stride) / pg_size + aux_pages;
		if ((depth * stride) % pg_size)
			npages++;
		if (!npages)
			return -EINVAL;
		hwq_attr->sginfo->npages = npages;
	} else {
		unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
			hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);

		hwq->is_user = true;
		npages = sginfo_num_pages;
		npages = (npages * PAGE_SIZE) /
			  BIT_ULL(hwq_attr->sginfo->pgshft);
		if ((sginfo_num_pages * PAGE_SIZE) %
		     BIT_ULL(hwq_attr->sginfo->pgshft))
			if (!npages)
				npages++;
	}

	if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
		/* This request is Level 0, map PTE */
		rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
		if (rc)
			goto fail;
		hwq->level = PBL_LVL_0;
		goto done;
	}

	if (npages >= MAX_PBL_LVL_0_PGS) {
		if (npages > MAX_PBL_LVL_1_PGS) {
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;
			/* 2 levels of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			npde = npbl >> MAX_PDL_LVL_SHIFT;
			if (npbl % BIT(MAX_PDL_LVL_SHIFT))
				npde++;
			/* Alloc PDE pages */
			sginfo.pgsize = npde * pg_size;
			sginfo.npages = 1;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;

			/* Alloc PBL pages */
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
			if (rc)
				goto fail;
			/* Fill PDL with PBL page pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			if (hwq_attr->type == HWQ_TYPE_MR) {
			/* For MR it is expected that we supply only 1 contiguous
			 * page i.e only 1 entry in the PDL that will contain
			 * all the PBLs for the user supplied memory region
			 */
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[0][i] = src_phys_ptr[i] |
						flag;
			} else {
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
						src_phys_ptr[i] |
						PTU_PDE_VALID;
			}
			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_2;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBLs with PTE pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PTE_VALID;
			}
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_2].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
		} else { /* pages < 512 npbl = 1, npde = 0 */
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;

			/* 1 level of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			/* Alloc PBL page */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;
			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_1;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBL with PTE pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | flag;
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_1].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
		}
	}
done:
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->pdev = pdev;
	hwq->depth = hwq_attr->depth;
	hwq->max_elements = depth;
	hwq->element_size = stride;
	hwq->qe_ppg = pg_size / stride;
	/* For direct access to the elements */
	lvl = hwq->level;
	if (hwq_attr->sginfo->nopte && hwq->level)
		lvl = hwq->level - 1;
	hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
	hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
	spin_lock_init(&hwq->lock);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, hwq);
	return -ENOMEM;
}

/* Context Tables */
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx)
{
	int i;

	bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
	bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
	bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
	/* restore original pde level before destroy */
	ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
	bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
	bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
}

static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_tqm_ctx *tqmctx;
	int rc = 0;
	int i;

	tqmctx = &ctx->tqm_ctx;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.res = res;
	hwq_attr.type = HWQ_TYPE_CTX;
	hwq_attr.depth = 512;
	hwq_attr.stride = sizeof(u64);
	/* Alloc pdl buffer */
	rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
	if (rc)
		goto out;
	/* Save original pdl level */
	tqmctx->pde_level = tqmctx->pde.level;

	hwq_attr.stride = 1;
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
		if (!tqmctx->qcount[i])
			continue;
		hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
		rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
		if (rc)
			goto out;
	}
out:
	return rc;
}

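/*
 * Each TQM ring gets a fixed window of MAX_TQM_ALLOC_BLK_SIZE entries in
 * the PDE page (note j advancing by that amount below). A PBL_LVL_2 ring
 * publishes all of its PBL pages into its window; shallower rings publish
 * just their single level-0 page.
 */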
static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
{
	struct bnxt_qplib_hwq *tbl;
	dma_addr_t *dma_ptr;
	__le64 **pbl_ptr, *ptr;
	int i, j, k;
	int fnz_idx = -1;
	int pg_count;

	pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;

	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		tbl = &ctx->qtbl[i];
		if (!tbl->max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i; /* first non-zero index */
		switch (tbl->level) {
		case PBL_LVL_2:
			pg_count = tbl->pbl[PBL_LVL_1].pg_count;
			for (k = 0; k < pg_count; k++) {
				ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
				dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
				*ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
			}
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
			*ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
					   PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	/* update pde level as per page table programming */
	ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
			  ctx->qtbl[fnz_idx].level + 1;
}

static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	int rc = 0;

	rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
	if (rc)
		goto fail;

	bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
fail:
	return rc;
}

/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *     Context tables are memories which are used by the chip fw.
 *     The 6 tables defined are:
 *             QPC ctx - holds QP states
 *             MRW ctx - holds memory region and window
 *             SRQ ctx - holds shared RQ states
 *             CQ ctx - holds completion queue states
 *             TQM ctx - holds Tx Queue Manager context
 *             TIM ctx - holds timer context
 *     Depending on the size of the tbl requested, either a 1 Page Buffer List
 *     or a 1-to-2-stage indirection Page Directory List + 1 PBL is used
 *     instead.
 *     Table might be employed as follows:
 *             For 0      < ctx size <= 1 PAGE, 0 level of ind is used
 *             For 1 PAGE < ctx size <= 512 entries size, 1 level of ind is used
 *             For 512    < ctx size <= MAX, 2 levels of ind is used
 * Returns:
 *     0 if success, else -ERRORS
 */
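/*
 * Illustration (assuming 4 KiB pages): a QPC table sized for tens of
 * thousands of QPs spans well over 512 pages and therefore uses the full
 * 2-level PDL + PBL + PTE scheme, while a table that fits in a single
 * page needs no indirection at all.
 */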
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int rc;

	if (virt_fn || is_p5)
		goto stats_alloc;

	/* QPC Tables */
	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;

	hwq_attr.res = res;
	hwq_attr.depth = ctx->qpc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* MRW Tables */
	hwq_attr.depth = ctx->mrw_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* SRQ Tables */
	hwq_attr.depth = ctx->srqc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* CQ Tables */
	hwq_attr.depth = ctx->cq_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* TQM Buffer */
	rc = bnxt_qplib_setup_tqm_rings(res, ctx);
	if (rc)
		goto fail;
	/* TIM Buffer */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	hwq_attr.depth = ctx->qpc_count * 16;
	hwq_attr.stride = 1;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
	if (rc)
		goto fail;
stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(res, ctx);
	return rc;
}

static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	kfree(sgid_tbl->tbl);
	kfree(sgid_tbl->hw_id);
	kfree(sgid_tbl->ctx);
	kfree(sgid_tbl->vlan);
	sgid_tbl->tbl = NULL;
	sgid_tbl->hw_id = NULL;
	sgid_tbl->ctx = NULL;
	sgid_tbl->vlan = NULL;
	sgid_tbl->max = 0;
	sgid_tbl->active = 0;
}

static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     u16 max)
{
	sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
	if (!sgid_tbl->tbl)
		return -ENOMEM;

	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!sgid_tbl->hw_id)
		goto out_free1;

	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!sgid_tbl->ctx)
		goto out_free2;

	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
	if (!sgid_tbl->vlan)
		goto out_free3;

	sgid_tbl->max = max;
	return 0;
out_free3:
	kfree(sgid_tbl->ctx);
	sgid_tbl->ctx = NULL;
out_free2:
	kfree(sgid_tbl->hw_id);
	sgid_tbl->hw_id = NULL;
out_free1:
	kfree(sgid_tbl->tbl);
	sgid_tbl->tbl = NULL;
	return -ENOMEM;
}

static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
					struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	int i;

	for (i = 0; i < sgid_tbl->max; i++) {
		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
			   sizeof(bnxt_qplib_gid_zero)))
			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
					    sgid_tbl->tbl[i].vlan_id, true);
	}
	memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
	sgid_tbl->active = 0;
}

static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     struct net_device *netdev)
{
	u32 i;

	for (i = 0; i < sgid_tbl->max; i++)
		sgid_tbl->tbl[i].vlan_id = 0xffff;

	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}

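/* PDs
 *
 * The PD and DPI tables below use a bitmap as a free list with inverted
 * polarity: the table starts as all ones (0xFF), a set bit marks a free
 * slot, allocation clears the first set bit, and freeing sets it again,
 * with test_and_set_bit() catching double frees.
 */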
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
	u32 bit_num;

	bit_num = find_first_bit(pdt->tbl, pdt->max);
	if (bit_num == pdt->max)
		return -ENOMEM;

	/* Found unused PD */
	clear_bit(bit_num, pdt->tbl);
	pd->id = bit_num;
	return 0;
}

int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pdt,
			  struct bnxt_qplib_pd *pd)
{
	if (test_and_set_bit(pd->id, pdt->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
			 pd->id);
		return -EINVAL;
	}
	pd->id = 0;
	return 0;
}

static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
	kfree(pdt->tbl);
	pdt->tbl = NULL;
	pdt->max = 0;
}

static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_pd_tbl *pdt,
				   u32 max)
{
	u32 bytes;

	bytes = max >> 3;
	if (!bytes)
		bytes = 1;
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;

	pdt->max = max;
	memset((u8 *)pdt->tbl, 0xFF, bytes);

	return 0;
}

/* DPIs */
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
			 struct bnxt_qplib_dpi *dpi,
			 void *app)
{
	u32 bit_num;

	bit_num = find_first_bit(dpit->tbl, dpit->max);
	if (bit_num == dpit->max)
		return -ENOMEM;

	/* Found unused DPI */
	clear_bit(bit_num, dpit->tbl);
	dpit->app_tbl[bit_num] = app;

	dpi->dpi = bit_num;
	dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
	dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

	return 0;
}

int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi_tbl *dpit,
			   struct bnxt_qplib_dpi *dpi)
{
	if (dpi->dpi >= dpit->max) {
		dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
		return -EINVAL;
	}
	if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
			 dpi->dpi);
		return -EINVAL;
	}
	if (dpit->app_tbl)
		dpit->app_tbl[dpi->dpi] = NULL;
	memset(dpi, 0, sizeof(*dpi));

	return 0;
}

static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit)
{
	kfree(dpit->tbl);
	kfree(dpit->app_tbl);
	if (dpit->dbr_bar_reg_iomem)
		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	memset(dpit, 0, sizeof(*dpit));
}

static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit,
				    u32 dbr_offset)
{
	u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
	resource_size_t bar_reg_base;
	u32 dbr_len, bytes;

	if (dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
			dbr_bar_reg);
		return -EALREADY;
	}

	bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
	if (!bar_reg_base) {
		dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
			dbr_bar_reg);
		return -ENOMEM;
	}

	dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
	if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
		dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
		return -ENOMEM;
	}

	dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
					  dbr_len);
	if (!dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev,
			"FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
		return -ENOMEM;
	}

	dpit->unmapped_dbr = bar_reg_base + dbr_offset;
	dpit->max = dbr_len / PAGE_SIZE;

	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
	if (!dpit->app_tbl)
		goto unmap_io;

	bytes = dpit->max >> 3;
	if (!bytes)
		bytes = 1;

	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!dpit->tbl) {
		kfree(dpit->app_tbl);
		dpit->app_tbl = NULL;
		goto unmap_io;
	}

	memset((u8 *)dpit->tbl, 0xFF, bytes);

	return 0;

unmap_io:
	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	dpit->dbr_bar_reg_iomem = NULL;
	return -ENOMEM;
}

/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	if (stats->dma) {
		dma_free_coherent(&pdev->dev, stats->size,
				  stats->dma, stats->dma_map);
	}
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
}

static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_chip_ctx *cctx,
				      struct bnxt_qplib_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
	stats->size = cctx->hw_stats_size;
	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
					&stats->dma_map, GFP_KERNEL);
	if (!stats->dma) {
		dev_err(&pdev->dev, "Stats DMA allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}

void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}

int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);

	return 0;
}

void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}

int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr)
{
	int rc = 0;

	res->pdev = pdev;
	res->netdev = netdev;

	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
	if (rc)
		goto fail;

	return 0;
fail:
	bnxt_qplib_free_res(res);
	return rc;
}

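/*
 * Returns 0 only when PCIe AtomicOps are usable end to end: 32-bit and
 * 64-bit AtomicOp completion could be enabled toward the root port and
 * the AtomicOp Requester Enable bit reads back set in Device Control 2.
 */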
int bnxt_qplib_determine_atomics(struct pci_dev *dev)
{
	int comp;
	u16 ctl2;

	comp = pci_enable_atomic_ops_to_root(dev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP32);
	if (comp)
		return -EOPNOTSUPP;
	comp = pci_enable_atomic_ops_to_root(dev,
					     PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (comp)
		return -EOPNOTSUPP;
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2);
	return !(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}