return cgx->lmac_idmap[lmac_id];
}
-int cgx_get_cgx_cnt(void)
+int cgx_get_cgxcnt_max(void)
{
struct cgx *cgx_dev;
- int count = 0;
+ int idmax = -ENODEV;
list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
- count++;
+ if (cgx_dev->cgx_id > idmax)
+ idmax = cgx_dev->cgx_id;
- return count;
+ if (idmax < 0)
+ return 0;
+
+ return idmax + 1;
}
-EXPORT_SYMBOL(cgx_get_cgx_cnt);
+EXPORT_SYMBOL(cgx_get_cgxcnt_max);
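To make the semantic change concrete: the function no longer returns the number of probed devices but the maximum cgx_id plus one, so an array indexed by cgx_id is always large enough even when the id space has holes. A standalone userspace sketch with hypothetical ids (not driver code):

#include <stdio.h>

int main(void)
{
	/* Hypothetical probed CGX ids, with a hole at id 1 */
	int ids[] = { 0, 2 };
	int n = sizeof(ids) / sizeof(ids[0]);
	int i, idmax = -1;

	for (i = 0; i < n; i++)
		if (ids[i] > idmax)
			idmax = ids[i];

	/* old cgx_get_cgx_cnt(): 2 (cannot index id 2)
	 * new cgx_get_cgxcnt_max(): idmax + 1 == 3
	 */
	printf("count = %d, cgxcnt_max = %d\n", n, idmax + 1);
	return 0;
}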
int cgx_get_lmac_cnt(void *cgxd)
{
goto err_release_regions;
}
+ cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+ & CGX_ID_MASK;
+
list_add(&cgx->cgx_list, &cgx_list);
- cgx->cgx_id = cgx_get_cgx_cnt() - 1;
cgx_link_usertable_init();
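The shift-and-mask derives the id from the device's CSR base address rather than from probe order, so a given CGX block always gets the same id. A minimal sketch of the computation, assuming CGX_ID_MASK is 0x7 and using made-up BAR addresses (real values come from pci_resource_start()):

#include <stdint.h>
#include <stdio.h>

#define CGX_ID_MASK 0x7ULL	/* assumed value, for illustration only */

/* Mirrors the expression added to cgx_probe() above */
static unsigned int cgx_id_from_bar(uint64_t bar_start)
{
	return (bar_start >> 24) & CGX_ID_MASK;
}

int main(void)
{
	/* Made-up BAR bases */
	printf("%u\n", cgx_id_from_bar(0x87E0B0000000ULL));	/* 0 */
	printf("%u\n", cgx_id_from_bar(0x87E0B2000000ULL));	/* 2 */
	return 0;
}

Because the id no longer depends on how many devices probed earlier, the id space may contain holes, which is what the cgx_cnt_max changes below account for.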
void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
- if (cgx_id >= rvu->cgx_cnt)
+ if (cgx_id >= rvu->cgx_cnt_max)
return NULL;
return rvu->cgx_idmap[cgx_id];
static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
- int cgx_cnt = rvu->cgx_cnt;
+ int cgx_cnt_max = rvu->cgx_cnt_max;
int cgx, lmac_cnt, lmac;
int pf = PF_CGXMAP_BASE;
int size, free_pkind;
- if (!cgx_cnt)
+ if (!cgx_cnt_max)
return 0;
- if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF)
+ if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
return -EINVAL;
/* Alloc map table
* An additional entry is required since PF id starts from 1 and
* hence entry at offset 0 is invalid.
*/
- size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
- rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL);
+ size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+ rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
if (!rvu->pf2cgxlmac_map)
return -ENOMEM;
- /* Initialize offset 0 with an invalid cgx and lmac id */
- rvu->pf2cgxlmac_map[0] = 0xFF;
+ /* Initialize all entries with an invalid cgx and lmac id */
+ memset(rvu->pf2cgxlmac_map, 0xFF, size);
/* Reverse map table */
rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
- cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16),
+ cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
GFP_KERNEL);
if (!rvu->cgxlmac2pf_map)
return -ENOMEM;
rvu->cgx_mapped_pfs = 0;
- for (cgx = 0; cgx < cgx_cnt; cgx++) {
+ for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
+ if (!rvu_cgx_pdata(cgx, rvu))
+ continue;
lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
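The > 0xF bounds check and the 0xFF invalid marker both follow from packing a (cgx, lmac) pair into a single byte. A sketch of what cgxlmac_id_to_bmap() presumably does (the helper itself is not part of this patch):

/* Presumed packing: 4 bits of cgx id, 4 bits of lmac id. 0xFF thus
 * decodes as cgx 0xF / lmac 0xF, an id pair the bounds check above
 * guarantees can never be valid.
 */
static inline u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}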
cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */
cb.data = rvu;
- for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) {
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
cgxd = rvu_cgx_pdata(cgx, rvu);
+ if (!cgxd)
+ continue;
for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
{
int cgx, err;
- /* find available cgx ports */
- rvu->cgx_cnt = cgx_get_cgx_cnt();
- if (!rvu->cgx_cnt) {
+ /* CGX port ids start from 0 and are not necessarily contiguous.
+ * Hence we allocate resources based on the maximum port id value.
+ */
+ rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
+ if (!rvu->cgx_cnt_max) {
dev_info(rvu->dev, "No CGX devices found!\n");
return -ENODEV;
}
- rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *),
- GFP_KERNEL);
+ rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
+ sizeof(void *), GFP_KERNEL);
if (!rvu->cgx_idmap)
return -ENOMEM;
/* Initialize the cgxdata table */
- for (cgx = 0; cgx < rvu->cgx_cnt; cgx++)
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);
/* Map CGX LMAC interfaces to RVU PFs */
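Every walk over the per-id map must now tolerate holes, as the mapping and event-registration loops above do. A hypothetical helper (not part of the patch) capturing the expected pattern:

/* Hypothetical walker: visit each probed CGX device, skipping ids
 * for which rvu_cgx_pdata() returns NULL (holes in the id space).
 */
static void rvu_for_each_cgx(struct rvu *rvu,
			     void (*fn)(struct rvu *rvu, int cgx, void *cgxd))
{
	int cgx;

	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
		void *cgxd = rvu_cgx_pdata(cgx, rvu);

		if (!cgxd)
			continue;
		fn(rvu, cgx, cgxd);
	}
}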