return NULL;
}
- device->groups[0] = &ib_dev_attr_group;
rdma_init_coredev(&device->coredev, device, &init_net);
INIT_LIST_HEAD(&device->event_handler_list);
return ret;
}
+ device->groups[0] = &ib_dev_attr_group;
+ device->groups[1] = device->ops.device_group;
ret = ib_setup_device_attrs(device);
if (ret)
goto cache_cleanup;
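For context on the two assignments above: they fill the NULL-terminated
dev.groups array that the driver core walks when the device is added, so the
driver's group has to be in place before registration proceeds. A minimal
sketch of the resulting layout, assuming the three-slot groups[] array
declared in struct ib_device (not part of the patch itself):

	/*
	 * Sketch only: the array consumed by the driver core after the
	 * two assignments above.
	 */
	const struct attribute_group *groups[3] = {
		&ib_dev_attr_group,        /* core-owned device attributes */
		device->ops.device_group,  /* driver group; NULL ends the array */
		NULL,                      /* terminator */
	};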
SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
SET_DEVICE_OP(dev_ops, destroy_srq);
SET_DEVICE_OP(dev_ops, destroy_wq);
+ SET_DEVICE_OP(dev_ops, device_group);
SET_DEVICE_OP(dev_ops, detach_mcast);
SET_DEVICE_OP(dev_ops, disassociate_ucontext);
SET_DEVICE_OP(dev_ops, drain_rq);
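For reference, SET_DEVICE_OP() only copies a driver-supplied entry when it is
non-NULL and not already set, so the new device_group member can ride through
the same macro even though it is a data pointer rather than a function
pointer. Roughly, as defined inside ib_set_device_ops() in core/device.c:

	#define SET_DEVICE_OP(ptr, name)                                  \
		do {                                                      \
			if (ops->name)                                    \
				if (!((ptr)->name))                       \
					(ptr)->name = ops->name;          \
		} while (0)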
.destroy_cq = bnxt_re_destroy_cq,
.destroy_qp = bnxt_re_destroy_qp,
.destroy_srq = bnxt_re_destroy_srq,
+ .device_group = &bnxt_re_dev_attr_group,
.get_dev_fw_str = bnxt_re_query_fw_str,
.get_dma_mr = bnxt_re_get_dma_mr,
.get_hw_stats = bnxt_re_ib_get_hw_stats,
ibdev->dev.parent = &rdev->en_dev->pdev->dev;
ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;
- rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
if (ret)
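Every driver hunk below repeats the shape of this bnxt_re conversion: the
attribute group moves out of a registration-time
rdma_set_device_sysfs_group() call and into the ops table. A minimal sketch
of the pattern, using hypothetical example_* names that belong to no real
driver:

	/* Hypothetical driver, shown only to illustrate the conversion. */
	static ssize_t hw_rev_show(struct device *device,
				   struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "0x%x\n", 0x1234); /* placeholder */
	}
	static DEVICE_ATTR_RO(hw_rev);

	static struct attribute *example_attrs[] = {
		&dev_attr_hw_rev.attr,
		NULL,
	};

	static const struct attribute_group example_attr_group = {
		.attrs = example_attrs,
	};

	static const struct ib_device_ops example_dev_ops = {
		/* ... other ops ... */
		.device_group = &example_attr_group, /* replaces the old call */
	};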
.destroy_cq = c4iw_destroy_cq,
.destroy_qp = c4iw_destroy_qp,
.destroy_srq = c4iw_destroy_srq,
+ .device_group = &c4iw_attr_group,
.fill_res_cq_entry = c4iw_fill_res_cq_entry,
.fill_res_cm_id_entry = c4iw_fill_res_cm_id_entry,
.fill_res_mr_entry = c4iw_fill_res_mr_entry,
memcpy(dev->ibdev.iw_ifname, dev->rdev.lldi.ports[0]->name,
sizeof(dev->ibdev.iw_ifname));
- rdma_set_device_sysfs_group(&dev->ibdev, &c4iw_attr_group);
ib_set_device_ops(&dev->ibdev, &c4iw_dev_ops);
ret = set_netdevs(&dev->ibdev, &dev->rdev);
if (ret)
.alloc_hw_device_stats = hfi1_alloc_hw_device_stats,
.alloc_hw_port_stats = hfi_alloc_hw_port_stats,
.alloc_rdma_netdev = hfi1_vnic_alloc_rn,
+ .device_group = &ib_hfi1_attr_group,
.get_dev_fw_str = hfi1_get_dev_fw_str,
.get_hw_stats = get_hw_stats,
.modify_device = modify_device,
i,
ppd->pkeys);
- rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev,
- &ib_hfi1_attr_group);
-
ret = rvt_register_device(&dd->verbs_dev.rdi);
if (ret)
goto err_verbs_txreq;
.destroy_qp = mlx4_ib_destroy_qp,
.destroy_srq = mlx4_ib_destroy_srq,
.detach_mcast = mlx4_ib_mcg_detach,
+ .device_group = &mlx4_attr_group,
.disassociate_ucontext = mlx4_ib_disassociate_ucontext,
.drain_rq = mlx4_ib_drain_rq,
.drain_sq = mlx4_ib_drain_sq,
if (mlx4_ib_alloc_diag_counters(ibdev))
goto err_steer_free_bitmap;
- rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
if (ib_register_device(&ibdev->ib_dev, "mlx4_%d",
&dev->persist->pdev->dev))
goto err_diag_counters;
.disassociate_ucontext = mlx5_ib_disassociate_ucontext,
.drain_rq = mlx5_ib_drain_rq,
.drain_sq = mlx5_ib_drain_sq,
+ .device_group = &mlx5_attr_group,
.enable_driver = mlx5_ib_enable_driver,
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = mlx5_ib_get_dma_mr,
{
const char *name;
- rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
if (!mlx5_lag_is_roce(dev->mdev))
name = "mlx5_%d";
else
.destroy_cq = mthca_destroy_cq,
.destroy_qp = mthca_destroy_qp,
.detach_mcast = mthca_multicast_detach,
+ .device_group = &mthca_attr_group,
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = mthca_get_dma_mr,
.get_port_immutable = mthca_port_immutable,
mutex_init(&dev->cap_mask_mutex);
- rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
ret = ib_register_device(&dev->ib_dev, "mthca%d", &dev->pdev->dev);
if (ret)
return ret;
.destroy_ah = ocrdma_destroy_ah,
.destroy_cq = ocrdma_destroy_cq,
.destroy_qp = ocrdma_destroy_qp,
+ .device_group = &ocrdma_attr_group,
.get_dev_fw_str = get_dev_fw_str,
.get_dma_mr = ocrdma_get_dma_mr,
.get_link_layer = ocrdma_link_layer,
if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R)
ib_set_device_ops(&dev->ibdev, &ocrdma_dev_srq_ops);
- rdma_set_device_sysfs_group(&dev->ibdev, &ocrdma_attr_group);
ret = ib_device_set_netdev(&dev->ibdev, dev->nic_info.netdev, 1);
if (ret)
return ret;
.destroy_cq = qedr_destroy_cq,
.destroy_qp = qedr_destroy_qp,
.destroy_srq = qedr_destroy_srq,
+ .device_group = &qedr_attr_group,
.get_dev_fw_str = qedr_get_dev_fw_str,
.get_dma_mr = qedr_get_dma_mr,
.get_link_layer = qedr_link_layer,
dev->ibdev.num_comp_vectors = dev->num_cnq;
dev->ibdev.dev.parent = &dev->pdev->dev;
- rdma_set_device_sysfs_group(&dev->ibdev, &qedr_attr_group);
ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);
rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
.driver_id = RDMA_DRIVER_QIB,
.port_groups = qib_attr_port_groups,
+ .device_group = &qib_attr_group,
.modify_device = qib_modify_device,
.process_mad = qib_process_mad,
};
i,
dd->rcd[ctxt]->pkeys);
}
- rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev, &qib_attr_group);
ib_set_device_ops(ibdev, &qib_dev_ops);
ret = rvt_register_device(&dd->verbs_dev.rdi);
.dereg_mr = usnic_ib_dereg_mr,
.destroy_cq = usnic_ib_destroy_cq,
.destroy_qp = usnic_ib_destroy_qp,
+ .device_group = &usnic_attr_group,
.get_dev_fw_str = usnic_get_dev_fw_str,
.get_link_layer = usnic_ib_port_link_layer,
.get_port_immutable = usnic_port_immutable,
ib_set_device_ops(&us_ibdev->ib_dev, &usnic_dev_ops);
- rdma_set_device_sysfs_group(&us_ibdev->ib_dev, &usnic_attr_group);
-
ret = ib_device_set_netdev(&us_ibdev->ib_dev, us_ibdev->netdev, 1);
if (ret)
goto err_fwd_dealloc;
.destroy_ah = pvrdma_destroy_ah,
.destroy_cq = pvrdma_destroy_cq,
.destroy_qp = pvrdma_destroy_qp,
+ .device_group = &pvrdma_attr_group,
.get_dev_fw_str = pvrdma_get_fw_ver_str,
.get_dma_mr = pvrdma_get_dma_mr,
.get_link_layer = pvrdma_port_link_layer,
if (ret)
goto err_srq_free;
spin_lock_init(&dev->srq_tbl_lock);
- rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);
ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", &dev->pdev->dev);
if (ret)
.destroy_qp = rxe_destroy_qp,
.destroy_srq = rxe_destroy_srq,
.detach_mcast = rxe_detach_mcast,
+ .device_group = &rxe_attr_group,
.enable_driver = rxe_enable_driver,
.get_dma_mr = rxe_get_dma_mr,
.get_hw_stats = rxe_ib_get_hw_stats,
}
rxe->tfm = tfm;
- rdma_set_device_sysfs_group(dev, &rxe_attr_group);
err = ib_register_device(dev, ibdev_name, NULL);
if (err)
pr_warn("%s failed with error %d\n", __func__, err);
u32 uverbs_abi_ver;
unsigned int uverbs_no_driver_id_binding:1;
+ /*
+ * NOTE: New drivers should not make use of device_group; instead, new
+ * device parameters should be exposed via netlink commands. This
+ * mechanism exists only for existing drivers.
+ */
+ const struct attribute_group *device_group;
const struct attribute_group **port_groups;
int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
void (*setup)(struct net_device *),
struct net_device *netdev);
-/**
- * rdma_set_device_sysfs_group - Set device attributes group to have
- * driver specific sysfs entries at
- * for infiniband class.
- *
- * @device: device pointer for which attributes to be created
- * @group: Pointer to group which should be added when device
- * is registered with sysfs.
- * rdma_set_device_sysfs_group() allows existing drivers to expose one
- * group per device to have sysfs attributes.
- *
- * NOTE: New drivers should not make use of this API; instead new device
- * parameter should be exposed via netlink command. This API and mechanism
- * exist only for existing drivers.
- */
-static inline void
-rdma_set_device_sysfs_group(struct ib_device *dev,
- const struct attribute_group *group)
-{
- dev->groups[1] = group;
-}
-
/**
* rdma_device_to_ibdev - Get ib_device pointer from device pointer
*
*
* NOTE: New drivers should not make use of this API; this API is only for
* existing drivers that have exposed sysfs entries using
- * rdma_set_device_sysfs_group().
+ * ops->device_group.
*/
#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
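To make the retargeted NOTE concrete: a show() callback in one of these
legacy drivers recovers its private structure through this macro. A hedged
sketch, reusing the hypothetical example_* naming from the earlier pattern
(no real driver defines these):

	/* Hypothetical driver structure embedding its struct ib_device. */
	struct example_dev {
		struct ib_device ibdev;
		u32 hw_rev;
	};

	static ssize_t hw_rev_show(struct device *device,
				   struct device_attribute *attr, char *buf)
	{
		struct example_dev *edev =
			rdma_device_to_drv_device(device, struct example_dev,
						  ibdev);

		return sysfs_emit(buf, "0x%x\n", edev->hw_rev);
	}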