u8 netdev_flags;
struct dentry *hnae3_dbgfs;
+
+ /* Network interface message level enabled bits */
+ u32 msg_enable;
};
#define hnae3_set_field(origin, mask, shift, val) \
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, " Network interface message level setting");
+
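+/* default message level: report probe, link change and ifup/ifdown events */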
+#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
/* hns3_pci_tbl - PCI Device ID Table
*
* Last entry must be all 0s
handle->ae_algo->ops->client_stop(handle);
}
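+/* hns3_info_show - dump the netdev's queue, RSS and buffer configuration
+ * at client init time, gated by the NETIF_MSG_DRV message level bit.
+ */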
+static void hns3_info_show(struct hns3_nic_priv *priv)
+{
+ struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+
+ dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
+ dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
+ dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
+ dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
+ dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
+ dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
+ dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
+ dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
+ dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
+}
+
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
priv->tx_timeout_count = 0;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
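+ /* a "debug" value of -1 (the default) makes netif_msg_init() fall back to DEFAULT_MSG_LEVEL */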
+ handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
+
handle->kinfo.netdev = netdev;
handle->priv = (void *)priv;
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+ if (netif_msg_drv(handle))
+ hns3_info_show(priv);
+
return ret;
out_client_start:
if (linkup) {
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
- netdev_info(netdev, "link up\n");
+ if (netif_msg_link(handle))
+ netdev_info(netdev, "link up\n");
} else {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
- netdev_info(netdev, "link down\n");
+ if (netif_msg_link(handle))
+ netdev_info(netdev, "link down\n");
}
}
return h->ae_algo->ops->set_led_id(h, state);
}
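+/* ethtool message level hooks (e.g. "ethtool -s <dev> msglvl <bitmap>"):
+ * expose the per-handle msg_enable bits that gate the netif_msg_*()
+ * checks in the driver.
+ */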
+static u32 hns3_get_msglevel(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ return h->msg_enable;
+}
+
+static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ h->msg_enable = msg_level;
+}
+
static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_drvinfo = hns3_get_drvinfo,
.get_ringparam = hns3_get_ringparam,
.get_regs_len = hns3_get_regs_len,
.get_regs = hns3_get_regs,
.get_link = hns3_get_link,
+ .get_msglevel = hns3_get_msglevel,
+ .set_msglevel = hns3_set_msglevel,
};
static const struct ethtool_ops hns3_ethtool_ops = {
.get_regs_len = hns3_get_regs_len,
.get_regs = hns3_get_regs,
.set_phys_id = hns3_set_phys_id,
+ .get_msglevel = hns3_get_msglevel,
+ .set_msglevel = hns3_set_msglevel,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
*tp_mdix = ETH_TP_MDI;
}
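+/* hclge_info_show - dump the PF's resource configuration at client init
+ * time, gated by the NETIF_MSG_DRV message level bit.
+ */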
+static void hclge_info_show(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ dev_info(dev, "PF info begin:\n");
+
+ dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
+ dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
+ dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
+ dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
+ dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
+ dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
+ dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
+ dev_info(dev, "This is %s PF\n",
+ hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
+ dev_info(dev, "DCB %s\n",
+ hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
+ dev_info(dev, "MQPRIO %s\n",
+ hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
+
+ dev_info(dev, "PF info end.\n");
+}
+
static int hclge_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
hnae3_set_client_init_flag(client, ae_dev, 1);
+ if (netif_msg_drv(&hdev->vport->nic))
+ hclge_info_show(hdev);
+
if (hdev->roce_client &&
hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
hclgevf_free_vector(hdev, 0);
}
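+/* hclgevf_info_show - dump the VF's resource configuration at client init
+ * time, gated by the NETIF_MSG_DRV message level bit.
+ */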
+static void hclgevf_info_show(struct hclgevf_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ dev_info(dev, "VF info begin:\n");
+
+ dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
+ dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
+ dev_info(dev, "PF media type of this VF: %d\n",
+ hdev->hw.mac.media_type);
+
+ dev_info(dev, "VF info end.\n");
+}
+
static int hclgevf_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
hnae3_set_client_init_flag(client, ae_dev, 1);
+ if (netif_msg_drv(&hdev->nic))
+ hclgevf_info_show(hdev);
+
if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;