#include <linux/mlx5/transobj.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
+#include <net/udp_tunnel.h>
#include <net/switchdev.h>
#include <net/xdp.h>
#include <linux/dim.h>
u16 drop_rq_q_counter;
struct notifier_block events_nb;
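+ /* UDP tunnel offload capabilities, handed to the stack in
+ * mlx5e_vxlan_set_netdev_info()
+ */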
+ struct udp_tunnel_nic_info nic_info;
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx dcbx;
#endif
int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
mlx5e_fp_preactivate preactivate);
+void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);
/* ethtool helpers */
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
			       struct ethtool_drvinfo *drvinfo);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
-void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
-void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct net_device *netdev,
netdev_features_t features);
}
#endif
-struct mlx5e_vxlan_work {
- struct work_struct work;
- struct mlx5e_priv *priv;
- u16 port;
-};
-
-static void mlx5e_vxlan_add_work(struct work_struct *work)
-{
- struct mlx5e_vxlan_work *vxlan_work =
- container_of(work, struct mlx5e_vxlan_work, work);
- struct mlx5e_priv *priv = vxlan_work->priv;
- u16 port = vxlan_work->port;
-
- mutex_lock(&priv->state_lock);
- mlx5_vxlan_add_port(priv->mdev->vxlan, port);
- mutex_unlock(&priv->state_lock);
-
- kfree(vxlan_work);
-}
-
-static void mlx5e_vxlan_del_work(struct work_struct *work)
-{
- struct mlx5e_vxlan_work *vxlan_work =
- container_of(work, struct mlx5e_vxlan_work, work);
- struct mlx5e_priv *priv = vxlan_work->priv;
- u16 port = vxlan_work->port;
-
- mutex_lock(&priv->state_lock);
- mlx5_vxlan_del_port(priv->mdev->vxlan, port);
- mutex_unlock(&priv->state_lock);
- kfree(vxlan_work);
-}
-
-static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
-{
- struct mlx5e_vxlan_work *vxlan_work;
-
- vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
- if (!vxlan_work)
- return;
-
- if (add)
- INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
- else
- INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);
-
- vxlan_work->priv = priv;
- vxlan_work->port = port;
- queue_work(priv->wq, &vxlan_work->work);
-}
-
-void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
- return;
-
- if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
- return;
-
- mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
-}
-
-void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
- struct mlx5e_priv *priv = netdev_priv(netdev);
-
- if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
- return;
-
- if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
- return;
-
- mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0);
-}
-
static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
struct sk_buff *skb,
netdev_features_t features)
.ndo_change_mtu = mlx5e_change_nic_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
- .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
- .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
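+ /* generic entry points provided by the udp_tunnel_nic core; they
+ * invoke the set_port/unset_port callbacks registered below
+ */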
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx5e_features_check,
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp,
}
}
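+/* Callbacks invoked by the udp_tunnel_nic core, which serializes the
+ * calls and refcounts ports on the driver's behalf; 'table' and 'entry'
+ * are unused here since the device keys offloads by UDP port alone.
+ */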
+static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5_vxlan_add_port(priv->mdev->vxlan, ntohs(ti->port));
+}
+
+static int mlx5e_vxlan_unset_port(struct net_device *netdev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return mlx5_vxlan_del_port(priv->mdev->vxlan, ntohs(ti->port));
+}
+
+void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
+{
+ if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
+ return;
+
+ priv->nic_info.set_port = mlx5e_vxlan_set_port;
+ priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
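+ /* MAY_SLEEP: the port callbacks issue blocking firmware commands.
+ * STATIC_IANA_VXLAN: the IANA port (4789) is always offloaded and
+ * must not be counted against the table size.
+ */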
+ priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+ UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
+ priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
+ /* Don't count the space hard-coded to the IANA port */
+ priv->nic_info.tables[0].n_entries =
+ mlx5_vxlan_max_udp_ports(priv->mdev) - 1;
+
+ priv->netdev->udp_tunnel_nic_info = &priv->nic_info;
+}
+
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
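+ /* expose the udp_tunnel offload tables before the netdev is registered */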
+ mlx5e_vxlan_set_netdev_info(priv);
+
if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
mlx5e_any_tunnel_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
rtnl_lock();
if (netif_running(netdev))
mlx5e_open(netdev);
- if (mlx5_vxlan_allowed(priv->mdev->vxlan))
- udp_tunnel_get_rx_info(netdev);
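+ /* notify the core that tunnel-port state was lost so offloaded
+ * ports are replayed through set_port()
+ */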
+ udp_tunnel_nic_reset_ntf(priv->netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
rtnl_lock();
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
- if (mlx5_vxlan_allowed(priv->mdev->vxlan))
- udp_tunnel_drop_rx_info(priv->netdev);
netif_device_detach(priv->netdev);
rtnl_unlock();
struct mlx5_core_dev *mdev;
/* max_num_ports is usually 4, 16 buckets is more than enough */
DECLARE_HASHTABLE(htable, 4);
- int num_ports;
struct mutex sync_lock; /* sync add/del port HW operations */
};
struct mlx5_vxlan_port {
struct hlist_node hlist;
- refcount_t refcount;
u16 udp_port;
};
-static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
-{
- return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
-}
-
static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
{
u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {};
int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
{
struct mlx5_vxlan_port *vxlanp;
- int ret = 0;
-
- mutex_lock(&vxlan->sync_lock);
- vxlanp = vxlan_lookup_port(vxlan, port);
- if (vxlanp) {
- refcount_inc(&vxlanp->refcount);
- goto unlock;
- }
+ int ret;
- if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
- mlx5_core_info(vxlan->mdev,
- "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
- port, mlx5_vxlan_max_udp_ports(vxlan->mdev));
- ret = -ENOSPC;
- goto unlock;
- }
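+ /* port refcounting and capacity checks now live in the
+ * udp_tunnel_nic core, so each call maps 1:1 to a FW command
+ */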
+ vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL);
+ if (!vxlanp)
+ return -ENOMEM;
+ vxlanp->udp_port = port;
ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port);
- if (ret)
- goto unlock;
-
- vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL);
- if (!vxlanp) {
- ret = -ENOMEM;
- goto err_delete_port;
+ if (ret) {
+ kfree(vxlanp);
+ return ret;
}
- vxlanp->udp_port = port;
- refcount_set(&vxlanp->refcount, 1);
-
+ mutex_lock(&vxlan->sync_lock);
hash_add_rcu(vxlan->htable, &vxlanp->hlist, port);
-
- vxlan->num_ports++;
mutex_unlock(&vxlan->sync_lock);
- return 0;
-
-err_delete_port:
- mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
-unlock:
- mutex_unlock(&vxlan->sync_lock);
- return ret;
+ return 0;
}
int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
mutex_lock(&vxlan->sync_lock);
vxlanp = vxlan_lookup_port(vxlan, port);
- if (!vxlanp) {
+ if (WARN_ON(!vxlanp)) {
ret = -ENOENT;
goto out_unlock;
}
- if (refcount_dec_and_test(&vxlanp->refcount)) {
- hash_del_rcu(&vxlanp->hlist);
- synchronize_rcu();
- mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
- kfree(vxlanp);
- vxlan->num_ports--;
- }
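+ /* the core guarantees a prior successful add, so the entry must
+ * exist; remove it unconditionally
+ */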
+ hash_del_rcu(&vxlanp->hlist);
+ synchronize_rcu();
+ mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
+ kfree(vxlanp);
out_unlock:
mutex_unlock(&vxlan->sync_lock);