}
}
-static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
+static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
+ struct nicvf *nic)
{
- struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
- work.work);
- struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
union nic_mbx mbx = {};
int idx;
- if (!vf_work)
- return;
-
/* From inside the VM code flow we have only 128 bits of mailbox
 * memory available for a message to the host's PF, so send the mc
 * addrs one by one, starting with a flush command in case the
 * kernel requests specific MAC filtering.
 */
mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
nicvf_send_msg_to_pf(nic, &mbx);
- if (vf_work->mode & BGX_XCAST_MCAST_FILTER) {
+ if (mode & BGX_XCAST_MCAST_FILTER) {
/* once filtering is enabled, signal the PF to add its own LMAC
 * to the filter so that packets destined to it are still accepted.
 */
mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
mbx.xcast.data.mac = 0;
nicvf_send_msg_to_pf(nic, &mbx);
}
/* check if we have any specific MACs to be added to PF DMAC filter */
- if (vf_work->mc) {
+ if (mc_addrs) {
/* now go through kernel list of MACs and add them one by one */
- for (idx = 0; idx < vf_work->mc->count; idx++) {
+ for (idx = 0; idx < mc_addrs->count; idx++) {
mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
- mbx.xcast.data.mac = vf_work->mc->mc[idx];
+ mbx.xcast.data.mac = mc_addrs->mc[idx];
nicvf_send_msg_to_pf(nic, &mbx);
}
- kfree(vf_work->mc);
+ kfree(mc_addrs);
}
/* and finally set rx mode for PF accordingly */
mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
- mbx.xcast.data.mode = vf_work->mode;
+ mbx.xcast.data.mode = mode;
nicvf_send_msg_to_pf(nic, &mbx);
}
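
For context, the "128 bits" constraint in the comment above is the size of the VF-to-PF mailbox window, which is why the loop sends one MAC per message. A minimal sketch of the xcast message layout this helper fills in, with field names inferred from their uses here rather than copied from nic.h:

union nic_mbx {
        /* sketch only: reconstructed from the accesses above */
        struct {
                u8 msg;                 /* NIC_MBOX_MSG_* opcode */
                union {
                        u8  mode;       /* BGX_XCAST_* rx-mode bits */
                        u64 mac;        /* one multicast MAC per message */
                } data;
        } xcast;
        /* ... other mailbox message types elided ... */
};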
+static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
+{
+ struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
+ work.work);
+ struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
+ u8 mode;
+ struct xcast_addr_list *mc;
+
+ if (!vf_work)
+ return;
+
+ /* Save message data locally so it cannot be overwritten by the
+ * next ndo_set_rx_mode() call.
+ */
+ spin_lock(&nic->rx_mode_wq_lock);
+ mode = vf_work->mode;
+ mc = vf_work->mc;
+ vf_work->mc = NULL;
+ spin_unlock(&nic->rx_mode_wq_lock);
+
+ __nicvf_set_rx_mode_task(mode, mc, nic);
+}
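
The two container_of() steps above assume the delayed work is embedded in a small per-VF bundle, which in turn is embedded in struct nicvf. A sketch of that layout, inferred from the field accesses in this patch (the authoritative declarations live in nic.h):

struct nicvf_work {
        struct delayed_work work;       /* queued on nicvf_rx_mode_wq */
        u8 mode;                        /* requested BGX_XCAST_* mode */
        struct xcast_addr_list *mc;     /* snapshot of mc addrs, or NULL */
};

struct nicvf {
        /* ... */
        struct nicvf_work rx_mode_work;
        spinlock_t rx_mode_wq_lock;     /* guards rx_mode_work.mode/mc */
        /* ... */
};

Snapshotting mode and mc under the lock and dropping it before the mailbox I/O keeps the critical section short: nicvf_send_msg_to_pf() can block waiting for the PF's ACK, which must not happen while holding a spinlock.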
+
static void nicvf_set_rx_mode(struct net_device *netdev)
{
struct nicvf *nic = netdev_priv(netdev);
}
}
}
+ spin_lock(&nic->rx_mode_wq_lock);
+ kfree(nic->rx_mode_work.mc);
nic->rx_mode_work.mc = mc_list;
nic->rx_mode_work.mode = mode;
- queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 2 * HZ);
+ queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0);
+ spin_unlock(&nic->rx_mode_wq_lock);
}
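
To see why both the lock and the kfree() of the previous list are needed, consider an interleaving the old code allowed (a hypothetical trace for illustration, not actual driver output):

/*
 * CPU0: nicvf_set_rx_mode()          CPU1: rx-mode worker (old code)
 * -------------------------          -------------------------------
 * rx_mode_work.mc = list_A;
 * queue_delayed_work(...);
 *                                    walks vf_work->mc      (list_A)
 * rx_mode_work.mc = list_B;          ...
 *                                    kfree(vf_work->mc);    (frees list_B)
 *
 * list_A leaks and a later worker run could dereference the freed
 * list_B. With this patch the worker detaches mc under
 * rx_mode_wq_lock, and the producer kfree()s any not-yet-consumed
 * list under the same lock, so each list has exactly one owner.
 */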
static const struct net_device_ops nicvf_netdev_ops = {
INIT_WORK(&nic->reset_task, nicvf_reset_task);
INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
+ spin_lock_init(&nic->rx_mode_wq_lock);
err = register_netdev(netdev);
if (err) {