bool wd_state;
int rc;
+ set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
if (rc) {
dev_err(&pdev->dev, "Can not initialize device\n");
goto err_device_destroy;
}
+ clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
+ /* Make sure we don't have a race with the AENQ link state handler */
+ if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
+ netif_carrier_on(adapter->netdev);
+
rc = ena_enable_msix_and_set_admin_interrupts(adapter,
adapter->num_queues);
if (rc) {
ena_com_admin_destroy(ena_dev);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
-
+ clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
dev_err(&pdev->dev,
"Reset attempt failed. Can not reset the device\n");
if (status) {
netdev_dbg(adapter->netdev, "%s\n", __func__);
set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
- netif_carrier_on(adapter->netdev);
+ if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
+ netif_carrier_on(adapter->netdev);
} else {
clear_bit(ENA_FLAG_LINK_UP, &adapter->flags);
netif_carrier_off(adapter->netdev);
ENA_FLAG_DEV_UP,
ENA_FLAG_LINK_UP,
ENA_FLAG_MSIX_ENABLED,
- ENA_FLAG_TRIGGER_RESET
+ ENA_FLAG_TRIGGER_RESET,
+ ENA_FLAG_ONGOING_RESET
};
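
The ENA hunks above form a two-flag handshake: the reset path raises ENA_FLAG_ONGOING_RESET for the duration of the reset, the AENQ link handler records link state but defers netif_carrier_on() while that flag is set, and the reset path re-checks ENA_FLAG_LINK_UP after clearing it, so no carrier event is lost in the window. The error path clears both DEVICE_RUNNING and ONGOING_RESET so a failed reset does not leave the handler muzzled. A minimal sketch of the two sides, with hypothetical function names and assuming only that adapter->flags is an unsigned long bitmap as in the driver:

        /* Reset side: re-check after dropping the flag, so an event that
         * raced with the reset still results in carrier-on.
         */
        static void reset_done(struct ena_adapter *adapter)
        {
                clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
                if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
                        netif_carrier_on(adapter->netdev);
        }

        /* AENQ side: always record the state, act on it only when no
         * reset is in flight.
         */
        static void link_up_event(struct ena_adapter *adapter)
        {
                set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
                if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
                        netif_carrier_on(adapter->netdev);
        }
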
/* adapter specific private data structure */
if (!mlxsw_sp->ports)
return -ENOMEM;
- mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
+ mlxsw_sp->port_to_module = kmalloc_array(max_ports, sizeof(int),
+ GFP_KERNEL);
if (!mlxsw_sp->port_to_module) {
err = -ENOMEM;
goto err_port_to_module_alloc;
}
for (i = 1; i < max_ports; i++) {
+ /* Mark as invalid */
+ mlxsw_sp->port_to_module[i] = -1;
+
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
&width, &lane);
if (err)
for (i = 0; i < count; i++) {
local_port = base_port + i * 2;
+ if (mlxsw_sp->port_to_module[local_port] < 0)
+ continue;
module = mlxsw_sp->port_to_module[local_port];
mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
const struct mlxsw_bus_info *bus_info;
unsigned char base_mac[ETH_ALEN];
struct mlxsw_sp_upper *lags;
- u8 *port_to_module;
+ int *port_to_module;
struct mlxsw_sp_sb *sb;
struct mlxsw_sp_bridge *bridge;
struct mlxsw_sp_router *router;
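
The element-type change above is what makes the sentinel possible: module numbers start at 0, so a u8 array has no spare value that can mean "no module detected", while an int array can hold -1. A sketch of the resulting lookup discipline (the helper name is hypothetical; the field is the one declared above):

        /* -1 marks a local port whose module query failed or was skipped */
        static bool port_has_module(const struct mlxsw_sp *mlxsw_sp,
                                    u8 local_port)
        {
                return mlxsw_sp->port_to_module[local_port] >= 0;
        }
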
vxge_debug_init(VXGE_ERR,
"vxge_hw_vpath_reset failed for"
"vpath:%d", vp_id);
- return status;
+ return status;
}
} else
return VXGE_HW_FAIL;
* for all VPATHs. The h/w only uses the lowest numbered VPATH
* when steering frames.
*/
- for (index = 0; index < vdev->no_of_vpath; index++) {
+ for (index = 0; index < vdev->no_of_vpath; index++) {
status = vxge_hw_vpath_rts_rth_set(
vdev->vpaths[index].handle,
vdev->config.rth_algorithm,
&hash_types,
vdev->config.rth_bkt_sz);
- if (status != VXGE_HW_OK) {
+ if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"RTH configuration failed for vpath:%d",
vdev->vpaths[index].device_id);
return status;
- }
- }
+ }
+ }
return status;
}
vxge_debug_init(VXGE_ERR,
"vxge_hw_vpath_reset failed for "
"vpath:%d", i);
- return status;
+ return status;
}
}
}
switch (msix_idx) {
case 0:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
+ "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
vdev->ndev->name,
vdev->entries[intr_cnt].entry,
pci_fun, vp_idx);
ret = request_irq(
- vdev->entries[intr_cnt].vector,
+ vdev->entries[intr_cnt].vector,
vxge_tx_msix_handle, 0,
vdev->desc[intr_cnt],
&vdev->vpaths[vp_idx].fifo);
- vdev->vxge_entries[intr_cnt].arg =
+ vdev->vxge_entries[intr_cnt].arg =
&vdev->vpaths[vp_idx].fifo;
irq_req = 1;
break;
case 1:
snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
- "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
+ "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
vdev->ndev->name,
vdev->entries[intr_cnt].entry,
pci_fun, vp_idx);
ret = request_irq(
- vdev->entries[intr_cnt].vector,
- vxge_rx_msix_napi_handle,
- 0,
+ vdev->entries[intr_cnt].vector,
+ vxge_rx_msix_napi_handle, 0,
vdev->desc[intr_cnt],
&vdev->vpaths[vp_idx].ring);
- vdev->vxge_entries[intr_cnt].arg =
+ vdev->vxge_entries[intr_cnt].arg =
&vdev->vpaths[vp_idx].ring;
irq_req = 1;
break;
vxge_rem_msix_isr(vdev);
vdev->config.intr_type = INTA;
vxge_debug_init(VXGE_ERR,
- "%s: Defaulting to INTA"
- , vdev->ndev->name);
- goto INTA_MODE;
+ "%s: Defaulting to INTA",
+ vdev->ndev->name);
+ goto INTA_MODE;
}
if (irq_req) {
if (status != VXGE_HW_OK) {
vxge_debug_init(VXGE_ERR,
"Failed to initialize device (%d)", status);
- ret = -EINVAL;
- goto _exit3;
+ ret = -EINVAL;
+ goto _exit3;
}
if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure.
+ * @egress: NFP netdev is the egress.
*
* Adds a new flow to the repeated hash structure and action payload.
*
rtl_writephy(tp, 0x1f, 0x0000);
/* EEE setting */
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
+ rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x8b85);
- rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000);
+ rtl_w0w1_phy(tp, 0x06, 0x2000, 0x0000);
rtl_writephy(tp, 0x1f, 0x0004);
rtl_writephy(tp, 0x1f, 0x0007);
rtl_writephy(tp, 0x1e, 0x0020);
- rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100);
+ rtl_w0w1_phy(tp, 0x15, 0x0100, 0x0000);
rtl_writephy(tp, 0x1f, 0x0002);
rtl_writephy(tp, 0x1f, 0x0000);
rtl_writephy(tp, 0x0d, 0x0007);
rtl_writephy(tp, 0x0e, 0x003c);
rtl_writephy(tp, 0x0d, 0x4007);
- rtl_writephy(tp, 0x0e, 0x0000);
+ rtl_writephy(tp, 0x0e, 0x0006);
rtl_writephy(tp, 0x0d, 0x0000);
/* Green feature */
rtl_writephy(tp, 0x1f, 0x0003);
- rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001);
- rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400);
+ rtl_w0w1_phy(tp, 0x19, 0x0001, 0x0000);
+ rtl_w0w1_phy(tp, 0x10, 0x0400, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
+ /* soft-reset phy */
+ rtl_writephy(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
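
All of the rtl_w0w1_*() edits above flip the same two arguments. In the r8169 driver these helpers take the bits to set before the bits to clear, so swapping (0x0000, 0x2000) for (0x2000, 0x0000) turns a bit-clear into a bit-set; the net effect is that the EEE and green-feature bits are now enabled instead of disabled, and the MMD write of 0x0006 advertises EEE for 100BASE-TX and 1000BASE-T. A self-contained sketch of the read-modify-write idiom the helpers implement (helper name hypothetical):

        /* w0w1: clear the "m" bits, then set the "p" bits, in one update */
        static unsigned int w0w1(unsigned int val, unsigned int p,
                                 unsigned int m)
        {
                return (val & ~m) | p;
        }
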
err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
if (err)
goto err_redirect;
+ rcu_read_unlock();
return NULL;
case XDP_TX:
xdp_xmit = true;
if (xdp_xmit) {
skb->dev = tun->dev;
generic_xdp_tx(skb, xdp_prog);
- rcu_read_lock();
+ rcu_read_unlock();
return NULL;
}
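
Both tun hunks are lock-balance fixes for the rcu_read_lock() taken earlier in tun_build_skb(): the XDP_REDIRECT early return leaked the read lock, and the generic-XDP transmit path called rcu_read_lock() where it meant to unlock. A sketch of the invariant, with hypothetical helpers standing in for the elided driver logic:

        bool redirect_frame(void);              /* hypothetical stand-ins */
        bool transmit_frame(void);
        struct sk_buff *build_result(void);

        static struct sk_buff *build_skb_sketch(void)
        {
                rcu_read_lock();
                if (redirect_frame()) {         /* e.g. the XDP_REDIRECT path */
                        rcu_read_unlock();      /* early return must still unlock */
                        return NULL;
                }
                if (transmit_frame()) {         /* e.g. the generic XDP tx path */
                        rcu_read_unlock();      /* unlock, not a second lock */
                        return NULL;
                }
                rcu_read_unlock();
                return build_result();
        }
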
static int ipheth_carrier_set(struct ipheth_device *dev)
{
- struct usb_device *udev = dev->udev;
+ struct usb_device *udev;
int retval;
+
if (!dev)
return 0;
if (!dev->confirmed_pairing)
return 0;
+
+ udev = dev->udev;
retval = usb_control_msg(udev,
usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
IPHETH_CMD_CARRIER_CHECK, /* request */
{QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+ {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
{
struct hwsim_new_radio_params param = { 0 };
const char *hwname = NULL;
+ int ret;
param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
param.regd = hwsim_world_regdom_custom[idx];
}
- return mac80211_hwsim_new_radio(info, &param);
+ ret = mac80211_hwsim_new_radio(info, &param);
+ kfree(hwname);
+ return ret;
}
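
The hwsim change plugs a leak: hwname holds a heap duplicate of the radio-name attribute (made earlier in the function, outside this excerpt), and the old code returned without freeing it. Capturing the return value first gives a single exit where the copy is always released:

        /* Single-exit shape: compute the result, free temporaries, return.
         * kfree(NULL) is a no-op, so the no-name-attribute case needs no
         * special casing.
         */
        ret = mac80211_hwsim_new_radio(info, &param);
        kfree(hwname);
        return ret;
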
static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
-bool tcp_schedule_loss_probe(struct sock *sk);
+bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
const struct sk_buff *next_skb);
/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
static void tcp_set_xmit_timer(struct sock *sk)
{
- if (!tcp_schedule_loss_probe(sk))
+ if (!tcp_schedule_loss_probe(sk, true))
tcp_rearm_rto(sk);
}
/* Send one loss probe per tail loss episode. */
if (push_one != 2)
- tcp_schedule_loss_probe(sk);
+ tcp_schedule_loss_probe(sk, false);
is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
tcp_cwnd_validate(sk, is_cwnd_limited);
return false;
return !tp->packets_out && !tcp_write_queue_empty(sk);
}
-bool tcp_schedule_loss_probe(struct sock *sk)
+bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
}
/* If the RTO formula yields an earlier time, then use that time. */
- rto_delta_us = tcp_rto_delta_us(sk); /* How far in future is RTO? */
+ rto_delta_us = advancing_rto ?
+ jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
+ tcp_rto_delta_us(sk); /* How far in future is RTO? */
if (rto_delta_us > 0)
timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
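
The advancing_rto flag covers the caller in tcp_set_xmit_timer(), which is about to re-arm the RTO anyway: there the probe timeout is clamped against a full icsk_rto measured from now, while the transmit-path caller keeps clamping against the time remaining until the already-armed RTO fires. Either way the clamp itself is the same; a sketch of just that arithmetic:

        /* Clamp the tail-loss-probe timeout so it never fires after the RTO */
        static u32 clamp_tlp_timeout(u32 timeout, s64 rto_delta_us)
        {
                if (rto_delta_us > 0)
                        timeout = min_t(u32, timeout,
                                        usecs_to_jiffies(rto_delta_us));
                return timeout;
        }
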
&ipv6h->saddr, &ipv6h->daddr, tpi->key,
tpi->proto);
if (tunnel) {
- ip6_tnl_rcv(tunnel, skb, tpi, NULL, false);
+ ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
return PACKET_RCVD;
}
* After accepting the AddBA Request we activated a timer,
* resetting it after each frame that arrives from the originator.
*/
-static void sta_rx_agg_session_timer_expired(unsigned long data)
+static void sta_rx_agg_session_timer_expired(struct timer_list *t)
{
- /* not an elegant detour, but there is no choice as the timer passes
- * only one argument, and various sta_info are needed here, so init
- * flow in sta_info_create gives the TID as data, while the timer_to_id
- * array gives the sta through container_of */
- u8 *ptid = (u8 *)data;
- u8 *timer_to_id = ptid - *ptid;
- struct sta_info *sta = container_of(timer_to_id, struct sta_info,
- timer_to_tid[0]);
+ struct tid_ampdu_rx *tid_rx_timer =
+ from_timer(tid_rx_timer, t, session_timer);
+ struct sta_info *sta = tid_rx_timer->sta;
+ u8 tid = tid_rx_timer->tid;
struct tid_ampdu_rx *tid_rx;
unsigned long timeout;
rcu_read_lock();
- tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]);
+ tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
if (!tid_rx) {
rcu_read_unlock();
return;
rcu_read_unlock();
ht_dbg(sta->sdata, "RX session timer expired on %pM tid %d\n",
- sta->sta.addr, (u16)*ptid);
+ sta->sta.addr, tid);
- set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired);
+ set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired);
ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
}
-static void sta_rx_agg_reorder_timer_expired(unsigned long data)
+static void sta_rx_agg_reorder_timer_expired(struct timer_list *t)
{
- u8 *ptid = (u8 *)data;
- u8 *timer_to_id = ptid - *ptid;
- struct sta_info *sta = container_of(timer_to_id, struct sta_info,
- timer_to_tid[0]);
+ struct tid_ampdu_rx *tid_rx = from_timer(tid_rx, t, reorder_timer);
rcu_read_lock();
- ieee80211_release_reorder_timeout(sta, *ptid);
+ ieee80211_release_reorder_timeout(tid_rx->sta, tid_rx->tid);
rcu_read_unlock();
}
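
Every conversion in this series follows the same recipe: timer_setup() replaces setup_timer()/setup_deferrable_timer(), the callback takes a struct timer_list * instead of an unsigned long cookie, and from_timer() (a container_of() keyed on the named timer field) recovers the enclosing object, which now carries explicit back-pointers instead of the old timer_to_tid[] identity-array trick. A minimal self-contained sketch, with a hypothetical struct and consumer:

        #include <linux/timer.h>
        #include <linux/types.h>

        struct sta_info;                                /* opaque here */
        void handle_expiry(struct sta_info *sta, u8 tid);  /* hypothetical */

        struct my_tid_state {
                struct timer_list session_timer;
                struct sta_info *sta;   /* back-pointer filled in at init */
                u8 tid;
        };

        static void my_session_expired(struct timer_list *t)
        {
                /* from_timer() == container_of(t, struct my_tid_state,
                 * session_timer)
                 */
                struct my_tid_state *state = from_timer(state, t,
                                                        session_timer);

                handle_expiry(state->sta, state->tid);
        }

        static void my_tid_state_init(struct my_tid_state *state,
                                      struct sta_info *sta, u8 tid)
        {
                state->sta = sta;
                state->tid = tid;
                timer_setup(&state->session_timer, my_session_expired,
                            TIMER_DEFERRABLE);
        }
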
spin_lock_init(&tid_agg_rx->reorder_lock);
/* rx timer */
- setup_deferrable_timer(&tid_agg_rx->session_timer,
- sta_rx_agg_session_timer_expired,
- (unsigned long)&sta->timer_to_tid[tid]);
+ timer_setup(&tid_agg_rx->session_timer,
+ sta_rx_agg_session_timer_expired, TIMER_DEFERRABLE);
/* rx reorder timer */
- setup_timer(&tid_agg_rx->reorder_timer,
- sta_rx_agg_reorder_timer_expired,
- (unsigned long)&sta->timer_to_tid[tid]);
+ timer_setup(&tid_agg_rx->reorder_timer,
+ sta_rx_agg_reorder_timer_expired, 0);
/* prepare reordering buffer */
tid_agg_rx->reorder_buf =
tid_agg_rx->auto_seq = auto_seq;
tid_agg_rx->started = false;
tid_agg_rx->reorder_buf_filtered = 0;
+ tid_agg_rx->tid = tid;
+ tid_agg_rx->sta = sta;
status = WLAN_STATUS_SUCCESS;
/* activate it for RX */
spin_lock_bh(&sta->lock);
+ /* free struct pending for start, if present */
+ tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
+ kfree(tid_tx);
+ sta->ampdu_mlme.tid_start_tx[tid] = NULL;
+
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
if (!tid_tx) {
spin_unlock_bh(&sta->lock);
* add Block Ack response will arrive from the recipient.
* If this timer expires sta_addba_resp_timer_expired will be executed.
*/
-static void sta_addba_resp_timer_expired(unsigned long data)
+static void sta_addba_resp_timer_expired(struct timer_list *t)
{
- /* not an elegant detour, but there is no choice as the timer passes
- * only one argument, and both sta_info and TID are needed, so init
- * flow in sta_info_create gives the TID as data, while the timer_to_id
- * array gives the sta through container_of */
- u16 tid = *(u8 *)data;
- struct sta_info *sta = container_of((void *)data,
- struct sta_info, timer_to_tid[tid]);
+ struct tid_ampdu_tx *tid_tx_timer =
+ from_timer(tid_tx_timer, t, addba_resp_timer);
+ struct sta_info *sta = tid_tx_timer->sta;
+ u8 tid = tid_tx_timer->tid;
struct tid_ampdu_tx *tid_tx;
/* check if the TID waits for addBA response */
* After accepting the AddBA Response we activated a timer,
* resetting it after each frame that we send.
*/
-static void sta_tx_agg_session_timer_expired(unsigned long data)
+static void sta_tx_agg_session_timer_expired(struct timer_list *t)
{
- /* not an elegant detour, but there is no choice as the timer passes
- * only one argument, and various sta_info are needed here, so init
- * flow in sta_info_create gives the TID as data, while the timer_to_id
- * array gives the sta through container_of */
- u8 *ptid = (u8 *)data;
- u8 *timer_to_id = ptid - *ptid;
- struct sta_info *sta = container_of(timer_to_id, struct sta_info,
- timer_to_tid[0]);
+ struct tid_ampdu_tx *tid_tx_timer =
+ from_timer(tid_tx_timer, t, session_timer);
+ struct sta_info *sta = tid_tx_timer->sta;
+ u8 tid = tid_tx_timer->tid;
struct tid_ampdu_tx *tid_tx;
unsigned long timeout;
rcu_read_lock();
- tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
rcu_read_unlock();
return;
rcu_read_unlock();
ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
- sta->sta.addr, (u16)*ptid);
+ sta->sta.addr, tid);
- ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
+ ieee80211_stop_tx_ba_session(&sta->sta, tid);
}
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
tid_tx->timeout = timeout;
+ tid_tx->sta = sta;
+ tid_tx->tid = tid;
/* response timer */
- setup_timer(&tid_tx->addba_resp_timer,
- sta_addba_resp_timer_expired,
- (unsigned long)&sta->timer_to_tid[tid]);
+ timer_setup(&tid_tx->addba_resp_timer, sta_addba_resp_timer_expired, 0);
/* tx timer */
- setup_deferrable_timer(&tid_tx->session_timer,
- sta_tx_agg_session_timer_expired,
- (unsigned long)&sta->timer_to_tid[tid]);
+ timer_setup(&tid_tx->session_timer,
+ sta_tx_agg_session_timer_expired, TIMER_DEFERRABLE);
/* assign a dialog token */
sta->ampdu_mlme.dialog_token_allocator++;
sdata_unlock(sdata);
}
-static void ieee80211_ibss_timer(unsigned long data)
+static void ieee80211_ibss_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
- (struct ieee80211_sub_if_data *) data;
+ from_timer(sdata, t, u.ibss.timer);
ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
- setup_timer(&ifibss->timer, ieee80211_ibss_timer,
- (unsigned long) sdata);
+ timer_setup(&ifibss->timer, ieee80211_ibss_timer, 0);
INIT_LIST_HEAD(&ifibss->incomplete_stations);
spin_lock_init(&ifibss->incomplete_lock);
INIT_WORK(&ifibss->csa_connection_drop_work,
const struct ieee80211_tpt_blink *blink_table;
unsigned int blink_table_len;
struct timer_list timer;
+ struct ieee80211_local *local;
unsigned long prev_traffic;
unsigned long tx_bytes, rx_bytes;
unsigned int active, want;
void ieee80211_dynamic_ps_enable_work(struct work_struct *work);
void ieee80211_dynamic_ps_disable_work(struct work_struct *work);
-void ieee80211_dynamic_ps_timer(unsigned long data);
+void ieee80211_dynamic_ps_timer(struct timer_list *t);
void ieee80211_send_nullfunc(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
bool powersave);
return DIV_ROUND_UP(delta, 1024 / 8);
}
-static void tpt_trig_timer(unsigned long data)
+static void tpt_trig_timer(struct timer_list *t)
{
- struct ieee80211_local *local = (void *)data;
- struct tpt_led_trigger *tpt_trig = local->tpt_led_trigger;
+ struct tpt_led_trigger *tpt_trig = from_timer(tpt_trig, t, timer);
+ struct ieee80211_local *local = tpt_trig->local;
struct led_classdev *led_cdev;
unsigned long on, off, tpt;
int i;
tpt_trig->blink_table = blink_table;
tpt_trig->blink_table_len = blink_table_len;
tpt_trig->want = flags;
+ tpt_trig->local = local;
- setup_timer(&tpt_trig->timer, tpt_trig_timer, (unsigned long)local);
+ timer_setup(&tpt_trig->timer, tpt_trig_timer, 0);
local->tpt_led_trigger = tpt_trig;
tpt_trig_traffic(local, tpt_trig);
tpt_trig->running = true;
- tpt_trig_timer((unsigned long)local);
+ tpt_trig_timer(&tpt_trig->timer);
mod_timer(&tpt_trig->timer, round_jiffies(jiffies + HZ));
}
ieee80211_dynamic_ps_enable_work);
INIT_WORK(&local->dynamic_ps_disable_work,
ieee80211_dynamic_ps_disable_work);
- setup_timer(&local->dynamic_ps_timer,
- ieee80211_dynamic_ps_timer, (unsigned long) local);
+ timer_setup(&local->dynamic_ps_timer, ieee80211_dynamic_ps_timer, 0);
INIT_WORK(&local->sched_scan_stopped_work,
ieee80211_sched_scan_stopped_work);
kmem_cache_destroy(rm_cache);
}
-static void ieee80211_mesh_housekeeping_timer(unsigned long data)
+static void ieee80211_mesh_housekeeping_timer(struct timer_list *t)
{
- struct ieee80211_sub_if_data *sdata = (void *) data;
+ struct ieee80211_sub_if_data *sdata =
+ from_timer(sdata, t, u.mesh.housekeeping_timer);
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
return 0;
}
-static void ieee80211_mesh_path_timer(unsigned long data)
+static void ieee80211_mesh_path_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
- (struct ieee80211_sub_if_data *) data;
+ from_timer(sdata, t, u.mesh.mesh_path_timer);
ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}
-static void ieee80211_mesh_path_root_timer(unsigned long data)
+static void ieee80211_mesh_path_root_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
- (struct ieee80211_sub_if_data *) data;
+ from_timer(sdata, t, u.mesh.mesh_path_root_timer);
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
static u8 zero_addr[ETH_ALEN] = {};
- setup_timer(&ifmsh->housekeeping_timer,
- ieee80211_mesh_housekeeping_timer,
- (unsigned long) sdata);
+ timer_setup(&ifmsh->housekeeping_timer,
+ ieee80211_mesh_housekeeping_timer, 0);
ifmsh->accepting_plinks = true;
atomic_set(&ifmsh->mpaths, 0);
mesh_pathtbl_init(sdata);
- setup_timer(&ifmsh->mesh_path_timer,
- ieee80211_mesh_path_timer,
- (unsigned long) sdata);
- setup_timer(&ifmsh->mesh_path_root_timer,
- ieee80211_mesh_path_root_timer,
- (unsigned long) sdata);
+ timer_setup(&ifmsh->mesh_path_timer, ieee80211_mesh_path_timer, 0);
+ timer_setup(&ifmsh->mesh_path_root_timer,
+ ieee80211_mesh_path_root_timer, 0);
INIT_LIST_HEAD(&ifmsh->preq_queue.list);
skb_queue_head_init(&ifmsh->ps.bc_buf);
spin_lock_init(&ifmsh->mesh_preq_queue_lock);
int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata);
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
-void mesh_path_timer(unsigned long data);
+void mesh_path_timer(struct timer_list *t);
void mesh_path_flush_by_nexthop(struct sta_info *sta);
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
return err;
}
-void mesh_path_timer(unsigned long data)
+void mesh_path_timer(struct timer_list *t)
{
- struct mesh_path *mpath = (void *) data;
+ struct mesh_path *mpath = from_timer(mpath, t, timer);
struct ieee80211_sub_if_data *sdata = mpath->sdata;
int ret;
skb_queue_head_init(&new_mpath->frame_queue);
new_mpath->exp_time = jiffies;
spin_lock_init(&new_mpath->state_lock);
- setup_timer(&new_mpath->timer, mesh_path_timer,
- (unsigned long) new_mpath);
+ timer_setup(&new_mpath->timer, mesh_path_timer, 0);
return new_mpath;
}
}
EXPORT_SYMBOL(ieee80211_chswitch_done);
-static void ieee80211_chswitch_timer(unsigned long data)
+static void ieee80211_chswitch_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
- (struct ieee80211_sub_if_data *) data;
+ from_timer(sdata, t, u.mgd.chswitch_timer);
ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work);
}
}
}
-void ieee80211_dynamic_ps_timer(unsigned long data)
+void ieee80211_dynamic_ps_timer(struct timer_list *t)
{
- struct ieee80211_local *local = (void *) data;
+ struct ieee80211_local *local = from_timer(local, t, dynamic_ps_timer);
ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work);
}
sdata_unlock(sdata);
}
-static void ieee80211_sta_timer(unsigned long data)
+static void ieee80211_sta_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
- (struct ieee80211_sub_if_data *) data;
+ from_timer(sdata, t, u.mgd.timer);
ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}
sdata_unlock(sdata);
}
-static void ieee80211_sta_bcn_mon_timer(unsigned long data)
+static void ieee80211_sta_bcn_mon_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
- (struct ieee80211_sub_if_data *) data;
+ from_timer(sdata, t, u.mgd.bcn_mon_timer);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn)
&sdata->u.mgd.beacon_connection_loss_work);
}
-static void ieee80211_sta_conn_mon_timer(unsigned long data)
+static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
{
struct ieee80211_sub_if_data *sdata =
- (struct ieee80211_sub_if_data *) data;
+ from_timer(sdata, t, u.mgd.conn_mon_timer);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
INIT_WORK(&ifmgd->request_smps_work, ieee80211_request_smps_mgd_work);
INIT_DELAYED_WORK(&ifmgd->tdls_peer_del_work,
ieee80211_tdls_peer_del_work);
- setup_timer(&ifmgd->timer, ieee80211_sta_timer,
- (unsigned long) sdata);
- setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer,
- (unsigned long) sdata);
- setup_timer(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer,
- (unsigned long) sdata);
- setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer,
- (unsigned long) sdata);
+ timer_setup(&ifmgd->timer, ieee80211_sta_timer, 0);
+ timer_setup(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, 0);
+ timer_setup(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer, 0);
+ timer_setup(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, 0);
INIT_DELAYED_WORK(&ifmgd->tx_tspec_wk,
ieee80211_sta_handle_tspec_ac_params_wk);
sdata_unlock(sdata);
}
-static void ieee80211_ocb_housekeeping_timer(unsigned long data)
+static void ieee80211_ocb_housekeeping_timer(struct timer_list *t)
{
- struct ieee80211_sub_if_data *sdata = (void *)data;
+ struct ieee80211_sub_if_data *sdata =
+ from_timer(sdata, t, u.ocb.housekeeping_timer);
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
{
struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
- setup_timer(&ifocb->housekeeping_timer,
- ieee80211_ocb_housekeeping_timer,
- (unsigned long)sdata);
+ timer_setup(&ifocb->housekeeping_timer,
+ ieee80211_ocb_housekeeping_timer, 0);
INIT_LIST_HEAD(&ifocb->incomplete_stations);
spin_lock_init(&ifocb->incomplete_lock);
}
if (sta_prepare_rate_control(local, sta, gfp))
goto free_txq;
- for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
- /*
- * timer_to_tid must be initialized with identity mapping
- * to enable session_timer's data differentiation. See
- * sta_rx_agg_session_timer_expired for usage.
- */
- sta->timer_to_tid[i] = i;
- }
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
skb_queue_head_init(&sta->ps_tx_buf[i]);
skb_queue_head_init(&sta->tx_filtered[i]);
return ret;
}
-static void sta_info_cleanup(unsigned long data)
+static void sta_info_cleanup(struct timer_list *t)
{
- struct ieee80211_local *local = (struct ieee80211_local *) data;
+ struct ieee80211_local *local = from_timer(local, t, sta_cleanup);
struct sta_info *sta;
bool timer_needed = false;
mutex_init(&local->sta_mtx);
INIT_LIST_HEAD(&local->sta_list);
- setup_timer(&local->sta_cleanup, sta_info_cleanup,
- (unsigned long)local);
+ timer_setup(&local->sta_cleanup, sta_info_cleanup, 0);
return 0;
}
AGG_STOP_DESTROY_STA,
};
+struct sta_info;
+
/**
* struct tid_ampdu_tx - TID aggregation information (Tx).
*
* @session_timer: check if we keep Tx-ing on the TID (by timeout value)
* @addba_resp_timer: timer for peer's response to addba request
* @pending: pending frames queue -- use sta's spinlock to protect
+ * @sta: station we are attached to
* @dialog_token: dialog token for aggregation session
* @timeout: session timeout value to be filled in ADDBA requests
+ * @tid: TID number
* @state: session state (see above)
* @last_tx: jiffies of last tx activity
* @stop_initiator: initiator of a session stop
struct timer_list session_timer;
struct timer_list addba_resp_timer;
struct sk_buff_head pending;
+ struct sta_info *sta;
unsigned long state;
unsigned long last_tx;
u16 timeout;
u16 failed_bar_ssn;
bool bar_pending;
bool amsdu;
+ u8 tid;
};
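
The `struct sta_info;` forward declaration added before the kernel-doc exists because these TID structs now hold a back-pointer to a type defined later in the same header. C allows pointers to incomplete types, so a one-line declaration is all that is needed:

        struct sta_info;                /* incomplete (opaque) here */

        struct example_holder {
                struct sta_info *sta;   /* OK: pointer to incomplete type */
                /* struct sta_info by_value;   would not compile here */
        };
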
/**
* @reorder_time: jiffies when skb was added
* @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
* @reorder_timer: releases expired frames from the reorder buffer.
+ * @sta: station we are attached to
* @last_rx: jiffies of last rx activity
* @head_seq_num: head sequence number in reordering buffer.
* @stored_mpdu_num: number of MPDUs in reordering buffer
* @ssn: Starting Sequence Number expected to be aggregated.
* @buf_size: buffer size for incoming A-MPDUs
* @timeout: reset timer value (in TUs).
+ * @tid: TID number
* @rcu_head: RCU head used for freeing this struct
* @reorder_lock: serializes access to reorder buffer, see below.
* @auto_seq: used for offloaded BA sessions to automatically pick head_seq_num and
u64 reorder_buf_filtered;
struct sk_buff_head *reorder_buf;
unsigned long *reorder_time;
+ struct sta_info *sta;
struct timer_list session_timer;
struct timer_list reorder_timer;
unsigned long last_rx;
u16 ssn;
u16 buf_size;
u16 timeout;
+ u8 tid;
u8 auto_seq:1,
removed:1,
started:1;
* plus one for non-QoS frames)
* @tid_seq: per-TID sequence numbers for sending to this STA
* @ampdu_mlme: A-MPDU state machine state
- * @timer_to_tid: identity mapping to ID timers
* @mesh: mesh STA information
* @debugfs_dir: debug filesystem directory dentry
* @dead: set to true when sta is unlinked
* Aggregation information, locked with lock.
*/
struct sta_ampdu_mlme ampdu_mlme;
- u8 timer_to_tid[IEEE80211_NUM_TIDS];
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *debugfs_dir;
tipc_group_proto_xmit(grp, m, GRP_ACK_MSG, xmitq);
if (leave) {
- tipc_group_delete_member(grp, m);
__skb_queue_purge(defq);
+ tipc_group_delete_member(grp, m);
break;
}
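
The reorder matters because defq aliases storage inside the member (in tipc_group.c it points at the member's deferred queue), so the queue must be drained before the member is freed; the old order was a use-after-free. In sketch form, assuming defq is &m->deferredq as in the upstream code:

        __skb_queue_purge(defq);            /* drain storage inside *m ... */
        tipc_group_delete_member(grp, m);   /* ... before freeing m */
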
if (!update)
goto nla_put_failure;
}
- if (wdev->ssid_len) {
- if (nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
+ wdev_lock(wdev);
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ if (wdev->ssid_len &&
+ nla_put(msg, NL80211_ATTR_SSID, wdev->ssid_len, wdev->ssid))
- goto nla_put_failure;
+ goto nla_put_failure_locked;
+ break;
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_ADHOC: {
+ const u8 *ssid_ie;
+ if (!wdev->current_bss)
+ break;
+ ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
+ WLAN_EID_SSID);
+ if (!ssid_ie)
+ break;
+ if (nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
+ goto nla_put_failure_locked;
+ break;
+ }
+ default:
+ /* nothing */
+ break;
}
+ wdev_unlock(wdev);
genlmsg_end(msg, hdr);
return 0;
}
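
The gotos in the switch above run with wdev_lock() held, so they target a locked-failure label. A sketch of the paired-label pattern this assumes at the tail of the function (the labels themselves fall outside this hunk):

        /* failure detected under wdev_lock(): drop the lock, then fall
         * through to the ordinary netlink failure path
         */
 nla_put_failure_locked:
        wdev_unlock(wdev);
 nla_put_failure:
        genlmsg_cancel(msg, hdr);
        return -EMSGSIZE;
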
}
-int __init regulatory_init(void)
+static int __init regulatory_init_db(void)
{
- int err = 0;
+ int err;
err = load_builtin_regdb_keys();
if (err)
return err;
- reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
- if (IS_ERR(reg_pdev))
- return PTR_ERR(reg_pdev);
-
- spin_lock_init(&reg_requests_lock);
- spin_lock_init(&reg_pending_beacons_lock);
- spin_lock_init(&reg_indoor_lock);
-
- rcu_assign_pointer(cfg80211_regdomain, cfg80211_world_regdom);
-
- user_alpha2[0] = '9';
- user_alpha2[1] = '7';
-
/* We always try to get an update for the static regdomain */
err = regulatory_hint_core(cfg80211_world_regdom->alpha2);
if (err) {
return 0;
}
+#ifndef MODULE
+late_initcall(regulatory_init_db);
+#endif
+
+int __init regulatory_init(void)
+{
+ reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
+ if (IS_ERR(reg_pdev))
+ return PTR_ERR(reg_pdev);
+
+ spin_lock_init(&reg_requests_lock);
+ spin_lock_init(&reg_pending_beacons_lock);
+ spin_lock_init(&reg_indoor_lock);
+
+ rcu_assign_pointer(cfg80211_regdomain, cfg80211_world_regdom);
+
+ user_alpha2[0] = '9';
+ user_alpha2[1] = '7';
+
+#ifdef MODULE
+ return regulatory_init_db();
+#else
+ return 0;
+#endif
+}
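
The split reflects an initcall-timing constraint: loading the regulatory database can reach into the firmware loader, which is not usable at the early initcall where regulatory_init() runs when cfg80211 is built in, so the database step is deferred to late_initcall time; a module's init already runs late enough, so that path calls regulatory_init_db() inline. A sketch of the conditional-initcall shape, with hypothetical names:

        #include <linux/init.h>

        static int __init example_db_init(void)
        {
                return 0;       /* firmware-backed setup goes here */
        }

        #ifndef MODULE
        /* built-in: defer until the firmware loader and rootfs are usable */
        late_initcall(example_db_init);
        #endif
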
void regulatory_exit(void)
{