unsigned int mem_block_size;
unsigned int rx_size;
unsigned int tx_seq_table;
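+	/* set when the firmware supports CARL9170FW_RX_BA_FILTER */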
+ bool ba_filter;
} fw;
/* interface configuration combinations */
struct sk_buff *rx_failover;
int rx_failover_missing;
+	/* FIFO for collecting outstanding BlockAckRequests */
+ struct list_head bar_list[__AR9170_NUM_TXQ];
+ spinlock_t bar_list_lock[__AR9170_NUM_TXQ];
+
#ifdef CONFIG_CARL9170_WPC
struct {
bool pbc_state;
PS_OFF_BCN = BIT(1),
};
+struct carl9170_bar_list_entry {
+ struct list_head list;
+ struct rcu_head head;
+ struct sk_buff *skb;
+};
+
struct carl9170_ba_stats {
u8 ampdu_len;
u8 ampdu_ack_len;
if (SUPP(CARL9170FW_WOL))
device_set_wakeup_enable(&ar->udev->dev, true);
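+	/* this firmware can filter BlockAcks by itself */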
+ if (SUPP(CARL9170FW_RX_BA_FILTER))
+ ar->fw.ba_filter = true;
+
if_comb_types = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT);
if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
u32 rx_filter = 0;
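+		/* without the in-firmware BA filter, keep dropping
+		 * all "other" control frames
+		 */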
+ if (!ar->fw.ba_filter)
+ rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
+
if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
rx_filter |= CARL9170_RX_FILTER_BAD;
for (i = 0; i < ar->hw->queues; i++) {
skb_queue_head_init(&ar->tx_status[i]);
skb_queue_head_init(&ar->tx_pending[i]);
+
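+		/* set up the per-queue lists of outstanding BARs */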
+ INIT_LIST_HEAD(&ar->bar_list[i]);
+ spin_lock_init(&ar->bar_list_lock[i]);
}
INIT_WORK(&ar->ps_work, carl9170_ps_work);
INIT_WORK(&ar->ping_work, carl9170_ping_work);
}
}
+static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
+{
+ struct ieee80211_bar *bar = (void *) data;
+ struct carl9170_bar_list_entry *entry;
+ unsigned int queue;
+
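+	/* only incoming BlockAcks are of interest here */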
+ if (likely(!ieee80211_is_back(bar->frame_control)))
+ return;
+
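+	/* a BA carries a bitmap and is therefore longer than a bare BAR */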
+ if (len <= sizeof(*bar) + FCS_LEN)
+ return;
+
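+	/* the BA's TID selects the AC queue its BAR went out on */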
+ queue = TID_TO_WME_AC(((le16_to_cpu(bar->control) &
+ IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+ IEEE80211_BAR_CTRL_TID_INFO_SHIFT) & 7);
+
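+	/* search this queue's outstanding BARs for a matching one */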
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
+ struct sk_buff *entry_skb = entry->skb;
+ struct _carl9170_tx_superframe *super = (void *)entry_skb->data;
+ struct ieee80211_bar *entry_bar = (void *)super->frame_data;
+
+#define TID_CHECK(a, b) (					\
+	((a) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)) ==	\
+	((b) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)))
+
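+		/* the BA must mirror the BAR: same ssn, TID and swapped ra/ta */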
+ if (bar->start_seq_num == entry_bar->start_seq_num &&
+ TID_CHECK(bar->control, entry_bar->control) &&
+ compare_ether_addr(bar->ra, entry_bar->ta) == 0 &&
+ compare_ether_addr(bar->ta, entry_bar->ra) == 0) {
+ struct ieee80211_tx_info *tx_info;
+
+ tx_info = IEEE80211_SKB_CB(entry_skb);
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+
+ spin_lock_bh(&ar->bar_list_lock[queue]);
+ list_del_rcu(&entry->list);
+ spin_unlock_bh(&ar->bar_list_lock[queue]);
+ kfree_rcu(entry, head);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+#undef TID_CHECK
+}
+
static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
{
__le16 fc;
carl9170_ps_beacon(ar, buf, mpdu_len);
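+	/* scan for BlockAcks that complete one of our queued BARs */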
+ carl9170_ba_check(ar, buf, mpdu_len);
+
skb = carl9170_rx_copy_data(buf, mpdu_len);
if (!skb)
goto drop;
rcu_read_unlock();
}
+static void carl9170_tx_bar_status(struct ar9170 *ar, struct sk_buff *skb,
+ struct ieee80211_tx_info *tx_info)
+{
+ struct _carl9170_tx_superframe *super = (void *) skb->data;
+ struct ieee80211_bar *bar = (void *) super->frame_data;
+
+	/*
+	 * Unlike all other frames, the status report for BARs does
+	 * not come directly from the hardware, as it is incapable of
+	 * matching a BA to a previously sent BAR.
+	 * Instead, the RX path will scan for incoming BAs and set
+	 * IEEE80211_TX_STAT_ACK on any that was likely caused by a
+	 * BAR from us.
+	 */
+
+ if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
+ !(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
+ struct carl9170_bar_list_entry *entry;
+ int queue = skb_get_queue_mapping(skb);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
+ if (entry->skb == skb) {
+ spin_lock_bh(&ar->bar_list_lock[queue]);
+ list_del_rcu(&entry->list);
+ spin_unlock_bh(&ar->bar_list_lock[queue]);
+ kfree_rcu(entry, head);
+ goto out;
+ }
+ }
+
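+		/* every BAR should have been booked by carl9170_bar_check() */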
+ WARN(1, "bar not found in %d - ra:%pM ta:%pM c:%x ssn:%x\n",
+ queue, bar->ra, bar->ta, bar->control,
+ bar->start_seq_num);
+out:
+ rcu_read_unlock();
+ }
+}
+
void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
const bool success)
{
txinfo = IEEE80211_SKB_CB(skb);
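+	/* BAR frames get their ACK status from the RX path, not from here */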
+ carl9170_tx_bar_status(ar, skb, txinfo);
+
if (success)
txinfo->flags |= IEEE80211_TX_STAT_ACK;
else
		ar->tx_ack_failures++;
}
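+/* store outgoing BARs, so the RX path (carl9170_ba_check) can
+ * match the peer's BlockAck against them
+ */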
+static void carl9170_bar_check(struct ar9170 *ar, struct sk_buff *skb)
+{
+ struct _carl9170_tx_superframe *super = (void *) skb->data;
+ struct ieee80211_bar *bar = (void *) super->frame_data;
+
+ if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
+ skb->len >= sizeof(struct ieee80211_bar)) {
+ struct carl9170_bar_list_entry *entry;
+ unsigned int queue = skb_get_queue_mapping(skb);
+
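+		/* no sleeping in the tx path, hence GFP_ATOMIC */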
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!WARN_ON_ONCE(!entry)) {
+ entry->skb = skb;
+ spin_lock_bh(&ar->bar_list_lock[queue]);
+ list_add_tail_rcu(&entry->list, &ar->bar_list[queue]);
+ spin_unlock_bh(&ar->bar_list_lock[queue]);
+ }
+ }
+}
+
static void carl9170_tx(struct ar9170 *ar)
{
struct sk_buff *skb;
if (unlikely(carl9170_tx_ps_drop(ar, skb)))
continue;
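+		/* register outgoing BARs before the frame leaves for the hardware */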
+ carl9170_bar_check(ar, skb);
+
atomic_inc(&ar->tx_total_pending);
q = __carl9170_get_queue(ar, i);