bss_prio->priv = priv;
INIT_LIST_HEAD(&bss_prio->list);
- if (!tbl[priv->bss_priority].bss_prio_cur)
- tbl[priv->bss_priority].bss_prio_cur = bss_prio;
spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
list_add_tail(&bss_prio->list, &tbl[priv->bss_priority].bss_prio_head);
for (i = 0; i < adapter->priv_num; ++i) {
INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head);
- adapter->bss_prio_tbl[i].bss_prio_cur = NULL;
spin_lock_init(&adapter->bss_prio_tbl[i].bss_prio_lock);
}
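For orientation, the two structures these hunks operate on are declared roughly as follows in the driver (abridged sketch from main.h, not verbatim):

struct mwifiex_bss_prio_node {
	struct list_head list;
	struct mwifiex_private *priv;
};

struct mwifiex_bss_prio_tbl {
	struct list_head bss_prio_head;
	spinlock_t bss_prio_lock;
	/* survives this patch, but only as the scheduler's roving cursor */
	struct mwifiex_bss_prio_node *bss_prio_cur;
};

With the cursor no longer acting as the scheduler's resume point, there is nothing to seed at node-insert time, which is why the seeding on first insert and the NULL initialisation above go away.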
{
int i;
struct mwifiex_adapter *adapter = priv->adapter;
- struct mwifiex_bss_prio_node *bssprio_node, *tmp_node, **cur;
+ struct mwifiex_bss_prio_node *bssprio_node, *tmp_node;
struct list_head *head;
spinlock_t *lock; /* bss priority lock */
unsigned long flags;
for (i = 0; i < adapter->priv_num; ++i) {
head = &adapter->bss_prio_tbl[i].bss_prio_head;
- cur = &adapter->bss_prio_tbl[i].bss_prio_cur;
lock = &adapter->bss_prio_tbl[i].bss_prio_lock;
dev_dbg(adapter->dev, "info: delete BSS priority table,"
" bss_type = %d, bss_num = %d, i = %d,"
- " head = %p, cur = %p\n",
- priv->bss_type, priv->bss_num, i, head, *cur);
- if (*cur) {
+ " head = %p\n",
+ priv->bss_type, priv->bss_num, i, head);
+
+ {
spin_lock_irqsave(lock, flags);
if (list_empty(head)) {
spin_unlock_irqrestore(lock, flags);
continue;
}
- bssprio_node = list_first_entry(head,
- struct mwifiex_bss_prio_node, list);
- spin_unlock_irqrestore(lock, flags);
-
list_for_each_entry_safe(bssprio_node, tmp_node, head,
list) {
if (bssprio_node->priv == priv) {
dev_dbg(adapter->dev, "info: Delete "
"node %p, next = %p\n",
bssprio_node, tmp_node);
- spin_lock_irqsave(lock, flags);
list_del(&bssprio_node->list);
- spin_unlock_irqrestore(lock, flags);
kfree(bssprio_node);
}
}
- *cur = (struct mwifiex_bss_prio_node *)head;
+ spin_unlock_irqrestore(lock, flags);
}
}
}
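The essential fix in this hunk: the old code took the first node and then dropped the lock before walking the list, so a concurrent writer could unlink and free a node under the walker. list_for_each_entry_safe() only caches ->next against deletions made by the loop itself; it offers no protection against other contexts, so the spinlock has to cover the entire walk. The distilled pattern, as an illustrative helper (not driver code):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static void purge_nodes_of(struct list_head *head, spinlock_t *lock,
			   struct mwifiex_private *priv)
{
	struct mwifiex_bss_prio_node *node, *tmp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* _safe: tmp holds ->next so our own list_del() is harmless */
	list_for_each_entry_safe(node, tmp, head, list) {
		if (node->priv == priv) {
			list_del(&node->list);
			kfree(node);
		}
	}
	spin_unlock_irqrestore(lock, flags);
}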
{
struct mwifiex_private *priv_tmp;
struct mwifiex_ra_list_tbl *ptr;
- struct mwifiex_bss_prio_node *bssprio_node, *bssprio_head;
struct mwifiex_tid_tbl *tid_ptr;
atomic_t *hqp;
unsigned long flags_bss, flags_ra;
int i, j;
+ /* check the BSS with the highest priority first */
for (j = adapter->priv_num - 1; j >= 0; --j) {
spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
flags_bss);
- if (list_empty(&adapter->bss_prio_tbl[j].bss_prio_head))
- goto skip_prio_tbl;
-
- if (adapter->bss_prio_tbl[j].bss_prio_cur ==
- (struct mwifiex_bss_prio_node *)
- &adapter->bss_prio_tbl[j].bss_prio_head) {
- adapter->bss_prio_tbl[j].bss_prio_cur =
- list_first_entry(&adapter->bss_prio_tbl[j]
- .bss_prio_head,
- struct mwifiex_bss_prio_node,
- list);
- }
-
- bssprio_node = adapter->bss_prio_tbl[j].bss_prio_cur;
- bssprio_head = bssprio_node;
+ /* iterate over BSS nodes with equal priority */
+ list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
+ &adapter->bss_prio_tbl[j].bss_prio_head,
+ list) {
- do {
- priv_tmp = bssprio_node->priv;
+ priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
- goto skip_bss;
+ continue;
/* iterate over the WMM queues of the BSS */
hqp = &priv_tmp->wmm.highest_queued_prio;
ra_list_spinlock,
flags_ra);
}
+ }
-skip_bss:
- /* Get next bss priority node */
- bssprio_node = list_first_entry(&bssprio_node->list,
- struct mwifiex_bss_prio_node,
- list);
-
- if (bssprio_node ==
- (struct mwifiex_bss_prio_node *)
- &adapter->bss_prio_tbl[j].bss_prio_head)
- /* Get next bss priority node */
- bssprio_node = list_first_entry(
- &bssprio_node->list,
- struct mwifiex_bss_prio_node,
- list);
- } while (bssprio_node != bssprio_head);
-
-skip_prio_tbl:
spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
flags_bss);
}
return ptr;
}
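A note on the loop idiom above: the table's own bss_prio_cur field is used directly as the list_for_each_entry() cursor, so when the function bails out with a packet in hand, the field already records which BSS node was served, and mwifiex_rotate_priolists() can rotate the head past exactly that node. The one sharp edge, sketched below (illustrative code, node_is_ready() is hypothetical): if the loop runs to completion, the cursor ends up as the container_of() of the list head itself and must not be dereferenced unless a hit occurred.

#include <linux/list.h>

static struct mwifiex_bss_prio_node *
pick_first_ready(struct mwifiex_bss_prio_tbl *tbl)
{
	list_for_each_entry(tbl->bss_prio_cur, &tbl->bss_prio_head, list) {
		if (node_is_ready(tbl->bss_prio_cur))	/* hypothetical */
			return tbl->bss_prio_cur;	/* cursor marks the pick */
	}
	/* fell off the end: bss_prio_cur aliases the head, do not deref */
	return NULL;
}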
-/* This functions rotates ra lists so packets are picked in round robin
- * fashion.
+/* This function rotates ra and bss lists so packets are picked round robin.
*
* After a packet is successfully transmitted, rotate the ra list, so the ra
* next to the one transmitted, will come first in the list. This way we pick
- * the ra in a round robin fashion.
+ * the ra in a round robin fashion. The same applies to bss nodes of equal
+ * priority.
*
* Function also increments wmm.packets_out counter.
*/
struct mwifiex_ra_list_tbl *ra,
int tid)
{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
unsigned long flags;
+ spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
+ /*
+ * dirty trick: we remove 'head' temporarily and reinsert it after
+ * the current bss node, so the next walk starts at that node's
+ * successor. imagine the list staying fixed while only head moves
+ */
+ list_move(&tbl[priv->bss_priority].bss_prio_head,
+ &tbl[priv->bss_priority].bss_prio_cur->list);
+ spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);
+
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
if (mwifiex_is_ralist_valid(priv, ra, tid)) {
priv->wmm.packets_out[tid]++;
- /*
- * dirty trick: we remove 'head' temporarily and reinsert it
- * after curr bss node. imagine list to stay fixed while only
- * head is moved
- */
+ /* same trick as above, applied here to the ra list */
list_move(&tid_ptr->ra_list, &ra->list);
}
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
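To make the rotation visible, here is a self-contained userspace model of the trick; all names are illustrative and the list primitives mirror the <linux/list.h> semantics. Moving the head sentinel so it sits immediately after the just-served node means the next traversal begins at that node's successor: a round-robin rotation that never touches the elements themselves.

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add_after(struct list_head *e, struct list_head *pos)
{
	e->next = pos->next;
	e->prev = pos;
	pos->next->prev = e;
	pos->next = e;
}

static void list_del_node(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

/* same semantics as the kernel's list_move(): unlink, reinsert after */
static void list_move_after(struct list_head *e, struct list_head *pos)
{
	list_del_node(e);
	list_add_after(e, pos);
}

struct node { int id; struct list_head list; };
#define node_of(p) ((struct node *)((char *)(p) - offsetof(struct node, list)))

int main(void)
{
	struct list_head head;
	struct node n[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
	struct list_head *p;
	int i;

	list_init(&head);
	for (i = 0; i < 3; i++)
		list_add_after(&n[i].list, head.prev);	/* tail insert */

	/* node 1 was just served: park head right after it */
	list_move_after(&head, &n[1].list);

	/* a fresh walk now starts at node 2 and wraps: prints "2 0 1" */
	for (p = head.next; p != &head; p = p->next)
		printf("%d ", node_of(p)->id);
	printf("\n");
	return 0;
}

This is also why the two hunks below simply drop the explicit bss_prio_cur advancement after mwifiex_rotate_priolists(): once the head has been moved past the served node, the next walk starts at its successor automatically.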
ra_list_flags);
} else {
mwifiex_rotate_priolists(priv, ptr, ptr_index);
- adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
- list_first_entry(
- &adapter->bss_prio_tbl[priv->bss_priority]
- .bss_prio_cur->list,
- struct mwifiex_bss_prio_node,
- list);
atomic_dec(&priv->wmm.tx_pkts_queued);
}
}
}
if (ret != -EBUSY) {
mwifiex_rotate_priolists(priv, ptr, ptr_index);
- adapter->bss_prio_tbl[priv->bss_priority].bss_prio_cur =
- list_first_entry(
- &adapter->bss_prio_tbl[priv->bss_priority]
- .bss_prio_cur->list,
- struct mwifiex_bss_prio_node,
- list);
atomic_dec(&priv->wmm.tx_pkts_queued);
}
}