 	INIT_LIST_HEAD(&dev->wcid_list);
 	INIT_LIST_HEAD(&dev->txwi_cache);
+	dev->token_size = dev->drv->token_size;
 	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
 		skb_queue_head_init(&dev->rx_skb[i]);
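The per-device field is seeded from the constant in mt76_driver_ops at allocation time, so a driver can later adjust the token space without touching the read-only ops structure. A minimal, hypothetical probe-time sketch (the low_power flag, the size/ops locals and the 1024 value are assumptions, not part of this change):

	struct mt76_dev *mdev = mt76_alloc_device(pdev, size, ops, &drv_ops);

	if (!mdev)
		return -ENOMEM;

	/* hypothetical override of the default copied from drv->token_size */
	if (low_power)
		mdev->token_size = 1024;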
 	spinlock_t token_lock;
 	struct idr token;
-	int token_count;
+	u16 token_count;
+	u16 token_size;
 	wait_queue_head_t tx_wait;
 	/* spinlock used to protect wcid pktid linked list */
 	int token;
 	spin_lock_bh(&dev->token_lock);
-	token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
-			  GFP_ATOMIC);
+	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
 	spin_unlock_bh(&dev->token_lock);
 	return token;
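Both allocation sites now read the limit from the device rather than from mt76_driver_ops. For context, the simple helper above ends up reading roughly as follows (function name and signature assumed from mt76 conventions, not part of the hunk itself):

static inline int
mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);
	/* reserve an id in [0, dev->token_size) and map it to the txwi */
	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);

	return token;
}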
 	spin_lock_bh(&dev->token_lock);
-	token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
-			  GFP_ATOMIC);
+	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
 	if (token >= 0)
 		dev->token_count++;
-	if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR)
+	if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
 		__mt76_set_tx_blocked(dev, true);
 	spin_unlock_bh(&dev->token_lock);
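This second allocation path also counts outstanding tokens and stops the tx queues once fewer than MT76_TOKEN_FREE_THR ids remain free. A sketch of a typical caller in a driver tx path (helper and variable names are assumptions):

	/* map the txwi to a token before filling the DMA descriptor;
	 * drop the frame if the token space is exhausted
	 */
	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;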
 	if (txwi)
 		dev->token_count--;
-	if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR &&
+	if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
 	    dev->phy.q_tx[0]->blocked)
 		*wake = true;
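On release, once the outstanding count drops back below the same threshold while queue 0 is still marked blocked, the wake flag asks the caller to unblock tx. The expected caller-side handling looks roughly like this (assumed from the __mt76_set_tx_blocked() counterpart used above):

	if (wake) {
		/* clear the blocked state set when the pool ran low and
		 * kick the tx worker so queued frames get another chance
		 */
		mt76_set_tx_blocked(dev, false);
		mt76_worker_schedule(&dev->tx_worker);
	}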