* @skb: the OGM to check
* @hard_iface: the interface to use to send the OGM
*
- * Caller needs to hold the hard_iface->bat_v.aggr_list_lock.
+ * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
*
* Return: True, if the given OGMv2 packet still fits, false otherwise.
*/
BATADV_MAX_AGGREGATION_BYTES);
unsigned int ogm_len = batadv_v_ogm_len(skb);
- lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock);
+ lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
return hard_iface->bat_v.aggr_len + ogm_len <= max;
}
*
* Empties the OGMv2 aggregation queue and frees all the skbs it contained.
*
- * Caller needs to hold the hard_iface->bat_v.aggr_list_lock.
+ * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
*/
static void batadv_v_ogm_aggr_list_free(struct batadv_hard_iface *hard_iface)
{
- lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock);
+ lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
- skb_queue_purge(&hard_iface->bat_v.aggr_list);
+ __skb_queue_purge(&hard_iface->bat_v.aggr_list);
hard_iface->bat_v.aggr_len = 0;
}
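/*
 * Illustrative sketch (not part of the patch; the my_* names are
 * hypothetical): struct sk_buff_head already embeds a spinlock, so a
 * separate aggr_list_lock is redundant. When that embedded lock is taken
 * explicitly, the double-underscore helpers (__skb_queue_purge(),
 * __skb_dequeue(), ...) must be used; the plain variants take queue->lock
 * themselves and would deadlock under the held lock.
 */
#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void my_init(struct sk_buff_head *queue)
{
	skb_queue_head_init(queue);	/* initializes the list and queue->lock */
}

static void my_drop_all(struct sk_buff_head *queue)
{
	spin_lock_bh(&queue->lock);
	__skb_queue_purge(queue);	/* lockless variant: we already hold queue->lock */
	spin_unlock_bh(&queue->lock);
}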
*
* The aggregation queue is empty after this call.
*
- * Caller needs to hold the hard_iface->bat_v.aggr_list_lock.
+ * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
*/
static void batadv_v_ogm_aggr_send(struct batadv_hard_iface *hard_iface)
{
unsigned int ogm_len;
struct sk_buff *skb;
- lockdep_assert_held(&hard_iface->bat_v.aggr_list_lock);
+ lockdep_assert_held(&hard_iface->bat_v.aggr_list.lock);
if (!aggr_len)
return;
skb_reserve(skb_aggr, ETH_HLEN + NET_IP_ALIGN);
skb_reset_network_header(skb_aggr);
- while ((skb = skb_dequeue(&hard_iface->bat_v.aggr_list))) {
+ while ((skb = __skb_dequeue(&hard_iface->bat_v.aggr_list))) {
hard_iface->bat_v.aggr_len -= batadv_v_ogm_len(skb);
ogm_len = batadv_v_ogm_len(skb);
return;
}
- spin_lock_bh(&hard_iface->bat_v.aggr_list_lock);
+ spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
if (!batadv_v_ogm_queue_left(skb, hard_iface))
batadv_v_ogm_aggr_send(hard_iface);
hard_iface->bat_v.aggr_len += batadv_v_ogm_len(skb);
- skb_queue_tail(&hard_iface->bat_v.aggr_list, skb);
- spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock);
+ __skb_queue_tail(&hard_iface->bat_v.aggr_list, skb);
+ spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
}
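/*
 * Sketch of the enqueue pattern used above (hypothetical my_aggr/my_enqueue
 * names, real skb and locking APIs): the queue's own lock serializes the
 * length check, the flush of a full queue and the enqueue itself, so the
 * byte counter can never disagree with the list contents.
 */
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct my_aggr {
	struct sk_buff_head list;	/* pending buffers, protected by list.lock */
	unsigned int len;		/* bytes currently queued */
	unsigned int max;		/* flush threshold */
};

static void my_enqueue(struct my_aggr *a, struct sk_buff *skb)
{
	spin_lock_bh(&a->list.lock);

	/* flush first if this packet would exceed the threshold */
	if (a->len + skb->len > a->max) {
		__skb_queue_purge(&a->list);	/* stand-in for "send and empty" */
		a->len = 0;
	}

	a->len += skb->len;
	__skb_queue_tail(&a->list, skb);

	spin_unlock_bh(&a->list.lock);
}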
/**
batv = container_of(work, struct batadv_hard_iface_bat_v, aggr_wq.work);
hard_iface = container_of(batv, struct batadv_hard_iface, bat_v);
- spin_lock_bh(&hard_iface->bat_v.aggr_list_lock);
+ spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
batadv_v_ogm_aggr_send(hard_iface);
- spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock);
+ spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
batadv_v_ogm_start_queue_timer(hard_iface);
}
{
cancel_delayed_work_sync(&hard_iface->bat_v.aggr_wq);
- spin_lock_bh(&hard_iface->bat_v.aggr_list_lock);
+ spin_lock_bh(&hard_iface->bat_v.aggr_list.lock);
batadv_v_ogm_aggr_list_free(hard_iface);
- spin_unlock_bh(&hard_iface->bat_v.aggr_list_lock);
+ spin_unlock_bh(&hard_iface->bat_v.aggr_list.lock);
}
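/*
 * Sketch of the teardown ordering above (assumed my_ctx/my_stop names,
 * real kernel APIs): stop the periodic worker first so it can neither
 * re-arm itself nor touch the queue, then drop whatever is still queued
 * while holding the queue's own lock.
 */
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_ctx {
	struct sk_buff_head list;
	struct delayed_work wq;
};

static void my_stop(struct my_ctx *ctx)
{
	/* wait for a running worker and prevent future runs */
	cancel_delayed_work_sync(&ctx->wq);

	spin_lock_bh(&ctx->list.lock);
	__skb_queue_purge(&ctx->list);
	spin_unlock_bh(&ctx->list.lock);
}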
/**