Resizing fq hash table allocates memory while holding qdisc spinlock,
with BH disabled.
This is definitely not good, as allocation might sleep.
We can drop the lock and get it when needed, we hold RTNL so no other
changes can happen at the same time.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Fixes: afe4fd062416 ("pkt_sched: fq: Fair Queue packet scheduler")
Signed-off-by: David S. Miller <davem@davemloft.net>
{
struct fq_sched_data *q = qdisc_priv(sch);
struct rb_root *array;
{
struct fq_sched_data *q = qdisc_priv(sch);
struct rb_root *array;
u32 idx;
if (q->fq_root && log == q->fq_trees_log)
u32 idx;
if (q->fq_root && log == q->fq_trees_log)
for (idx = 0; idx < (1U << log); idx++)
array[idx] = RB_ROOT;
for (idx = 0; idx < (1U << log); idx++)
array[idx] = RB_ROOT;
- if (q->fq_root) {
- fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
- fq_free(q->fq_root);
- }
+ sch_tree_lock(sch);
+
+ old_fq_root = q->fq_root;
+ if (old_fq_root)
+ fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
+
q->fq_root = array;
q->fq_trees_log = log;
q->fq_root = array;
q->fq_trees_log = log;
+ sch_tree_unlock(sch);
+
+ fq_free(old_fq_root);
+
q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
}
q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
}
+ if (!err) {
+ sch_tree_unlock(sch);
err = fq_resize(sch, fq_log);
err = fq_resize(sch, fq_log);
+ sch_tree_lock(sch);
+ }
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = fq_dequeue(sch);
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = fq_dequeue(sch);