From: Vlad Buslov
Date: Mon, 21 May 2018 20:03:04 +0000 (+0300)
Subject: net: sched: don't disable bh when accessing action idr
X-Git-Tag: v5.15~8692^2~175
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=290aa0ad74c995c60d94fb4f1d66d411efa13dd5;p=platform%2Fkernel%2Flinux-starfive.git

net: sched: don't disable bh when accessing action idr

The initial net_device implementation used the ingress_lock spinlock to
synchronize the ingress path of the device. This lock was taken in both
process and bh context. In some code paths the action map lock was
acquired while holding ingress_lock. Commit e1e992e52faa ("[NET_SCHED]
protect action config/dump from irqs") therefore modified actions to
always disable bh while using the action map lock, in order to prevent a
deadlock on ingress_lock in softirq. That lock has since been removed
from net_device, so disabling bh while accessing the action map is no
longer necessary.

Replace all action idr spinlock usage with regular calls that do not
disable bh.

Signed-off-by: Vlad Buslov
Acked-by: Jamal Hadi Salim
Signed-off-by: David S. Miller
---

diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 72251241..3f4cf93 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -77,9 +77,9 @@ static void free_tcf(struct tc_action *p)
 
 static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
 {
-	spin_lock_bh(&idrinfo->lock);
+	spin_lock(&idrinfo->lock);
 	idr_remove(&idrinfo->action_idr, p->tcfa_index);
-	spin_unlock_bh(&idrinfo->lock);
+	spin_unlock(&idrinfo->lock);
 	gen_kill_estimator(&p->tcfa_rate_est);
 	free_tcf(p);
 }
@@ -156,7 +156,7 @@ static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
 	struct tc_action *p;
 	unsigned long id = 1;
 
-	spin_lock_bh(&idrinfo->lock);
+	spin_lock(&idrinfo->lock);
 
 	s_i = cb->args[0];
 
@@ -191,7 +191,7 @@ done:
 	if (index >= 0)
 		cb->args[0] = index + 1;
 
-	spin_unlock_bh(&idrinfo->lock);
+	spin_unlock(&idrinfo->lock);
 	if (n_i) {
 		if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
 			cb->args[1] = n_i;
@@ -261,9 +261,9 @@ static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo)
 {
 	struct tc_action *p = NULL;
 
-	spin_lock_bh(&idrinfo->lock);
+	spin_lock(&idrinfo->lock);
 	p = idr_find(&idrinfo->action_idr, index);
-	spin_unlock_bh(&idrinfo->lock);
+	spin_unlock(&idrinfo->lock);
 
 	return p;
 }
@@ -323,7 +323,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 	}
 	spin_lock_init(&p->tcfa_lock);
 	idr_preload(GFP_KERNEL);
-	spin_lock_bh(&idrinfo->lock);
+	spin_lock(&idrinfo->lock);
 	/* user doesn't specify an index */
 	if (!index) {
 		index = 1;
@@ -331,7 +331,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 	} else {
 		err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC);
 	}
-	spin_unlock_bh(&idrinfo->lock);
+	spin_unlock(&idrinfo->lock);
 	idr_preload_end();
 	if (err)
 		goto err3;
@@ -369,9 +369,9 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
 {
 	struct tcf_idrinfo *idrinfo = tn->idrinfo;
 
-	spin_lock_bh(&idrinfo->lock);
+	spin_lock(&idrinfo->lock);
 	idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
-	spin_unlock_bh(&idrinfo->lock);
+	spin_unlock(&idrinfo->lock);
 }
 EXPORT_SYMBOL(tcf_idr_insert);
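
For context, a minimal sketch of the distinction the patch relies on; it is
not part of the patch, and example_lock and process_context_only_update are
illustrative names that do not exist in act_api.c:

#include <linux/spinlock.h>

/* Illustrative only: a lock that is never taken from softirq context. */
static DEFINE_SPINLOCK(example_lock);

static void process_context_only_update(void)
{
	/* spin_lock_bh() additionally disables bottom-half (softirq)
	 * processing on the local CPU before taking the lock. That is only
	 * needed when the same lock can also be contended from softirq
	 * context; otherwise it just adds overhead.
	 */
	spin_lock(&example_lock);
	/* ... update shared state reached from process context only ... */
	spin_unlock(&example_lock);
}

With ingress_lock gone, idrinfo->lock falls into the process-context-only
case, which is why the plain spin_lock()/spin_unlock() pair is sufficient
throughout the diff above.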