From 6f3dfb0dc831953187fea8e3b798768611441321 Mon Sep 17 00:00:00 2001
From: Davide Caratti
Date: Wed, 11 Jul 2018 16:04:49 +0200
Subject: [PATCH] net/sched: skbedit: use per-cpu counters

use per-CPU counters instead of sharing a single set of stats with all
cores: this removes the need for spinlocks when stats are read or
updated.

Signed-off-by: Davide Caratti
Signed-off-by: David S. Miller
---
 net/sched/act_skbedit.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 86521a7..8651b5b 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -38,10 +38,10 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
 {
 	struct tcf_skbedit *d = to_skbedit(a);
 
-	spin_lock(&d->tcf_lock);
 	tcf_lastuse_update(&d->tcf_tm);
-	bstats_update(&d->tcf_bstats, skb);
+	bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
 
+	spin_lock(&d->tcf_lock);
 	if (d->flags & SKBEDIT_F_PRIORITY)
 		skb->priority = d->priority;
 	if (d->flags & SKBEDIT_F_INHERITDSFIELD) {
@@ -77,8 +77,8 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
 	return d->tcf_action;
 
 err:
-	d->tcf_qstats.drops++;
 	spin_unlock(&d->tcf_lock);
+	qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
 	return TC_ACT_SHOT;
 }
 
@@ -169,7 +169,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 
 	if (!exists) {
 		ret = tcf_idr_create(tn, parm->index, est, a,
-				     &act_skbedit_ops, bind, false);
+				     &act_skbedit_ops, bind, true);
 		if (ret) {
 			tcf_idr_cleanup(tn, parm->index);
 			return ret;
-- 
2.7.4
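
Note on the pattern, for readers unfamiliar with per-CPU counters: each CPU
increments its own private slot on the fast path, and the slots are only
folded together when statistics are dumped, so the datapath no longer takes
d->tcf_lock just for accounting (the last hunk passes true to
tcf_idr_create() so the act API allocates the per-CPU cpu_bstats/cpu_qstats
that the new update calls use). The sketch below is a minimal userspace
analogue for illustration only, not the kernel API: it stands in threads for
CPUs, and all names in it (NR_CPUS, struct cpu_bstats here, datapath,
dump_stats) are made up for the example.

/* Build with: cc -O2 -pthread percpu_sketch.c */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS		4
#define PKTS_PER_CPU	1000000

struct cpu_bstats {
	uint64_t packets;
	uint64_t bytes;
	char pad[64 - 2 * sizeof(uint64_t)];	/* avoid false sharing */
};

static struct cpu_bstats stats[NR_CPUS];

/* Update path: touches only this "CPU"'s slot, no lock taken. */
static void *datapath(void *arg)
{
	struct cpu_bstats *b = &stats[(long)arg];

	for (int i = 0; i < PKTS_PER_CPU; i++) {
		b->packets++;
		b->bytes += 64;
	}
	return NULL;
}

/* Read path: fold the per-CPU slots into one total, as a stats dump would. */
static void dump_stats(void)
{
	uint64_t packets = 0, bytes = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		packets += stats[cpu].packets;
		bytes += stats[cpu].bytes;
	}
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)packets, (unsigned long long)bytes);
}

int main(void)
{
	pthread_t tid[NR_CPUS];

	for (long cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_create(&tid[cpu], NULL, datapath, (void *)cpu);
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		pthread_join(tid[cpu], NULL);

	dump_stats();
	return 0;
}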