net_sched: sfb: optimize enqueue on full queue
Author:     Eric Dumazet <eric.dumazet@gmail.com>
AuthorDate: Thu, 25 Aug 2011 06:21:32 +0000
Commit:     David S. Miller <davem@davemloft.net>
CommitDate: Fri, 26 Aug 2011 16:55:18 +0000 (12:55 -0400)
When the SFB queue is full (hard limit reached), there is no point in
spending time computing the hash and the maximum qlen/p_mark.

Instead, drop the packet early.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 0a833d0..e83c272 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -287,6 +287,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        u32 r, slot, salt, sfbhash;
        int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 
+       if (unlikely(sch->q.qlen >= q->limit)) {
+               sch->qstats.overlimits++;
+               q->stats.queuedrop++;
+               goto drop;
+       }
+
        if (q->rehash_interval > 0) {
                unsigned long limit = q->rehash_time + q->rehash_interval;
 
@@ -332,12 +338,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        slot ^= 1;
        sfb_skb_cb(skb)->hashes[slot] = 0;
 
-       if (unlikely(minqlen >= q->max || sch->q.qlen >= q->limit)) {
+       if (unlikely(minqlen >= q->max)) {
                sch->qstats.overlimits++;
-               if (minqlen >= q->max)
-                       q->stats.bucketdrop++;
-               else
-                       q->stats.queuedrop++;
+               q->stats.bucketdrop++;
                goto drop;
        }
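
For readers outside the kernel tree, here is a minimal standalone sketch of
the pattern this patch applies: test the cheap hard queue limit before doing
any per-packet hashing. All names below (toy_queue, expensive_hash,
QUEUE_LIMIT) are illustrative stand-ins, not the actual sch_sfb.c code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define QUEUE_LIMIT 1000u

struct toy_queue {
	unsigned int qlen;        /* current queue length */
	unsigned long queuedrop;  /* drops due to the hard queue limit */
};

/* Stand-in for the per-packet work SFB does before enqueueing (double
 * hashing plus a scan of the hash buckets); FNV-1a keeps it self-contained. */
static uint32_t expensive_hash(const void *pkt, size_t len)
{
	const unsigned char *p = pkt;
	uint32_t h = 2166136261u;

	while (len--) {
		h ^= *p++;
		h *= 16777619u;
	}
	return h;
}

static int toy_enqueue(struct toy_queue *q, const void *pkt, size_t len)
{
	/* The point of the patch: when the queue is already at its hard
	 * limit, hashing the packet is wasted work, so drop before
	 * computing anything. */
	if (q->qlen >= QUEUE_LIMIT) {
		q->queuedrop++;
		return -1;
	}

	(void)expensive_hash(pkt, len); /* only reached when there is room */
	q->qlen++;
	return 0;
}

int main(void)
{
	struct toy_queue q = { .qlen = QUEUE_LIMIT, .queuedrop = 0 };
	char pkt[64] = { 0 };

	if (toy_enqueue(&q, pkt, sizeof(pkt)) < 0)
		printf("dropped early, queuedrop=%lu\n", q.queuedrop);
	return 0;
}

Note that the patch preserves the stats split: a drop caused by the hard
limit still counts as queuedrop, while the remaining (later) check counts
bucket overflows as bucketdrop, so the two drop reasons stay distinguishable.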