From: willy tarreau Date: Thu, 16 Jan 2014 07:20:12 +0000 (+0100) Subject: net: mvneta: remove tests for impossible cases in the tx_done path X-Git-Tag: upstream/snapshot3+hdmi~3572^2~146^2~7 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=6c4989748759f433bf999f33c1ff3853ed5ca945;p=platform%2Fadaptation%2Frenesas_rcar%2Frenesas_kernel.git net: mvneta: remove tests for impossible cases in the tx_done path Currently, mvneta_txq_bufs_free() calls mvneta_tx_done_policy() with a non-null cause to retrieve the pointer to the next queue to process. There are useless tests on the return queue number and on the pointer, all of which are well defined within a known limited set. This code path is fast, although not critical. Removing 3 tests here that the compiler could not optimize (verified) is always desirable. Cc: Thomas Petazzoni Cc: Gregory CLEMENT Tested-by: Arnaud Ebalard Signed-off-by: Willy Tarreau Signed-off-by: David S. Miller --- diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index df75a23..2fb9559 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1276,13 +1276,16 @@ static void mvneta_rx_csum(struct mvneta_port *pp, skb->ip_summed = CHECKSUM_NONE; } -/* Return tx queue pointer (find last set bit) according to causeTxDone reg */ +/* Return tx queue pointer (find last set bit) according to <cause> returned + * from <tx_done> reg. <cause> must not be null. The return value is always a + * valid queue for matching the first one found in <cause>. + */ static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, u32 cause) { int queue = fls(cause) - 1; - return (queue < 0 || queue >= txq_number) ? 
NULL : &pp->txqs[queue]; + return &pp->txqs[queue]; } /* Free tx queue skbuffs */ @@ -1651,7 +1654,9 @@ static void mvneta_txq_done_force(struct mvneta_port *pp, txq->txq_get_index = 0; } -/* handle tx done - called from tx done timer callback */ +/* Handle tx done - called in softirq context. The <cause_tx_done> argument + * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL. + */ static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done, int *tx_todo) { @@ -1660,10 +1665,8 @@ static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done, struct netdev_queue *nq; *tx_todo = 0; - while (cause_tx_done != 0) { + while (cause_tx_done) { txq = mvneta_tx_done_policy(pp, cause_tx_done); - if (!txq) - break; nq = netdev_get_tx_queue(pp->dev, txq->id); __netif_tx_lock(nq, smp_processor_id());