tcp: introduce tcp_skb_timestamp_us() helper
author Eric Dumazet <edumazet@google.com>
Fri, 21 Sep 2018 15:51:47 +0000 (08:51 -0700)
committer David S. Miller <davem@davemloft.net>
Sat, 22 Sep 2018 02:37:59 +0000 (19:37 -0700)
There are a few places where TCP reads skb->skb_mstamp expecting
a value in usec units.

skb->tstamp (aka skb->skb_mstamp) will soon store CLOCK_TAI nsec value.

Add tcp_skb_timestamp_us() to provide proper conversion when needed.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/tcp.h
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/tcp_rate.c
net/ipv4/tcp_recovery.c

index c6f0bc1..0ca5ea1 100644 (file)
@@ -774,6 +774,12 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
        return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
 }
 
+/* provide the departure time in us unit */
+static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
+{
+       return skb->skb_mstamp;
+}
+
 
 #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
 
@@ -1940,7 +1946,7 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk)
 {
        const struct sk_buff *skb = tcp_rtx_queue_head(sk);
        u32 rto = inet_csk(sk)->icsk_rto;
-       u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
+       u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
 
        return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
 }
index d903407..d703a0b 100644 (file)
@@ -1305,7 +1305,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
         */
        tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
                        start_seq, end_seq, dup_sack, pcount,
-                       skb->skb_mstamp);
+                       tcp_skb_timestamp_us(skb));
        tcp_rate_skb_delivered(sk, skb, state->rate);
 
        if (skb == tp->lost_skb_hint)
@@ -1580,7 +1580,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                                                TCP_SKB_CB(skb)->end_seq,
                                                dup_sack,
                                                tcp_skb_pcount(skb),
-                                               skb->skb_mstamp);
+                                               tcp_skb_timestamp_us(skb));
                        tcp_rate_skb_delivered(sk, skb, state->rate);
                        if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
                                list_del_init(&skb->tcp_tsorted_anchor);
@@ -3103,7 +3103,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
                                tp->retrans_out -= acked_pcount;
                        flag |= FLAG_RETRANS_DATA_ACKED;
                } else if (!(sacked & TCPCB_SACKED_ACKED)) {
-                       last_ackt = skb->skb_mstamp;
+                       last_ackt = tcp_skb_timestamp_us(skb);
                        WARN_ON_ONCE(last_ackt == 0);
                        if (!first_ackt)
                                first_ackt = last_ackt;
@@ -3121,7 +3121,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
                        tp->delivered += acked_pcount;
                        if (!tcp_skb_spurious_retrans(tp, skb))
                                tcp_rack_advance(tp, sacked, scb->end_seq,
-                                                skb->skb_mstamp);
+                                                tcp_skb_timestamp_us(skb));
                }
                if (sacked & TCPCB_LOST)
                        tp->lost_out -= acked_pcount;
@@ -3215,7 +3215,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
                        tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
                }
        } else if (skb && rtt_update && sack_rtt_us >= 0 &&
-                  sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) {
+                  sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
+                                                   tcp_skb_timestamp_us(skb))) {
                /* Do not re-arm RTO if the sack RTT is measured from data sent
                 * after when the head was last (re)transmitted. Otherwise the
                 * timeout may continue to extend in loss recovery.
index 09547ef..1f2496e 100644 (file)
@@ -544,7 +544,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                BUG_ON(!skb);
 
                tcp_mstamp_refresh(tp);
-               delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
+               delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
                remaining = icsk->icsk_rto -
                            usecs_to_jiffies(delta_us);
 
index 597dbd7..b95aa72 100644 (file)
@@ -1966,7 +1966,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
        head = tcp_rtx_queue_head(sk);
        if (!head)
                goto send_now;
-       age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp);
+       age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
        /* If next ACK is likely to come too late (half srtt), do not defer */
        if (age < (tp->srtt_us >> 4))
                goto send_now;
index 4dff40d..baed218 100644 (file)
@@ -55,8 +55,10 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
          * bandwidth estimate.
          */
        if (!tp->packets_out) {
-               tp->first_tx_mstamp  = skb->skb_mstamp;
-               tp->delivered_mstamp = skb->skb_mstamp;
+               u64 tstamp_us = tcp_skb_timestamp_us(skb);
+
+               tp->first_tx_mstamp  = tstamp_us;
+               tp->delivered_mstamp = tstamp_us;
        }
 
        TCP_SKB_CB(skb)->tx.first_tx_mstamp     = tp->first_tx_mstamp;
@@ -88,13 +90,12 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
                rs->is_app_limited   = scb->tx.is_app_limited;
                rs->is_retrans       = scb->sacked & TCPCB_RETRANS;
 
+               /* Record send time of most recently ACKed packet: */
+               tp->first_tx_mstamp  = tcp_skb_timestamp_us(skb);
                /* Find the duration of the "send phase" of this window: */
-               rs->interval_us      = tcp_stamp_us_delta(
-                                               skb->skb_mstamp,
-                                               scb->tx.first_tx_mstamp);
+               rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp,
+                                                    scb->tx.first_tx_mstamp);
 
-               /* Record send time of most recently ACKed packet: */
-               tp->first_tx_mstamp  = skb->skb_mstamp;
        }
        /* Mark off the skb delivered once it's sacked to avoid being
         * used again when it's cumulatively acked. For acked packets
index c81aadf..fdb715b 100644 (file)
@@ -50,7 +50,7 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk)
 s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
 {
        return tp->rack.rtt_us + reo_wnd -
-              tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
+              tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
 }
 
 /* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
@@ -91,7 +91,8 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
                    !(scb->sacked & TCPCB_SACKED_RETRANS))
                        continue;
 
-               if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
+               if (!tcp_rack_sent_after(tp->rack.mstamp,
+                                        tcp_skb_timestamp_us(skb),
                                         tp->rack.end_seq, scb->end_seq))
                        break;