net: Add trace events for all receive exit points
author Geneviève Bastien <gbastien@versatic.net>
Tue, 27 Nov 2018 17:52:39 +0000 (12:52 -0500)
committer David S. Miller <davem@davemloft.net>
Fri, 30 Nov 2018 21:23:25 +0000 (13:23 -0800)
Trace events are already present for the receive entry points, indicating
through which function a packet entered the network stack.

This patch adds the corresponding exit trace events, bounding the
reception so that all events occurring between the entry and the exit can
be considered part of the reception context. This greatly helps dependency
and root-cause analyses.

Without this, tracepoint instrumentation alone cannot determine whether a
sched_wakeup event following a netif_receive_skb event is the result of
the packet reception or a mere coincidence after further processing by the
thread. This is possible with other mechanisms such as kretprobes, but
since the "entry" points are already present, adding the matching exit
events is the natural complement (see the sketch below).
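
For illustration, here is a minimal sketch of that kretprobe alternative,
using the tracefs kprobe_events interface; the tracefs mount point and the
netrx/netif_rx_ret event name are assumptions for this example, not part
of this patch:

  /* Hedged sketch: register a kretprobe on netif_rx through the tracefs
   * kprobe_events interface and fetch its return value.  Mount point and
   * event name are assumed. */
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
          const char *def = "r:netrx/netif_rx_ret netif_rx $retval\n";
          int fd = open("/sys/kernel/tracing/kprobe_events",
                        O_WRONLY | O_APPEND);

          if (fd < 0) {
                  perror("kprobe_events");
                  return 1;
          }
          if (write(fd, def, strlen(def)) < 0)
                  perror("write");
          close(fd);
          return 0;
  }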

In addition to linking packets with wakeups, the entry/exit event pair
can also be used to perform network stack latency analyses.
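
As a hypothetical sketch of such an analysis, the snippet below enables
the netif_receive_skb entry/exit pair added here and streams the raw
trace; a trace analysis tool can then pair each _entry with the following
_exit on the same CPU and compute the in-stack latency (tracefs mount
point is an assumption):

  /* Hedged sketch, assuming tracefs is mounted at /sys/kernel/tracing:
   * enable the netif_receive_skb entry/exit pair and stream trace_pipe.
   * Everything between an _entry and the matching _exit on the same CPU
   * belongs to that packet's reception context. */
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  static void enable_event(const char *path)
  {
          int fd = open(path, O_WRONLY);

          if (fd < 0 || write(fd, "1", 1) != 1)
                  perror(path);
          if (fd >= 0)
                  close(fd);
  }

  int main(void)
  {
          char buf[4096];
          ssize_t n;
          int fd;

          enable_event("/sys/kernel/tracing/events/net/netif_receive_skb_entry/enable");
          enable_event("/sys/kernel/tracing/events/net/netif_receive_skb_exit/enable");

          fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
          if (fd < 0) {
                  perror("trace_pipe");
                  return 1;
          }
          while ((n = read(fd, buf, sizeof(buf))) > 0)
                  fwrite(buf, 1, (size_t)n, stdout);
          return 0;
  }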

Signed-off-by: Geneviève Bastien <gbastien@versatic.net>
CC: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
CC: Steven Rostedt <rostedt@goodmis.org>
CC: Ingo Molnar <mingo@redhat.com>
CC: David S. Miller <davem@davemloft.net>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org> (tracing side)
Signed-off-by: David S. Miller <davem@davemloft.net>
include/trace/events/net.h
net/core/dev.c

diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 00aa72c..1efd7d9 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -244,6 +244,65 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,
        TP_ARGS(skb)
 );
 
+DECLARE_EVENT_CLASS(net_dev_rx_exit_template,
+
+       TP_PROTO(int ret),
+
+       TP_ARGS(ret),
+
+       TP_STRUCT__entry(
+               __field(int,    ret)
+       ),
+
+       TP_fast_assign(
+               __entry->ret = ret;
+       ),
+
+       TP_printk("ret=%d", __entry->ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, napi_gro_frags_exit,
+
+       TP_PROTO(int ret),
+
+       TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, napi_gro_receive_exit,
+
+       TP_PROTO(int ret),
+
+       TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_exit,
+
+       TP_PROTO(int ret),
+
+       TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_exit,
+
+       TP_PROTO(int ret),
+
+       TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_rx_ni_exit,
+
+       TP_PROTO(int ret),
+
+       TP_ARGS(ret)
+);
+
+DEFINE_EVENT(net_dev_rx_exit_template, netif_receive_skb_list_exit,
+
+       TP_PROTO(int ret),
+
+       TP_ARGS(ret)
+);
+
 #endif /* _TRACE_NET_H */
 
 /* This part must be outside protection */
diff --git a/net/core/dev.c b/net/core/dev.c
index abe50c4..04a6b71 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4527,9 +4527,14 @@ static int netif_rx_internal(struct sk_buff *skb)
 
 int netif_rx(struct sk_buff *skb)
 {
+       int ret;
+
        trace_netif_rx_entry(skb);
 
-       return netif_rx_internal(skb);
+       ret = netif_rx_internal(skb);
+       trace_netif_rx_exit(ret);
+
+       return ret;
 }
 EXPORT_SYMBOL(netif_rx);
 
@@ -4544,6 +4549,7 @@ int netif_rx_ni(struct sk_buff *skb)
        if (local_softirq_pending())
                do_softirq();
        preempt_enable();
+       trace_netif_rx_ni_exit(err);
 
        return err;
 }
@@ -5229,9 +5235,14 @@ static void netif_receive_skb_list_internal(struct list_head *head)
  */
 int netif_receive_skb(struct sk_buff *skb)
 {
+       int ret;
+
        trace_netif_receive_skb_entry(skb);
 
-       return netif_receive_skb_internal(skb);
+       ret = netif_receive_skb_internal(skb);
+       trace_netif_receive_skb_exit(ret);
+
+       return ret;
 }
 EXPORT_SYMBOL(netif_receive_skb);
 
@@ -5251,9 +5262,12 @@ void netif_receive_skb_list(struct list_head *head)
 
        if (list_empty(head))
                return;
-       list_for_each_entry(skb, head, list)
-               trace_netif_receive_skb_list_entry(skb);
+       if (trace_netif_receive_skb_list_entry_enabled()) {
+               list_for_each_entry(skb, head, list)
+                       trace_netif_receive_skb_list_entry(skb);
+       }
        netif_receive_skb_list_internal(head);
+       trace_netif_receive_skb_list_exit(0);
 }
 EXPORT_SYMBOL(netif_receive_skb_list);
 
@@ -5645,12 +5659,17 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 
 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
+       gro_result_t ret;
+
        skb_mark_napi_id(skb, napi);
        trace_napi_gro_receive_entry(skb);
 
        skb_gro_reset_offset(skb);
 
-       return napi_skb_finish(dev_gro_receive(napi, skb), skb);
+       ret = napi_skb_finish(dev_gro_receive(napi, skb), skb);
+       trace_napi_gro_receive_exit(ret);
+
+       return ret;
 }
 EXPORT_SYMBOL(napi_gro_receive);
 
@@ -5768,6 +5787,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
 
 gro_result_t napi_gro_frags(struct napi_struct *napi)
 {
+       gro_result_t ret;
        struct sk_buff *skb = napi_frags_skb(napi);
 
        if (!skb)
@@ -5775,7 +5795,10 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
 
        trace_napi_gro_frags_entry(skb);
 
-       return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
+       ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
+       trace_napi_gro_frags_exit(ret);
+
+       return ret;
 }
 EXPORT_SYMBOL(napi_gro_frags);
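
Once the patch is applied, the new exit tracepoints are visible from user
space; a minimal sketch to confirm they are registered (tracefs mount
point assumed):

  /* Hedged sketch, assuming tracefs at /sys/kernel/tracing: list the
   * net: exit tracepoints registered above via available_events. */
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          char line[256];
          FILE *f = fopen("/sys/kernel/tracing/available_events", "r");

          if (!f) {
                  perror("available_events");
                  return 1;
          }
          while (fgets(line, sizeof(line), f))
                  if (!strncmp(line, "net:", 4) && strstr(line, "_exit"))
                          fputs(line, stdout);
          fclose(f);
          return 0;
  }

The output should include the six events defined above: net:netif_rx_exit,
net:netif_rx_ni_exit, net:netif_receive_skb_exit,
net:netif_receive_skb_list_exit, net:napi_gro_receive_exit and
net:napi_gro_frags_exit.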