After commit 6b229cf77d68 ("udp: add batching to udp_rmem_release()")
the sk_rmem_alloc field no longer tracks the receive queue length
exactly, because we batch the rmem release. The issue becomes really
apparent only after commit 0d4a6608f68c ("udp: do rmem bulk free even
if the rx sk queue is empty"): user space can easily observe an empty
socket with a non-zero queue length reported by the 'ss' tool or the
procfs interface.
We need to use a custom UDP helper to report the correct queue length,
taking into account the forward allocation deficit.
Reported-by: trevor.francis@46labs.com
Fixes: 6b229cf77d68 ("udp: add batching to udp_rmem_release()")
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
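
For reference, the stale value can be observed from user space by
reading the rx_queue column of /proc/net/udp for a socket the
application has already drained. The sketch below is illustrative
only and not part of the patch; it assumes the usual /proc/net/udp
column layout (sl, local_address, rem_address, st,
tx_queue:rx_queue, ...).

/* Illustrative only: dump the rx_queue column of /proc/net/udp. */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/udp", "r");

	if (!f) {
		perror("/proc/net/udp");
		return 1;
	}

	/* skip the header line */
	if (!fgets(line, sizeof(line), f))
		goto out;

	while (fgets(line, sizeof(line), f)) {
		unsigned int slot, tx_queue, rx_queue;
		char local[64], remote[64], state[8];

		/* sl: local_address rem_address st tx_queue:rx_queue ... */
		if (sscanf(line, "%u: %63s %63s %7s %x:%x", &slot, local,
			   remote, state, &tx_queue, &rx_queue) == 6)
			printf("slot %u local %s rx_queue %u\n", slot,
			       local, rx_queue);
	}

out:
	fclose(f);
	return 0;
}

Before this fix, rx_queue could remain non-zero here even after the
socket was emptied; with udp_rqueue_get() the forward allocation
deficit is subtracted and the reported length drops back to zero.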
struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
struct sockcm_cookie *sockc);
-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
- __u16 srcp, __u16 destp, int bucket);
+void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+ __u16 srcp, __u16 destp, int rqueue, int bucket);
+static inline void
+ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp,
+ __u16 destp, int bucket)
+{
+ __ip6_dgram_sock_seq_show(seq, sp, srcp, destp, sk_rmem_alloc_get(sp),
+ bucket);
+}
#define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
return htons((((u64) hash * (max - min)) >> 32) + min);
}
+static inline int udp_rqueue_get(struct sock *sk)
+{
+ return sk_rmem_alloc_get(sk) - READ_ONCE(udp_sk(sk)->forward_deficit);
+}
+
/* net/ipv4/udp.c */
void udp_destruct_sock(struct sock *sk);
void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
bucket, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
- sk_rmem_alloc_get(sp),
+ udp_rqueue_get(sp),
0, 0L, 0,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
0, sock_i_ino(sp),
static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
void *info)
{
- r->idiag_rqueue = sk_rmem_alloc_get(sk);
+ r->idiag_rqueue = udp_rqueue_get(sk);
r->idiag_wqueue = sk_wmem_alloc_get(sk);
}
}
EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
-void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
- __u16 srcp, __u16 destp, int bucket)
+void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
+ __u16 srcp, __u16 destp, int rqueue, int bucket)
{
const struct in6_addr *dest, *src;
dest->s6_addr32[2], dest->s6_addr32[3], destp,
sp->sk_state,
sk_wmem_alloc_get(sp),
- sk_rmem_alloc_get(sp),
+ rqueue,
0, 0L, 0,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
0,
struct inet_sock *inet = inet_sk(v);
__u16 srcp = ntohs(inet->inet_sport);
__u16 destp = ntohs(inet->inet_dport);
- ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
+ __ip6_dgram_sock_seq_show(seq, v, srcp, destp,
+ udp_rqueue_get(v), bucket);
}
return 0;
}
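
The udp_diag_get_info() change above covers the netlink path that
'ss' uses. For completeness, a minimal user-space sketch of that
query follows; it is illustrative only, not part of the patch, and
simply dumps idiag_rqueue for every IPv4 UDP socket via a
SOCK_DIAG_BY_FAMILY dump request.

/* Illustrative only: dump idiag_rqueue via the sock_diag interface. */
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct inet_diag_req_v2 req;
	} msg = {
		.nlh = {
			.nlmsg_len = sizeof(msg),
			.nlmsg_type = SOCK_DIAG_BY_FAMILY,
			.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		},
		.req = {
			.sdiag_family = AF_INET,
			.sdiag_protocol = IPPROTO_UDP,
			.idiag_states = -1,	/* all socket states */
		},
	};
	long buf[4096];
	int fd, len;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);
	if (fd < 0 || send(fd, &msg, sizeof(msg), 0) < 0) {
		perror("sock_diag");
		return 1;
	}

	while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *h = (struct nlmsghdr *)buf;

		for (; NLMSG_OK(h, len); h = NLMSG_NEXT(h, len)) {
			struct inet_diag_msg *r = NLMSG_DATA(h);

			if (h->nlmsg_type == NLMSG_DONE ||
			    h->nlmsg_type == NLMSG_ERROR)
				goto out;
			printf("sport %u rqueue %u\n",
			       ntohs(r->id.idiag_sport), r->idiag_rqueue);
		}
	}
out:
	close(fd);
	return 0;
}

On an affected kernel this reports the raw sk_rmem_alloc value; with
the patch applied it matches what udp_rqueue_get() computes.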