1 // SPDX-License-Identifier: GPL-2.0-only
3 * net/psample/psample.c - Netlink channel for packet sampling
4 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
7 #include <linux/types.h>
8 #include <linux/kernel.h>
9 #include <linux/skbuff.h>
10 #include <linux/module.h>
11 #include <linux/timekeeping.h>
12 #include <net/net_namespace.h>
14 #include <net/netlink.h>
15 #include <net/genetlink.h>
16 #include <net/psample.h>
17 #include <linux/spinlock.h>
18 #include <net/ip_tunnels.h>
19 #include <net/dst_metadata.h>
21 #define PSAMPLE_MAX_PACKET_SIZE 0xffff
23 static LIST_HEAD(psample_groups_list);
24 static DEFINE_SPINLOCK(psample_groups_lock);
26 /* multicast groups */
/* Indexes into psample_nl_mcgrps[]: CONFIG carries group add/del
 * notifications (see psample_group_notify()), SAMPLE carries the sampled
 * packets themselves (see psample_sample_packet()).
 * NOTE(review): excerpt — the closing "};" of this enum is elided here.
 */
27 enum psample_nl_multicast_groups {
28 PSAMPLE_NL_MCGRP_CONFIG,
29 PSAMPLE_NL_MCGRP_SAMPLE,
/* Multicast group table registered with the psample genetlink family.
 * NOTE(review): excerpt — the closing "};" of this array is elided here.
 */
32 static const struct genl_multicast_group psample_nl_mcgrps[] = {
33 [PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
34 [PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
/* Forward declaration; the family is used by the fill/notify helpers
 * before its full definition further down in this file.
 */
37 static struct genl_family psample_nl_family __ro_after_init;
/*
 * Build a CONFIG-channel description of @group into @msg: the group
 * number, its current refcount and its sequence counter, wrapped in a
 * genetlink header for @cmd.
 * NOTE(review): excerpt — the local declarations, the error check after
 * each nla_put_u32(), the success return and the error label between
 * genlmsg_end() and genlmsg_cancel() are elided here.
 */
39 static int psample_group_nl_fill(struct sk_buff *msg,
40 struct psample_group *group,
41 enum psample_command cmd, u32 portid, u32 seq,
/* Open the genetlink message header for @cmd. */
47 hdr = genlmsg_put(msg, portid, seq, &psample_nl_family, flags, cmd);
51 ret = nla_put_u32(msg, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
55 ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_REFCOUNT, group->refcount);
59 ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_SEQ, group->seq);
/* Success path: finalize the message. */
63 genlmsg_end(msg, hdr);
/* Error path: undo the partially-built message. */
67 genlmsg_cancel(msg, hdr);
/*
 * PSAMPLE_CMD_GET_GROUP dump handler: report every group that belongs to
 * the requesting socket's network namespace, one NEW_GROUP message each.
 * cb->args[0] is the resume index carried across dump invocations.
 * NOTE(review): excerpt — the idx/err locals, the skip-until-@start
 * logic, the break-on-error handling and the args[0]/return bookkeeping
 * are elided here.
 */
71 static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg,
72 struct netlink_callback *cb)
74 struct psample_group *group;
75 int start = cb->args[0];
/* psample_groups_list is protected by psample_groups_lock. */
79 spin_lock_bh(&psample_groups_lock);
80 list_for_each_entry(group, &psample_groups_list, list) {
/* Only expose groups from the caller's netns. */
81 if (!net_eq(group->net, sock_net(msg->sk)))
87 err = psample_group_nl_fill(msg, group, PSAMPLE_CMD_NEW_GROUP,
88 NETLINK_CB(cb->skb).portid,
89 cb->nlh->nlmsg_seq, NLM_F_MULTI);
95 spin_unlock_bh(&psample_groups_lock);
/* Generic netlink operations for the family; GET_GROUP is dump-only and,
 * per the comment below, available without privileges.
 * NOTE(review): excerpt — the inner "{" / "}," and closing "};" of this
 * array are elided here.
 */
100 static const struct genl_small_ops psample_nl_ops[] = {
102 .cmd = PSAMPLE_CMD_GET_GROUP,
103 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
104 .dumpit = psample_nl_cmd_get_group_dumpit,
105 /* can be retrieved by unprivileged users */
/* The "psample" generic netlink family: ops, multicast groups and
 * attribute bounds.  resv_start_op reserves command numbers above
 * GET_GROUP for future strictly-validated commands.
 * NOTE(review): excerpt — the closing "};" of this initializer is elided.
 */
109 static struct genl_family psample_nl_family __ro_after_init = {
110 .name = PSAMPLE_GENL_NAME,
111 .version = PSAMPLE_GENL_VERSION,
112 .maxattr = PSAMPLE_ATTR_MAX,
114 .module = THIS_MODULE,
115 .mcgrps = psample_nl_mcgrps,
116 .small_ops = psample_nl_ops,
117 .n_small_ops = ARRAY_SIZE(psample_nl_ops),
118 .resv_start_op = PSAMPLE_CMD_GET_GROUP + 1,
119 .n_mcgrps = ARRAY_SIZE(psample_nl_mcgrps),
/*
 * Multicast a @cmd (NEW_GROUP/DEL_GROUP) notification for @group on the
 * CONFIG channel, within the group's own netns.  Callers below invoke
 * this with psample_groups_lock held, hence GFP_ATOMIC.
 * NOTE(review): excerpt — the msg/err locals, the allocation-failure
 * return and the fill-error nlmsg_free() path are elided here.
 */
122 static void psample_group_notify(struct psample_group *group,
123 enum psample_command cmd)
128 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
132 err = psample_group_nl_fill(msg, group, cmd, 0, 0, NLM_F_MULTI);
134 genlmsg_multicast_netns(&psample_nl_family, group->net, msg, 0,
135 PSAMPLE_NL_MCGRP_CONFIG, GFP_ATOMIC);
/*
 * Allocate a new group for (@net, @group_num), link it onto the global
 * list and announce it on the CONFIG channel.  Called from
 * psample_group_get() with psample_groups_lock held (so GFP_ATOMIC).
 * NOTE(review): excerpt — the NULL check after kzalloc(), the
 * net/refcount field initialisation and the return statement are elided.
 */
140 static struct psample_group *psample_group_create(struct net *net,
143 struct psample_group *group;
145 group = kzalloc(sizeof(*group), GFP_ATOMIC);
150 group->group_num = group_num;
151 list_add_tail(&group->list, &psample_groups_list);
153 psample_group_notify(group, PSAMPLE_CMD_NEW_GROUP);
/*
 * Announce deletion, unlink @group and free it.  Called from
 * psample_group_put() with psample_groups_lock held.  kfree_rcu()
 * defers the actual free past an RCU grace period, so concurrent RCU
 * readers of the group remain safe.
 */
157 static void psample_group_destroy(struct psample_group *group)
159 psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
160 list_del(&group->list);
161 kfree_rcu(group, rcu);
/*
 * Find the group matching both @group_num and @net on the global list.
 * Caller must hold psample_groups_lock (see psample_group_get()).
 * NOTE(review): excerpt — the matched-group return and the NULL
 * fall-through return are elided here.
 */
164 static struct psample_group *
165 psample_group_lookup(struct net *net, u32 group_num)
167 struct psample_group *group;
169 list_for_each_entry(group, &psample_groups_list, list)
170 if ((group->group_num == group_num) && (group->net == net))
/*
 * Look up the group for (@net, @group_num), creating it if it does not
 * exist, all under psample_groups_lock so lookup+create is atomic.
 * NOTE(review): excerpt — the reference-count increment on the found or
 * created group and the return statement are elided here; presumably
 * group->refcount is bumped before unlock — confirm against upstream.
 */
175 struct psample_group *psample_group_get(struct net *net, u32 group_num)
177 struct psample_group *group;
179 spin_lock_bh(&psample_groups_lock);
181 group = psample_group_lookup(net, group_num);
183 group = psample_group_create(net, group_num);
190 spin_unlock_bh(&psample_groups_lock);
193 EXPORT_SYMBOL_GPL(psample_group_get);
/*
 * Take an additional reference on an already-held @group.
 * NOTE(review): excerpt — the body between lock and unlock (presumably
 * a group->refcount increment, matching psample_group_put()) is elided.
 */
195 void psample_group_take(struct psample_group *group)
197 spin_lock_bh(&psample_groups_lock);
199 spin_unlock_bh(&psample_groups_lock);
201 EXPORT_SYMBOL_GPL(psample_group_take);
/*
 * Drop one reference on @group; destroy it (notify + unlink + RCU free)
 * when the count reaches zero.  The refcount itself is protected by
 * psample_groups_lock, not by atomics.
 */
203 void psample_group_put(struct psample_group *group)
205 spin_lock_bh(&psample_groups_lock);
207 if (--group->refcount == 0)
208 psample_group_destroy(group);
210 spin_unlock_bh(&psample_groups_lock);
212 EXPORT_SYMBOL_GPL(psample_group_put);
/*
 * Serialize @tun_info into PSAMPLE_TUNNEL_KEY_ATTR_* attributes on @skb.
 * Optional fields (key id, addresses, flags, ports, options) are only
 * emitted when present/non-zero.  Must be kept in sync with
 * psample_tunnel_meta_len(), which sizes the buffer for this output.
 * NOTE(review): excerpt — the switch on tun_proto separating the IPv4
 * and IPv6 address branches, the per-put "return -EMSGSIZE;" lines and
 * the final "return 0;" are elided here.
 */
215 static int __psample_ip_tun_to_nlattr(struct sk_buff *skb,
216 struct ip_tunnel_info *tun_info)
218 unsigned short tun_proto = ip_tunnel_info_af(tun_info);
219 const void *tun_opts = ip_tunnel_info_opts(tun_info);
220 const struct ip_tunnel_key *tun_key = &tun_info->key;
221 int tun_opts_len = tun_info->options_len;
/* VNI/key id, emitted only when TUNNEL_KEY is set. */
223 if (tun_key->tun_flags & TUNNEL_KEY &&
224 nla_put_be64(skb, PSAMPLE_TUNNEL_KEY_ATTR_ID, tun_key->tun_id,
225 PSAMPLE_TUNNEL_KEY_ATTR_PAD))
228 if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE &&
229 nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE))
/* IPv4 endpoint addresses (AF_INET branch). */
234 if (tun_key->u.ipv4.src &&
235 nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_SRC,
236 tun_key->u.ipv4.src))
238 if (tun_key->u.ipv4.dst &&
239 nla_put_in_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV4_DST,
240 tun_key->u.ipv4.dst))
/* IPv6 endpoint addresses (AF_INET6 branch). */
244 if (!ipv6_addr_any(&tun_key->u.ipv6.src) &&
245 nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_SRC,
246 &tun_key->u.ipv6.src))
248 if (!ipv6_addr_any(&tun_key->u.ipv6.dst) &&
249 nla_put_in6_addr(skb, PSAMPLE_TUNNEL_KEY_ATTR_IPV6_DST,
250 &tun_key->u.ipv6.dst))
255 nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TOS, tun_key->tos))
/* TTL is always emitted. */
257 if (nla_put_u8(skb, PSAMPLE_TUNNEL_KEY_ATTR_TTL, tun_key->ttl))
259 if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
260 nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
262 if ((tun_key->tun_flags & TUNNEL_CSUM) &&
263 nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_CSUM))
265 if (tun_key->tp_src &&
266 nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_SRC, tun_key->tp_src))
268 if (tun_key->tp_dst &&
269 nla_put_be16(skb, PSAMPLE_TUNNEL_KEY_ATTR_TP_DST, tun_key->tp_dst))
271 if ((tun_key->tun_flags & TUNNEL_OAM) &&
272 nla_put_flag(skb, PSAMPLE_TUNNEL_KEY_ATTR_OAM))
/* Tunnel options: GENEVE and ERSPAN are mutually exclusive here. */
275 if (tun_key->tun_flags & TUNNEL_GENEVE_OPT &&
276 nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_GENEVE_OPTS,
277 tun_opts_len, tun_opts))
279 else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT &&
280 nla_put(skb, PSAMPLE_TUNNEL_KEY_ATTR_ERSPAN_OPTS,
281 tun_opts_len, tun_opts))
/*
 * Wrap __psample_ip_tun_to_nlattr() output inside a nested
 * PSAMPLE_ATTR_TUNNEL attribute, cancelling the nest on failure.
 * NOTE(review): excerpt — the nla/err locals, the NULL check on
 * nla_nest_start_noflag(), the error return after cancel and the final
 * "return 0;" are elided here.
 */
288 static int psample_ip_tun_to_nlattr(struct sk_buff *skb,
289 struct ip_tunnel_info *tun_info)
294 nla = nla_nest_start_noflag(skb, PSAMPLE_ATTR_TUNNEL);
298 err = __psample_ip_tun_to_nlattr(skb, tun_info);
/* On fill failure, drop the partially-built nest. */
300 nla_nest_cancel(skb, nla);
304 nla_nest_end(skb, nla);
/*
 * Compute the netlink attribute space needed to encode @tun_info.  Each
 * term mirrors one nla_put*() in __psample_ip_tun_to_nlattr(); the two
 * functions must be kept in sync or message construction can overflow
 * the allocated skb.
 * NOTE(review): excerpt — the switch on tun_proto around the IPv4/IPv6
 * address terms, the TOS conditional and the "return sum;" are elided.
 */
309 static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info)
311 unsigned short tun_proto = ip_tunnel_info_af(tun_info);
312 const struct ip_tunnel_key *tun_key = &tun_info->key;
313 int tun_opts_len = tun_info->options_len;
314 int sum = nla_total_size(0); /* PSAMPLE_ATTR_TUNNEL */
316 if (tun_key->tun_flags & TUNNEL_KEY)
317 sum += nla_total_size_64bit(sizeof(u64));
319 if (tun_info->mode & IP_TUNNEL_INFO_BRIDGE)
320 sum += nla_total_size(0);
/* IPv4 source/destination addresses. */
324 if (tun_key->u.ipv4.src)
325 sum += nla_total_size(sizeof(u32));
326 if (tun_key->u.ipv4.dst)
327 sum += nla_total_size(sizeof(u32));
/* IPv6 source/destination addresses. */
330 if (!ipv6_addr_any(&tun_key->u.ipv6.src))
331 sum += nla_total_size(sizeof(struct in6_addr));
332 if (!ipv6_addr_any(&tun_key->u.ipv6.dst))
333 sum += nla_total_size(sizeof(struct in6_addr));
/* TOS (conditional, condition elided) and always-present TTL. */
337 sum += nla_total_size(sizeof(u8));
338 sum += nla_total_size(sizeof(u8)); /* TTL */
339 if (tun_key->tun_flags & TUNNEL_DONT_FRAGMENT)
340 sum += nla_total_size(0);
341 if (tun_key->tun_flags & TUNNEL_CSUM)
342 sum += nla_total_size(0);
/* Transport ports (conditions elided in this excerpt). */
344 sum += nla_total_size(sizeof(u16));
346 sum += nla_total_size(sizeof(u16));
347 if (tun_key->tun_flags & TUNNEL_OAM)
348 sum += nla_total_size(0);
350 if (tun_key->tun_flags & TUNNEL_GENEVE_OPT)
351 sum += nla_total_size(tun_opts_len);
352 else if (tun_key->tun_flags & TUNNEL_ERSPAN_OPT)
353 sum += nla_total_size(tun_opts_len);
/*
 * Report one sampled packet on the SAMPLE multicast channel: compute the
 * metadata size, allocate a genetlink skb, emit metadata attributes
 * (ifindexes, rate, original size, group, sequence, optional tc/occupancy/
 * latency, timestamp, protocol), copy up to trunc_size bytes of packet
 * data, append tunnel info when present, and multicast within the
 * group's netns.  Uses GFP_ATOMIC throughout, so callable from softirq
 * context.
 * NOTE(review): excerpt — several locals, the tunnel-info presence
 * checks, the "goto error" targets after each failed put, the error
 * label and closing brace are elided here.
 */
360 void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
361 u32 sample_rate, const struct psample_metadata *md)
363 ktime_t tstamp = ktime_get_real();
364 int out_ifindex = md->out_ifindex;
365 int in_ifindex = md->in_ifindex;
366 u32 trunc_size = md->trunc_size;
368 struct ip_tunnel_info *tun_info;
370 struct sk_buff *nl_skb;
/* Worst-case size of the metadata attributes; optional ones counted
 * only when their validity flag / value says they will be emitted. */
376 meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) +
377 (out_ifindex ? nla_total_size(sizeof(u16)) : 0) +
378 (md->out_tc_valid ? nla_total_size(sizeof(u16)) : 0) +
379 (md->out_tc_occ_valid ? nla_total_size_64bit(sizeof(u64)) : 0) +
380 (md->latency_valid ? nla_total_size_64bit(sizeof(u64)) : 0) +
381 nla_total_size(sizeof(u32)) + /* sample_rate */
382 nla_total_size(sizeof(u32)) + /* orig_size */
383 nla_total_size(sizeof(u32)) + /* group_num */
384 nla_total_size(sizeof(u32)) + /* seq */
385 nla_total_size_64bit(sizeof(u64)) + /* timestamp */
386 nla_total_size(sizeof(u16)); /* protocol */
389 tun_info = skb_tunnel_info(skb);
391 meta_len += psample_tunnel_meta_len(tun_info);
/* Truncate the copied payload so the whole message fits the cap. */
394 data_len = min(skb->len, trunc_size);
395 if (meta_len + nla_total_size(data_len) > PSAMPLE_MAX_PACKET_SIZE)
396 data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
399 nl_skb = genlmsg_new(meta_len + nla_total_size(data_len), GFP_ATOMIC);
400 if (unlikely(!nl_skb))
403 data = genlmsg_put(nl_skb, 0, 0, &psample_nl_family, 0,
409 ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_IIFINDEX, in_ifindex);
410 if (unlikely(ret < 0))
415 ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OIFINDEX, out_ifindex);
416 if (unlikely(ret < 0))
420 ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_RATE, sample_rate);
421 if (unlikely(ret < 0))
424 ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_ORIGSIZE, skb->len);
425 if (unlikely(ret < 0))
428 ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
429 if (unlikely(ret < 0))
/* Post-increment: each sample consumes one group sequence number. */
432 ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_GROUP_SEQ, group->seq++);
433 if (unlikely(ret < 0))
436 if (md->out_tc_valid) {
437 ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OUT_TC, md->out_tc);
438 if (unlikely(ret < 0))
442 if (md->out_tc_occ_valid) {
443 ret = nla_put_u64_64bit(nl_skb, PSAMPLE_ATTR_OUT_TC_OCC,
444 md->out_tc_occ, PSAMPLE_ATTR_PAD);
445 if (unlikely(ret < 0))
449 if (md->latency_valid) {
450 ret = nla_put_u64_64bit(nl_skb, PSAMPLE_ATTR_LATENCY,
451 md->latency, PSAMPLE_ATTR_PAD);
452 if (unlikely(ret < 0))
456 ret = nla_put_u64_64bit(nl_skb, PSAMPLE_ATTR_TIMESTAMP,
457 ktime_to_ns(tstamp), PSAMPLE_ATTR_PAD);
458 if (unlikely(ret < 0))
461 ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_PROTO,
462 be16_to_cpu(skb->protocol));
463 if (unlikely(ret < 0))
/* Packet payload: the nlattr header is built by hand so the data can
 * be copied straight from @skb with skb_copy_bits(). */
467 int nla_len = nla_total_size(data_len);
470 nla = skb_put(nl_skb, nla_len);
471 nla->nla_type = PSAMPLE_ATTR_DATA;
472 nla->nla_len = nla_attr_size(data_len);
474 if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
480 ret = psample_ip_tun_to_nlattr(nl_skb, tun_info);
481 if (unlikely(ret < 0))
486 genlmsg_end(nl_skb, data);
487 genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
488 PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC);
/* Error path (label elided): drop the message, log rate-limited. */
492 pr_err_ratelimited("Could not create psample log message\n");
495 EXPORT_SYMBOL_GPL(psample_sample_packet);
/* Module init: register the psample genetlink family (ops + mcgrps). */
497 static int __init psample_module_init(void)
499 return genl_register_family(&psample_nl_family);
/* Module exit: unregister the family, tearing down its netlink state. */
502 static void __exit psample_module_exit(void)
504 genl_unregister_family(&psample_nl_family);
/* Standard module registration and metadata. */
507 module_init(psample_module_init);
508 module_exit(psample_module_exit);
510 MODULE_AUTHOR("Yotam Gigi <yotam.gi@gmail.com>");
511 MODULE_DESCRIPTION("netlink channel for packet sampling");
512 MODULE_LICENSE("GPL v2");