// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

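/* Netlink policy: the only attribute accepted by this action is
 * TCA_CSUM_PARMS, and it must carry exactly a struct tc_csum.
 */
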
static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static struct tc_action_ops act_csum_ops;

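/* Create a new csum action or update an existing one: parameters come
 * from the TCA_CSUM_PARMS attribute, and the resulting tcf_csum_params
 * are swapped in under tcf_lock via RCU so the datapath never observes
 * a half-updated configuration.
 */
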
static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a,
			 struct tcf_proto *tp,
			 u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct tcf_csum_params *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;
	u32 index;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CSUM_MAX, nla, csum_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);
	index = parm->index;

	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (!err) {
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_csum_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind) /* don't override defaults */
			return ACT_P_BOUND;
		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	p = to_tcf_csum(*a);
	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}
	params_new->update_flags = parm->update_flags;

	spin_lock_bh(&p->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params_new = rcu_replace_pointer(p->params, params_new,
					 lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params_new)
		kfree_rcu(params_new, rcu);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

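/* Illustrative userspace configuration (a sketch: the device, parent
 * handle and u32 match below are placeholders, not part of this file):
 *
 *   tc filter add dev eth0 parent 1: protocol ip \
 *       u32 match u32 0 0 action csum ip4h and udp
 *
 * Each csum keyword selects one TCA_CSUM_UPDATE_FLAG_* bit in
 * parm->update_flags.
 */
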
/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check the expected next layer availability in the specified sk_buff.
 * Return the next layer pointer if the check passes, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}

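/* Each per-protocol helper below returns 1 when the packet was updated
 * or may safely be left untouched, and 0 when it is malformed and the
 * caller should drop it.
 */
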
static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

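/* The TCP helpers leave GSO super-packets alone: their checksums are
 * finalized per segment later in the stack, so only non-GSO skbs are
 * folded here.
 */
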
static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDP-Lite checksum algorithms. Don't rely
	 * on udph->len for the payload length without checking the
	 * protocol first: UDP-Lite reuses that field as the checksum
	 * coverage. Use iph->tot_len, or simply ipl, instead.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

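/* In both UDP helpers a computed checksum of zero is written as
 * CSUM_MANGLED_0 (0xffff), because an on-wire zero means "no checksum
 * present" for UDP.
 */
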
static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDP-Lite checksum algorithms. Don't rely
	 * on udph->len for the payload length without checking the
	 * protocol first: UDP-Lite reuses that field as the checksum
	 * coverage. Use ip6h->payload_len + sizeof(*ip6h), or simply
	 * ipl, instead.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);
		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);
		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

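/* SCTP uses a CRC32c rather than the ones'-complement Internet
 * checksum, hence sctp_compute_cksum() and the csum_not_inet reset
 * below.
 */
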
static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;

	return 1;
}

static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

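/* Scan a hop-by-hop options header for the Jumbo Payload option: when
 * ip6h->payload_len is zero on a jumbogram, the real payload length
 * has to be recovered from that option before checksumming.
 */
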
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

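/* Walk the IPv6 extension-header chain until a transport protocol is
 * found, accumulating header lengths in hl. Packets with a fragment
 * header are deliberately left untouched.
 */
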
static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

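/* Datapath entry point. Runs in an RCU (BH) read-side section, so the
 * parameters are read with rcu_dereference_bh(); any VLAN headers
 * pulled off to reach the IP header are pushed back before returning.
 */
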
static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	bool orig_vlan_tag_present = false;
	unsigned int vlan_hdr_count = 0;
	struct tcf_csum_params *params;
	u32 update_flags;
	__be16 protocol;
	int action;

	params = rcu_dereference_bh(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	tcf_action_update_bstats(&p->common, skb);

	action = READ_ONCE(p->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	update_flags = params->update_flags;
	protocol = skb_protocol(skb, false);
again:
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_8021AD):
		fallthrough;
	case cpu_to_be16(ETH_P_8021Q):
		if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
			protocol = skb->protocol;
			orig_vlan_tag_present = true;
		} else {
			struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;

			protocol = vlan->h_vlan_encapsulated_proto;
			skb_pull(skb, VLAN_HLEN);
			skb_reset_network_header(skb);
			vlan_hdr_count++;
		}
		goto again;
	}

out:
	/* Restore the skb for the pulled VLAN tags */
	while (vlan_hdr_count--) {
		skb_push(skb, VLAN_HLEN);
		skb_reset_network_header(skb);
	}

	return action;

drop:
	tcf_action_inc_drop_qstats(&p->common);
	action = TC_ACT_SHOT;
	goto out;
}

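/* Serialize the action's configuration back to userspace; tcf_lock
 * keeps opt.action and opt.update_flags consistent with each other.
 */
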
static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index   = p->tcf_index,
		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&p->tcf_lock);
	params = rcu_dereference_protected(p->params,
					   lockdep_is_held(&p->tcf_lock));
	opt.action = p->tcf_action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}

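/* Map the software action onto its flow-offload equivalent so drivers
 * can program FLOW_ACTION_CSUM with the same update flags.
 */
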
static int tcf_csum_offload_act_setup(struct tc_action *act, void *entry_data,
				      u32 *index_inc, bool bind,
				      struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_CSUM;
		entry->csum_flags = tcf_csum_update_flags(act);
		*index_inc = 1;
	} else {
		struct flow_offload_action *fl_action = entry_data;

		fl_action->id = FLOW_ACTION_CSUM;
	}

	return 0;
}

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.id		= TCA_ID_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum_act,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.cleanup	= tcf_csum_cleanup,
	.get_fill_size	= tcf_csum_get_fill_size,
	.offload_act_setup = tcf_csum_offload_act_setup,
	.size		= sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_csum_ops.net_id);

	return tc_action_net_init(net, tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_csum_ops.net_id);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id   = &act_csum_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);