// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include <net/sctp/checksum.h>

#include <net/act_api.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>
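
/*
 * Illustrative use from userspace (iproute2 syntax, shown only as an
 * example; the exact keywords live in iproute2's m_csum.c):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *		match ip protocol 17 0xff flowid 1:1 \
 *		action csum ip4h and udp
 *
 * Each keyword after "csum" selects one TCA_CSUM_UPDATE_FLAG_* bit in
 * struct tc_csum::update_flags, parsed by tcf_csum_init() below.
 */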

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static unsigned int csum_net_id;
static struct tc_action_ops act_csum_ops;

static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a, int ovr,
			 int bind, bool rtnl_held, struct tcf_proto *tp,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	struct tcf_csum_params *params_new;
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CSUM_MAX, nla, csum_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (!err) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_csum_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (err > 0) {
		if (bind)	/* don't override defaults */
			return 0;
		if (!ovr) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	} else {
		return err;
	}

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	p = to_tcf_csum(*a);
	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		err = -ENOMEM;
		goto put_chain;
	}
	params_new->update_flags = parm->update_flags;

	spin_lock_bh(&p->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	rcu_swap_protected(p->params, params_new,
			   lockdep_is_held(&p->tcf_lock));
	spin_unlock_bh(&p->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params_new)
		kfree_rcu(params_new, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check the expected next layer availability in the specified sk_buff.
 * Return the next layer pointer if pass, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}
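
/*
 * The per-protocol helpers below share one convention: they return 1 when
 * the checksum was recomputed or the packet was deliberately left alone
 * (e.g. GSO packets, whose checksums are finalized at segmentation time),
 * and 0 when the packet is malformed, in which case the caller drops it.
 */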

static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);
	skb->ip_summed = CHECKSUM_NONE;
	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);
	skb->ip_summed = CHECKSUM_NONE;
	return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);
	skb->ip_summed = CHECKSUM_NONE;
	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);
	skb->ip_summed = CHECKSUM_NONE;
	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		return 1;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);
	skb->ip_summed = CHECKSUM_NONE;
	return 1;
}

static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without a protocol check:
	 * UDPLITE uses udph->len for the checksum coverage instead.
	 * Use iph->tot_len, or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {
		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}
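
/*
 * Note on the CSUM_MANGLED_0 fixup above: for UDP over IPv4 a transmitted
 * checksum of zero means "no checksum" (RFC 768), so a computed sum of
 * zero must be sent as its one's-complement equivalent 0xffff instead.
 * The same fixup is applied for IPv6 below, where a zero checksum is
 * simply invalid.
 */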

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
		return 1;

	/*
	 * Support both UDP and UDPLITE checksum algorithms. Don't use
	 * udph->len to get the real length without a protocol check:
	 * UDPLITE uses udph->len for the checksum coverage instead.
	 * Use ip6h->payload_len + sizeof(*ip6h) ..., or just ipl.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);
		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);
		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_sctp(struct sk_buff *skb, unsigned int ihl,
			 unsigned int ipl)
{
	struct sctphdr *sctph;

	if (skb_is_gso(skb) && skb_is_gso_sctp(skb))
		return 1;

	sctph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*sctph));
	if (!sctph)
		return 0;

	sctph->checksum = sctp_compute_cksum(skb,
					     skb_network_offset(skb) + ihl);
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_not_inet = 0;

	return 1;
}
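
/*
 * SCTP does not use the Internet one's-complement checksum but a CRC32c
 * over the whole packet, which is why tcf_csum_sctp() defers to
 * sctp_compute_cksum() and clears skb->csum_not_inet afterwards.
 */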

static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	case IPPROTO_SCTP:
		if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
		    !tcf_csum_sctp(skb, iph->ihl * 4, ntohs(iph->tot_len)))
			goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}

static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}
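
/*
 * tcf_csum_ipv6_hopopts() exists to recover the real payload length of
 * IPv6 jumbograms: when a hop-by-hop jumbo option is present,
 * ip6h->payload_len is zero and the true length lives in the option, so
 * *pl is rewritten from it before any transport checksum is computed.
 */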

static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl = 0;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);
	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);
	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		case IPPROTO_SCTP:
			if ((update_flags & TCA_CSUM_UPDATE_FLAG_SCTP) &&
			    !tcf_csum_sctp(skb, hl, pl + sizeof(*ip6h)))
				goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}

static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
			struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	bool orig_vlan_tag_present = false;
	unsigned int vlan_hdr_count = 0;
	struct tcf_csum_params *params;
	u32 update_flags;
	__be16 protocol;
	int action;

	params = rcu_dereference_bh(p->params);

	tcf_lastuse_update(&p->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);

	action = READ_ONCE(p->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	update_flags = params->update_flags;
	protocol = tc_skb_protocol(skb);
again:
	switch (protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_8021AD): /* fall through */
	case cpu_to_be16(ETH_P_8021Q):
		if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
			protocol = skb->protocol;
			orig_vlan_tag_present = true;
		} else {
			struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;

			protocol = vlan->h_vlan_encapsulated_proto;
			skb_pull(skb, VLAN_HLEN);
			skb_reset_network_header(skb);
			vlan_hdr_count++;
		}
		goto again;
	}

out:
	/* Restore the skb for the pulled VLAN tags */
	while (vlan_hdr_count--) {
		skb_push(skb, VLAN_HLEN);
		skb_reset_network_header(skb);
	}

	return action;

drop:
	qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
	action = TC_ACT_SHOT;
	goto out;
}
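
/*
 * tcf_csum_act() runs from the qdisc/classifier fast path under the RCU
 * read lock in softirq context, which is why the parameter block is
 * fetched with rcu_dereference_bh() rather than taken under tcf_lock.
 */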

static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;
	struct tc_csum opt = {
		.index   = p->tcf_index,
		.refcnt  = refcount_read(&p->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&p->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&p->tcf_lock);
	params = rcu_dereference_protected(p->params,
					   lockdep_is_held(&p->tcf_lock));
	opt.action = p->tcf_action;
	opt.update_flags = params->update_flags;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&p->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&p->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static void tcf_csum_cleanup(struct tc_action *a)
{
	struct tcf_csum *p = to_tcf_csum(a);
	struct tcf_csum_params *params;

	params = rcu_dereference_protected(p->params, 1);
	if (params)
		kfree_rcu(params, rcu);
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_idr_search(tn, a, index);
}

static size_t tcf_csum_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_csum));
}

static struct tc_action_ops act_csum_ops = {
	.kind		= "csum",
	.id		= TCA_ID_CSUM,
	.owner		= THIS_MODULE,
	.act		= tcf_csum_act,
	.dump		= tcf_csum_dump,
	.init		= tcf_csum_init,
	.cleanup	= tcf_csum_cleanup,
	.walk		= tcf_csum_walker,
	.lookup		= tcf_csum_search,
	.get_fill_size	= tcf_csum_get_fill_size,
	.size		= sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tc_action_net_init(tn, &act_csum_ops);
}

static void __net_exit csum_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, csum_net_id);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit_batch = csum_exit_net,
	.id   = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);