1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 #include <net/netlink.h>
3 #include <net/sch_generic.h>
4 #include <net/pkt_sched.h>
7 #include <net/ip6_fib.h>
/* NOTE(review): the enclosing "struct sch_frag_data {" line is not visible in
 * this chunk. These appear to be fields of a per-CPU context that saves L2 /
 * qdisc state across IP fragmentation so sch_frag_xmit() can restore it on
 * each fragment — confirm against the full file. */
11 struct qdisc_skb_cb cb;
12 __be16 inner_protocol;
/* Buffer for the saved MAC (+ optional VLAN) header bytes. */
16 u8 l2_data[VLAN_ETH_HLEN];
/* Final transmit callback invoked for every fragment. */
17 int (*xmit)(struct sk_buff *skb);
20 static DEFINE_PER_CPU(struct sch_frag_data, sch_frag_data_storage);
/*
 * sch_frag_xmit - per-fragment output callback handed to the IP/IPv6
 * fragmentation helpers.
 *
 * Restores the L2 context stashed in the per-CPU sch_frag_data by
 * sch_frag_prepare_frag() (dst reference, qdisc control block, inner
 * protocol, VLAN tag and MAC header), then forwards the fragment via the
 * saved ->xmit hook.
 *
 * NOTE(review): lines are elided in this chunk — the body of the
 * skb_cow_head() failure branch and the "else" keyword before the VLAN
 * clear are not visible here; comments describe only what is shown.
 */
22 static int sch_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
24 struct sch_frag_data *data = this_cpu_ptr(&sch_frag_data_storage);
/* Ensure writable headroom for the MAC header we are about to push back. */
26 if (skb_cow_head(skb, data->l2_len) < 0) {
/* Restore the saved dst reference and qdisc control block. */
31 __skb_dst_copy(skb, data->dst);
32 *qdisc_skb_cb(skb) = data->cb;
33 skb->inner_protocol = data->inner_protocol;
/* The CFI bit doubles as a "VLAN tag was present" marker (set by
 * sch_frag_prepare_frag); strip it before re-applying the tag. */
34 if (data->vlan_tci & VLAN_CFI_MASK)
35 __vlan_hwaccel_put_tag(skb, data->vlan_proto,
36 data->vlan_tci & ~VLAN_CFI_MASK);
38 __vlan_hwaccel_clear_tag(skb);
40 /* Reconstruct the MAC header. */
41 skb_push(skb, data->l2_len);
42 memcpy(skb->data, &data->l2_data, data->l2_len);
/* Keep the checksum consistent after pushing the header bytes back. */
43 skb_postpush_rcsum(skb, skb->data, data->l2_len);
44 skb_reset_mac_header(skb);
/* Hand the finished fragment to the caller-supplied transmit function. */
46 return data->xmit(skb);
/*
 * sch_frag_prepare_frag - stash the skb's L2 / qdisc state in per-CPU
 * storage before handing the skb to the IP fragmentation code, which only
 * understands L3. sch_frag_xmit() restores this state on every fragment.
 *
 * NOTE(review): lines are elided in this chunk — the else branch of the
 * VLAN test and the assignments of data->l2_len and data->xmit are not
 * visible here.
 */
49 static void sch_frag_prepare_frag(struct sk_buff *skb,
50 int (*xmit)(struct sk_buff *skb))
/* L2 header length = distance from skb->data to the network header. */
52 unsigned int hlen = skb_network_offset(skb);
53 struct sch_frag_data *data;
55 data = this_cpu_ptr(&sch_frag_data_storage);
56 data->dst = skb->_skb_refdst;
57 data->cb = *qdisc_skb_cb(skb);
59 data->inner_protocol = skb->inner_protocol;
/* Record tag presence by setting the CFI bit alongside the TCI value;
 * sch_frag_xmit() masks it back out when restoring the tag. */
60 if (skb_vlan_tag_present(skb))
61 data->vlan_tci = skb_vlan_tag_get(skb) | VLAN_CFI_MASK;
64 data->vlan_proto = skb->vlan_proto;
/* Save the raw MAC header bytes so they can be re-pushed per fragment. */
66 memcpy(&data->l2_data, skb->data, hlen);
/* Clear the IP control block: fragmentation code owns it from here. */
68 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
73 sch_frag_dst_get_mtu(const struct dst_entry *dst)
/* Minimal dst_ops for the on-stack stub dst used during fragmentation;
 * only .mtu is visibly populated here (closing brace elided in this chunk). */
78 static struct dst_ops sch_frag_dst_ops = {
80 .mtu = sch_frag_dst_get_mtu,
/*
 * sch_fragment - software-fragment an skb that exceeds the MRU, then send
 * each fragment through @xmit.
 *
 * Builds a fake on-stack route (IPv4 rtable or IPv6 rt6_info) backed by
 * sch_frag_dst_ops so ip_do_fragment() / ipv6_stub->ipv6_fragment() have a
 * dst to consult, saves the L2 state via sch_frag_prepare_frag(), and lets
 * sch_frag_xmit() restore that state per fragment.
 *
 * NOTE(review): several lines are elided in this chunk — the early-return
 * after the "L2 header too long" warning, the closing braces of both
 * protocol branches, the trailing "else" before the final warning, the
 * second argument of ipv6_fragment(), and the function's return statement
 * are not visible here.
 */
83 static int sch_fragment(struct net *net, struct sk_buff *skb,
84 u16 mru, int (*xmit)(struct sk_buff *skb))
/* Saved L2 header must fit in sch_frag_data.l2_data (VLAN_ETH_HLEN). */
88 if (skb_network_offset(skb) > VLAN_ETH_HLEN) {
89 net_warn_ratelimited("L2 header too long to fragment\n");
93 if (skb_protocol(skb, true) == htons(ETH_P_IP)) {
/* Stub IPv4 route; DST_NOCOUNT because it lives on the stack. */
94 struct rtable sch_frag_rt = { 0 };
95 unsigned long orig_dst;
97 sch_frag_prepare_frag(skb, xmit);
98 dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL, 1,
99 DST_OBSOLETE_NONE, DST_NOCOUNT);
100 sch_frag_rt.dst.dev = skb->dev;
/* Keep the original dst reference so it can be dropped afterwards;
 * skb_dst_set_noref() installs the stack dst without refcounting. */
102 orig_dst = skb->_skb_refdst;
103 skb_dst_set_noref(skb, &sch_frag_rt.dst);
104 IPCB(skb)->frag_max_size = mru;
106 ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit);
107 refdst_drop(orig_dst);
108 } else if (skb_protocol(skb, true) == htons(ETH_P_IPV6)) {
109 unsigned long orig_dst;
/* Stub IPv6 route, zeroed explicitly (no = {0} as for IPv4 above). */
110 struct rt6_info sch_frag_rt;
112 sch_frag_prepare_frag(skb, xmit);
113 memset(&sch_frag_rt, 0, sizeof(sch_frag_rt));
114 dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL, 1,
115 DST_OBSOLETE_NONE, DST_NOCOUNT);
116 sch_frag_rt.dst.dev = skb->dev;
118 orig_dst = skb->_skb_refdst;
119 skb_dst_set_noref(skb, &sch_frag_rt.dst);
120 IP6CB(skb)->frag_max_size = mru;
/* IPv6 fragmentation lives in the ipv6 module; reached via the stub. */
122 ret = ipv6_stub->ipv6_fragment(net, skb->sk, skb,
124 refdst_drop(orig_dst);
/* Neither IPv4 nor IPv6: cannot fragment, warn (rate limited). */
126 net_warn_ratelimited("Fail frag %s: eth=%x, MRU=%d, MTU=%d\n",
127 netdev_name(skb->dev),
128 ntohs(skb_protocol(skb, true)), mru,
/*
 * sch_frag_xmit_hook - entry point used by tc actions: fragment @skb if it
 * exceeds the MRU recorded in its tc control block, otherwise (in elided
 * code, presumably) transmit it directly via @xmit.
 *
 * NOTE(review): the err declaration, the else/direct-transmit branch and
 * the return statement are elided in this chunk.
 */
139 int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
/* MRU of 0 means "no fragmentation requested". */
141 u16 mru = tc_skb_cb(skb)->mru;
/* Compare against MRU plus L2 header, since skb->len includes it. */
144 if (mru && skb->len > mru + skb->dev->hard_header_len)
145 err = sch_fragment(dev_net(skb->dev), skb, mru, xmit);
151 EXPORT_SYMBOL_GPL(sch_frag_xmit_hook);