// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"
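
/* Hand an skb whose VLAN tag was parsed by the lower layer to the matching
 * VLAN device stacked on skb->dev. Returns true if a VLAN device claimed the
 * packet; false tells the caller to keep processing it on the real device.
 */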
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	__be16 vlan_proto = skb->vlan_proto;
	u16 vlan_id = skb_vlan_tag_get_id(skb);
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	if (unlikely(!(vlan_dev->flags & IFF_UP))) {
		kfree_skb(skb);
		*skbp = NULL;
		return false;
	}

	skb->dev = vlan_dev;
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
	    !netif_is_macvlan_port(vlan_dev) &&
	    !netif_is_bridge_port(vlan_dev)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_inner_tag() expects skb->data to point at the
		 * mac header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
						    skb->vlan_tci, skb->mac_len);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	__vlan_hwaccel_clear_tag(skb);

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
					    __be16 vlan_proto, u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp,
					     vlan_proto, vlan_id);
	} else {
		/* Lower devices of master uppers (bonding, team) do not have
		 * a grp assigned to themselves. The grp is assigned to the
		 * upper device instead.
		 */
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
			return __vlan_find_dev_deep_rcu(upper_dev,
							vlan_proto, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
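
/* Walk down a stack of VLANs-on-VLANs and return the lowest real device. */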
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	struct net_device *ret = vlan_dev_priv(dev)->real_dev;

	while (is_vlan_dev(ret))
		ret = vlan_dev_priv(ret)->real_dev;

	return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i, j;

	for (i = 0; i < VLAN_PROTO_NUM; i++)
		for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
			kfree(grp->vlan_devices_arrays[i][j]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;
	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

struct vlan_vid_info {
	struct list_head list;
	__be16 proto;
	u16 vid;
	int refcount;
};
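
/* A device can only do hardware VLAN filtering for a protocol when it
 * advertises the matching feature flag.
 */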
static bool vlan_hw_filter_capable(const struct net_device *dev, __be16 proto)
{
	if (proto == htons(ETH_P_8021Q) &&
	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		return true;
	if (proto == htons(ETH_P_8021AD) &&
	    dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
		return true;
	return false;
}

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       __be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->proto == proto && vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->proto = proto;
	vid_info->vid = vid;
	return vid_info;
}

static int vlan_add_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid);
	else
		return -ENODEV;
}

static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (netif_device_present(dev))
		return dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
	else
		return -ENODEV;
}
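
/* Call @action for every VLAN device configured on top of @dev.
 * Must be called with RTNL held.
 */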
int vlan_for_each(struct net_device *dev,
		  int (*action)(struct net_device *dev, int vid, void *arg),
		  void *arg)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	struct net_device *vdev;
	int ret;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		vdev = vlan_group_get_device(&vlan_info->grp, vid_info->proto,
					     vid_info->vid);
		ret = action(vdev, vid_info->vid, arg);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(vlan_for_each);
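
/* Program every VID of the given protocol into the real device's hardware
 * filter, unwinding the ones already added if any of them fails.
 */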
int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct net_device *real_dev = vlan_info->real_dev;
	struct vlan_vid_info *vlan_vid_info;
	int err;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto) {
			err = vlan_add_rx_filter_info(real_dev, proto,
						      vlan_vid_info->vid);
			if (err)
				goto unwind;
		}
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(vlan_vid_info,
					     &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(real_dev, proto,
						 vlan_vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_filter_push_vids);

void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct vlan_vid_info *vlan_vid_info;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list)
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(vlan_info->real_dev,
						 vlan_vid_info->proto,
						 vlan_vid_info->vid);
}
EXPORT_SYMBOL(vlan_filter_drop_vids);

static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(proto, vid);
	if (!vid_info)
		return -ENOMEM;

	err = vlan_add_rx_filter_info(dev, proto, vid);
	if (err) {
		kfree(vid_info);
		return err;
	}

	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}
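
/* Take a reference on the (proto, vid) pair for @dev, allocating the
 * per-device vlan_info on first use and programming the hardware filter.
 */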
int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);

	return err;
}
EXPORT_SYMBOL(vlan_vid_add);

static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	__be16 proto = vid_info->proto;
	u16 vid = vid_info->vid;
	int err;

	err = vlan_kill_rx_filter_info(dev, proto, vid);
	if (err && dev->reg_state != NETREG_UNREGISTERING)
		netdev_warn(dev, "failed to kill vid %04x/%d\n", proto, vid);

	list_del(&vid_info->list);
	vlan_info->nr_vids--;
	kfree(vid_info);
}
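
/* Drop a reference on the (proto, vid) pair; the last reference removes the
 * hardware filter entry and, once no VIDs remain, frees vlan_info via RCU.
 */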
void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);
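
/* Report whether any VLAN devices are currently stacked on @dev. */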
bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
}
EXPORT_SYMBOL(vlan_uses_dev);
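
/* GRO receive handler for 802.1Q/802.1ad tagged frames: peel off the VLAN
 * header and hand the packet to the encapsulated protocol's GRO callback.
 */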
static struct sk_buff *vlan_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	const struct packet_offload *ptype;
	unsigned int hlen, off_vlan;
	struct sk_buff *pp = NULL;
	struct vlan_hdr *vhdr;
	struct sk_buff *p;
	__be16 type;
	int flush = 1;

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);
	vhdr = skb_gro_header_fast(skb, off_vlan);
	if (skb_gro_header_hard(skb, hlen)) {
		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
		if (unlikely(!vhdr))
			goto out;
	}

	type = vhdr->h_vlan_encapsulated_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	flush = 0;

	list_for_each_entry(p, head, list) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));

	pp = indirect_call_gro_receive_inet(ptype->callbacks.gro_receive,
					    ipv6_gro_receive, inet_gro_receive,
					    head, skb);

out_unlock:
	rcu_read_unlock();
out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
	__be16 type = vhdr->h_vlan_encapsulated_proto;
	struct packet_offload *ptype;
	int err = -ENOENT;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, nhoff + sizeof(*vhdr));

	rcu_read_unlock();
	return err;
}

static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};
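
/* Register the VLAN GRO handlers for both the 802.1Q and 802.1ad ethertypes. */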
static int __init vlan_offload_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_add_offload(&vlan_packet_offloads[i]);

	return 0;
}

fs_initcall(vlan_offload_init);