af_unix: Call kfree_skb() for dead unix_(sk)->oob_skb in GC.
[platform/kernel/linux-starfive.git] / net / dsa / tag.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * DSA tagging protocol handling
4  *
5  * Copyright (c) 2008-2009 Marvell Semiconductor
6  * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
7  * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
8  */
9
10 #include <linux/netdevice.h>
11 #include <linux/ptp_classify.h>
12 #include <linux/skbuff.h>
13 #include <net/dsa.h>
14 #include <net/dst_metadata.h>
15
16 #include "slave.h"
17 #include "tag.h"
18
/* Global registry of all loaded tagging protocol drivers. Both the list
 * and every traversal/mutation of it are serialized by
 * dsa_tag_drivers_lock.
 */
static LIST_HEAD(dsa_tag_drivers_list);
static DEFINE_MUTEX(dsa_tag_drivers_lock);
21
22 /* Determine if we should defer delivery of skb until we have a rx timestamp.
23  *
24  * Called from dsa_switch_rcv. For now, this will only work if tagging is
25  * enabled on the switch. Normally the MAC driver would retrieve the hardware
26  * timestamp when it reads the packet out of the hardware. However in a DSA
27  * switch, the DSA driver owning the interface to which the packet is
28  * delivered is never notified unless we do so here.
29  */
30 static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p,
31                                        struct sk_buff *skb)
32 {
33         struct dsa_switch *ds = p->dp->ds;
34         unsigned int type;
35
36         if (!ds->ops->port_rxtstamp)
37                 return false;
38
39         if (skb_headroom(skb) < ETH_HLEN)
40                 return false;
41
42         __skb_push(skb, ETH_HLEN);
43
44         type = ptp_classify_raw(skb);
45
46         __skb_pull(skb, ETH_HLEN);
47
48         if (type == PTP_CLASS_NONE)
49                 return false;
50
51         return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type);
52 }
53
/* Entry point for all frames received on a DSA master (ETH_P_XDSA).
 * Decodes the switch tag (or the metadata destination, for muxed
 * hardware ports), re-targets the skb at the corresponding slave
 * netdevice and injects it into the stack. Always consumes the skb
 * (frees it on error) and always returns 0.
 */
static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
			  struct packet_type *pt, struct net_device *unused)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct sk_buff *nskb = NULL;
	struct dsa_slave_priv *p;

	/* No CPU port attached to this master: nothing can demux the
	 * frame, so drop it.
	 */
	if (unlikely(!cpu_dp)) {
		kfree_skb(skb);
		return 0;
	}

	/* Taggers may edit headers in place, so make sure we hold a
	 * private copy of the data.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return 0;

	if (md_dst && md_dst->type == METADATA_HW_PORT_MUX) {
		/* Port was resolved out of band by the master driver via
		 * skb metadata rather than an in-band switch tag.
		 */
		unsigned int port = md_dst->u.port_info.port_id;

		skb_dst_drop(skb);
		if (!skb_has_extensions(skb))
			skb->slow_gro = 0;

		skb->dev = dsa_master_find_slave(dev, 0, port);
		if (likely(skb->dev)) {
			dsa_default_offload_fwd_mark(skb);
			nskb = skb;
		}
	} else {
		/* Tagger decodes the source port and may trim the tag;
		 * returns NULL if the frame cannot be delivered.
		 */
		nskb = cpu_dp->rcv(skb, dev);
	}

	if (!nskb) {
		kfree_skb(skb);
		return 0;
	}

	skb = nskb;
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	if (unlikely(!dsa_slave_dev_check(skb->dev))) {
		/* Packet is to be injected directly on an upper
		 * device, e.g. a team/bond, so skip all DSA-port
		 * specific actions.
		 */
		netif_rx(skb);
		return 0;
	}

	p = netdev_priv(skb->dev);

	/* Optionally strip the VLAN tag matching the bridge's PVID when
	 * the switch cannot untag on egress to the CPU itself.
	 */
	if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
		nskb = dsa_untag_bridge_pvid(skb);
		if (!nskb) {
			kfree_skb(skb);
			return 0;
		}
		skb = nskb;
	}

	/* Account the Ethernet header we just re-pushed as received bytes. */
	dev_sw_netstats_rx_add(skb->dev, skb->len + ETH_HLEN);

	/* If the driver defers delivery until an RX timestamp arrives, it
	 * now owns the skb.
	 */
	if (dsa_skb_defer_rx_timestamp(p, skb))
		return 0;

	gro_cells_receive(&p->gcells, skb);

	return 0;
}
126
/* Hook registered with the networking core: every frame a DSA master
 * delivers with protocol ETH_P_XDSA is steered into dsa_switch_rcv().
 */
struct packet_type dsa_pack_type __read_mostly = {
	.type	= cpu_to_be16(ETH_P_XDSA),
	.func	= dsa_switch_rcv,
};
131
/* Record @owner (for module refcounting in the get/put paths) and link a
 * single tagger into the global registry under the registry lock.
 */
static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver,
				    struct module *owner)
{
	dsa_tag_driver->owner = owner;

	mutex_lock(&dsa_tag_drivers_lock);
	list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list);
	mutex_unlock(&dsa_tag_drivers_lock);
}
141
142 void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
143                               unsigned int count, struct module *owner)
144 {
145         unsigned int i;
146
147         for (i = 0; i < count; i++)
148                 dsa_tag_driver_register(dsa_tag_driver_array[i], owner);
149 }
150
151 static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver)
152 {
153         mutex_lock(&dsa_tag_drivers_lock);
154         list_del(&dsa_tag_driver->list);
155         mutex_unlock(&dsa_tag_drivers_lock);
156 }
157 EXPORT_SYMBOL_GPL(dsa_tag_drivers_register);
158
/**
 * dsa_tag_drivers_unregister - Remove an array of taggers from the registry
 * @dsa_tag_driver_array: array of taggers previously registered
 * @count: number of entries in @dsa_tag_driver_array
 */
void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
				unsigned int count)
{
	struct dsa_tag_driver **drv = dsa_tag_driver_array;

	while (count--)
		dsa_tag_driver_unregister(*drv++);
}
EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister);
168
169 const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops)
170 {
171         return ops->name;
172 };
173
174 /* Function takes a reference on the module owning the tagger,
175  * so dsa_tag_driver_put must be called afterwards.
176  */
177 const struct dsa_device_ops *dsa_tag_driver_get_by_name(const char *name)
178 {
179         const struct dsa_device_ops *ops = ERR_PTR(-ENOPROTOOPT);
180         struct dsa_tag_driver *dsa_tag_driver;
181
182         request_module("%s%s", DSA_TAG_DRIVER_ALIAS, name);
183
184         mutex_lock(&dsa_tag_drivers_lock);
185         list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
186                 const struct dsa_device_ops *tmp = dsa_tag_driver->ops;
187
188                 if (strcmp(name, tmp->name))
189                         continue;
190
191                 if (!try_module_get(dsa_tag_driver->owner))
192                         break;
193
194                 ops = tmp;
195                 break;
196         }
197         mutex_unlock(&dsa_tag_drivers_lock);
198
199         return ops;
200 }
201
202 const struct dsa_device_ops *dsa_tag_driver_get_by_id(int tag_protocol)
203 {
204         struct dsa_tag_driver *dsa_tag_driver;
205         const struct dsa_device_ops *ops;
206         bool found = false;
207
208         request_module("%sid-%d", DSA_TAG_DRIVER_ALIAS, tag_protocol);
209
210         mutex_lock(&dsa_tag_drivers_lock);
211         list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
212                 ops = dsa_tag_driver->ops;
213                 if (ops->proto == tag_protocol) {
214                         found = true;
215                         break;
216                 }
217         }
218
219         if (found) {
220                 if (!try_module_get(dsa_tag_driver->owner))
221                         ops = ERR_PTR(-ENOPROTOOPT);
222         } else {
223                 ops = ERR_PTR(-ENOPROTOOPT);
224         }
225
226         mutex_unlock(&dsa_tag_drivers_lock);
227
228         return ops;
229 }
230
231 void dsa_tag_driver_put(const struct dsa_device_ops *ops)
232 {
233         struct dsa_tag_driver *dsa_tag_driver;
234
235         mutex_lock(&dsa_tag_drivers_lock);
236         list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) {
237                 if (dsa_tag_driver->ops == ops) {
238                         module_put(dsa_tag_driver->owner);
239                         break;
240                 }
241         }
242         mutex_unlock(&dsa_tag_drivers_lock);
243 }