/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * net/dsa/dsa_priv.h - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#ifndef __DSA_PRIV_H
#define __DSA_PRIV_H

#include <linux/if_bridge.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <net/dsa.h>
#include <net/gro_cells.h>
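
/* Cross-chip notifier events, sent to every switch in a tree via
 * dsa_tree_notify() (or to all trees via dsa_broadcast()) and handled
 * by the switch notifier in switch.c.
 */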
enum {
        DSA_NOTIFIER_AGEING_TIME,
        DSA_NOTIFIER_BRIDGE_JOIN,
        DSA_NOTIFIER_BRIDGE_LEAVE,
        DSA_NOTIFIER_FDB_ADD,
        DSA_NOTIFIER_FDB_DEL,
        DSA_NOTIFIER_HSR_JOIN,
        DSA_NOTIFIER_HSR_LEAVE,
        DSA_NOTIFIER_LAG_CHANGE,
        DSA_NOTIFIER_LAG_JOIN,
        DSA_NOTIFIER_LAG_LEAVE,
        DSA_NOTIFIER_MDB_ADD,
        DSA_NOTIFIER_MDB_DEL,
        DSA_NOTIFIER_VLAN_ADD,
        DSA_NOTIFIER_VLAN_DEL,
        DSA_NOTIFIER_MTU,
        DSA_NOTIFIER_TAG_PROTO,
        DSA_NOTIFIER_MRP_ADD,
        DSA_NOTIFIER_MRP_DEL,
        DSA_NOTIFIER_MRP_ADD_RING_ROLE,
        DSA_NOTIFIER_MRP_DEL_RING_ROLE,
};

/* DSA_NOTIFIER_AGEING_TIME */
struct dsa_notifier_ageing_time_info {
        unsigned int ageing_time;
};

/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
        struct net_device *br;
        int tree_index;
        int sw_index;
        int port;
};

/* DSA_NOTIFIER_FDB_* */
struct dsa_notifier_fdb_info {
        int sw_index;
        int port;
        const unsigned char *addr;
        u16 vid;
};

/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
        const struct switchdev_obj_port_mdb *mdb;
        int sw_index;
        int port;
};

/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
        struct net_device *lag;
        int sw_index;
        int port;

        struct netdev_lag_upper_info *info;
};

/* DSA_NOTIFIER_VLAN_* */
struct dsa_notifier_vlan_info {
        const struct switchdev_obj_port_vlan *vlan;
        int sw_index;
        int port;
        struct netlink_ext_ack *extack;
};

/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
        bool propagate_upstream;
        int sw_index;
        int port;
        int mtu;
};

/* DSA_NOTIFIER_TAG_PROTO_* */
struct dsa_notifier_tag_proto_info {
        const struct dsa_device_ops *tag_ops;
};

/* DSA_NOTIFIER_MRP_* */
struct dsa_notifier_mrp_info {
        const struct switchdev_obj_mrp *mrp;
        int sw_index;
        int port;
};

/* DSA_NOTIFIER_MRP_* */
struct dsa_notifier_mrp_ring_role_info {
        const struct switchdev_obj_ring_role_mrp *mrp;
        int sw_index;
        int port;
};
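
/* Deferred work context used to process switchdev FDB add/del events
 * outside of the atomic notifier context in which they are raised.
 */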
struct dsa_switchdev_event_work {
        struct dsa_switch *ds;
        int port;
        struct work_struct work;
        unsigned long event;
        /* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
         * SWITCHDEV_FDB_DEL_TO_DEVICE
         */
        unsigned char addr[ETH_ALEN];
        u16 vid;
};

/* DSA_NOTIFIER_HSR_* */
struct dsa_notifier_hsr_info {
        struct net_device *hsr;
        int sw_index;
        int port;
};
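
/* netdev_priv() storage for each DSA user (slave) network device */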
struct dsa_slave_priv {
        /* Copy of CPU port xmit for faster access in slave transmit hot path */
        struct sk_buff *(*xmit)(struct sk_buff *skb,
                                struct net_device *dev);

        struct gro_cells gcells;

        /* DSA port data, such as switch, port index, etc. */
        struct dsa_port *dp;

#ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll *netpoll;
#endif

        /* TC context */
        struct list_head mall_tc_list;
};

/* dsa.c */
const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);

bool dsa_schedule_work(struct work_struct *work);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);

/* master.c */
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
void dsa_master_teardown(struct net_device *dev);
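
/* Find the user (slave) netdev behind the given master netdev that matches
 * the switch index and port index which a tagging driver parsed from a
 * received frame's tag.
 */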
static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
                                                        int device, int port)
{
        struct dsa_port *cpu_dp = dev->dsa_ptr;
        struct dsa_switch_tree *dst = cpu_dp->dst;
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dp->ds->index == device && dp->index == port &&
                    dp->type == DSA_PORT_TYPE_USER)
                        return dp->slave;

        return NULL;
}

/* port.c */
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
                               const struct dsa_device_ops *tag_ops);
int dsa_port_set_state(struct dsa_port *dp, u8 state);
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
void dsa_port_disable_rt(struct dsa_port *dp);
void dsa_port_disable(struct dsa_port *dp);
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
                         struct netlink_ext_ack *extack);
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
int dsa_port_lag_change(struct dsa_port *dp,
                        struct netdev_lag_lower_state_info *linfo);
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
                      struct netdev_lag_upper_info *uinfo,
                      struct netlink_ext_ack *extack);
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
                            struct netlink_ext_ack *extack);
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
                        bool propagate_upstream);
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
                     u16 vid);
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
                     u16 vid);
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
int dsa_port_mdb_add(const struct dsa_port *dp,
                     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_mdb_del(const struct dsa_port *dp,
                     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
                              struct switchdev_brport_flags flags,
                              struct netlink_ext_ack *extack);
int dsa_port_bridge_flags(const struct dsa_port *dp,
                          struct switchdev_brport_flags flags,
                          struct netlink_ext_ack *extack);
int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
                     struct netlink_ext_ack *extack);
int dsa_port_vlan_add(struct dsa_port *dp,
                      const struct switchdev_obj_port_vlan *vlan,
                      struct netlink_ext_ack *extack);
int dsa_port_vlan_del(struct dsa_port *dp,
                      const struct switchdev_obj_port_vlan *vlan);
int dsa_port_mrp_add(const struct dsa_port *dp,
                     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_del(const struct dsa_port *dp,
                     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
                               const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
                               const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_link_register_of(struct dsa_port *dp);
void dsa_port_link_unregister_of(struct dsa_port *dp);
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;
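
/* True if @dev is the bridge port net_device that @dp offloads, i.e. the
 * netdev through which @dp was added to a bridge.
 */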
static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
                                                 struct net_device *dev)
{
        return dsa_port_to_bridge_port(dp) == dev;
}

static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
                                            struct net_device *bridge_dev)
{
        /* DSA ports connected to a bridge, and event was emitted
         * for the bridge.
         */
        return dp->bridge_dev == bridge_dev;
}

/* Returns true if any port of this tree offloads the given net_device */
static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
                                                 struct net_device *dev)
{
        struct dsa_port *dp;

        list_for_each_entry(dp, &dst->ports, list)
                if (dsa_port_offloads_bridge_port(dp, dev))
                        return true;

        return false;
}

/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
extern struct notifier_block dsa_slave_switchdev_notifier;
extern struct notifier_block dsa_slave_switchdev_blocking_notifier;

void dsa_slave_mii_bus_init(struct dsa_switch *ds);
int dsa_slave_create(struct dsa_port *dp);
void dsa_slave_destroy(struct net_device *slave_dev);
int dsa_slave_suspend(struct net_device *slave_dev);
int dsa_slave_resume(struct net_device *slave_dev);
int dsa_slave_register_notifier(void);
void dsa_slave_unregister_notifier(void);
void dsa_slave_setup_tagger(struct net_device *slave);
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu);
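
/* Retrieve the dsa_port backing a DSA user (slave) net_device */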
static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
{
        struct dsa_slave_priv *p = netdev_priv(dev);

        return p->dp;
}
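
/* Retrieve the master net_device behind the CPU port serving this user port */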
static inline struct net_device *
dsa_slave_to_master(const struct net_device *dev)
{
        struct dsa_port *dp = dsa_slave_to_port(dev);

        return dp->cpu_dp->master;
}

/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
 * frames as untagged, since the bridge will not untag them.
 */
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
        struct dsa_port *dp = dsa_slave_to_port(skb->dev);
        struct net_device *br = dp->bridge_dev;
        struct net_device *dev = skb->dev;
        struct net_device *upper_dev;
        u16 vid, pvid, proto;
        int err;

        if (!br || br_vlan_enabled(br))
                return skb;

        err = br_vlan_get_proto(br, &proto);
        if (err)
                return skb;

        /* Move VLAN tag from data to hwaccel */
        if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
                skb = skb_vlan_untag(skb);
                if (!skb)
                        return NULL;
        }

        if (!skb_vlan_tag_present(skb))
                return skb;

        vid = skb_vlan_tag_get_id(skb);

        /* We already run under an RCU read-side critical section since
         * we are called from netif_receive_skb_list_internal().
         */
        err = br_vlan_get_pvid_rcu(dev, &pvid);
        if (err)
                return skb;

        if (vid != pvid)
                return skb;

        /* The sad part about attempting to untag from DSA is that we
         * don't know, unless we check, if the skb will end up in
         * the bridge's data path - br_allowed_ingress() - or not.
         * For example, there might be an 8021q upper for the
         * default_pvid of the bridge, which will steal VLAN-tagged traffic
         * from the bridge's data path. This is a configuration that DSA
         * supports because vlan_filtering is 0. In that case, we should
         * definitely keep the tag, to make sure it keeps working.
         */
        upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
        if (upper_dev)
                return skb;

        __vlan_hwaccel_clear_tag(skb);

        return skb;
}

/* switch.c */
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);

/* dsa2.c */
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
int dsa_broadcast(unsigned long e, void *v);
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
                              struct net_device *master,
                              const struct dsa_device_ops *tag_ops,
                              const struct dsa_device_ops *old_tag_ops);

extern struct list_head dsa_tree_list;

#endif