// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2011, Intel Corporation.
 *
 * Description: Data Center Bridging netlink interface
 * Author: Lucy Liu <lucy.liu@intel.com>
 */
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <net/sock.h>
/* Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet.  Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 *	framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 *	can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 *	control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
 */
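/* Example (illustrative sketch, not part of this file): a driver advertises
 * DCB support by pointing netdev->dcbnl_ops at its dcbnl_rtnl_ops table
 * before registering the netdev.  The foo_* callbacks below are hypothetical
 * driver functions; only the operations a device actually supports need to
 * be filled in.
 *
 *	static const struct dcbnl_rtnl_ops foo_dcbnl_ops = {
 *		.getstate	= foo_getstate,
 *		.setstate	= foo_setstate,
 *		.ieee_getets	= foo_ieee_getets,
 *		.ieee_setets	= foo_ieee_setets,
 *		.ieee_getpfc	= foo_ieee_getpfc,
 *		.ieee_setpfc	= foo_ieee_setpfc,
 *	};
 *
 *	netdev->dcbnl_ops = &foo_dcbnl_ops;
 */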
41 /**************** DCB attribute policies *************************************/
43 /* DCB netlink attributes policy */
44 static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
45 [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
46 [DCB_ATTR_STATE] = {.type = NLA_U8},
47 [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
48 [DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
49 [DCB_ATTR_SET_ALL] = {.type = NLA_U8},
50 [DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
51 [DCB_ATTR_CAP] = {.type = NLA_NESTED},
52 [DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
53 [DCB_ATTR_BCN] = {.type = NLA_NESTED},
54 [DCB_ATTR_APP] = {.type = NLA_NESTED},
55 [DCB_ATTR_IEEE] = {.type = NLA_NESTED},
56 [DCB_ATTR_DCBX] = {.type = NLA_U8},
57 [DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
60 /* DCB priority flow control to User Priority nested attributes */
61 static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
62 [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
63 [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
64 [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
65 [DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
66 [DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
67 [DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
68 [DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
69 [DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
70 [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
73 /* DCB priority grouping nested attributes */
74 static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
75 [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
76 [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
77 [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
78 [DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
79 [DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
80 [DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
81 [DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
82 [DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
83 [DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
84 [DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
85 [DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
86 [DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
87 [DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
88 [DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
89 [DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
90 [DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
91 [DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
92 [DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
95 /* DCB traffic class nested attributes. */
96 static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
97 [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
98 [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
99 [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
100 [DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
101 [DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
104 /* DCB capabilities nested attributes. */
105 static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
106 [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
107 [DCB_CAP_ATTR_PG] = {.type = NLA_U8},
108 [DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
109 [DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
110 [DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
111 [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
112 [DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
113 [DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
114 [DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
/* DCB number of traffic classes nested attributes. */
118 static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
119 [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
120 [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
121 [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
124 /* DCB BCN nested attributes. */
125 static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
126 [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
127 [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
128 [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
129 [DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
130 [DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
131 [DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
132 [DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
133 [DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
134 [DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
135 [DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
136 [DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
137 [DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
138 [DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
139 [DCB_BCN_ATTR_GD] = {.type = NLA_U32},
140 [DCB_BCN_ATTR_GI] = {.type = NLA_U32},
141 [DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
142 [DCB_BCN_ATTR_TD] = {.type = NLA_U32},
143 [DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
144 [DCB_BCN_ATTR_W] = {.type = NLA_U32},
145 [DCB_BCN_ATTR_RD] = {.type = NLA_U32},
146 [DCB_BCN_ATTR_RU] = {.type = NLA_U32},
147 [DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
148 [DCB_BCN_ATTR_RI] = {.type = NLA_U32},
149 [DCB_BCN_ATTR_C] = {.type = NLA_U32},
150 [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
153 /* DCB APP nested attributes. */
154 static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
155 [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
156 [DCB_APP_ATTR_ID] = {.type = NLA_U16},
157 [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
160 /* IEEE 802.1Qaz nested attributes. */
161 static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
162 [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
163 [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
164 [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
165 [DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
166 [DCB_ATTR_IEEE_QCN] = {.len = sizeof(struct ieee_qcn)},
167 [DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
168 [DCB_ATTR_DCB_BUFFER] = {.len = sizeof(struct dcbnl_buffer)},
169 [DCB_ATTR_DCB_APP_TRUST_TABLE] = {.type = NLA_NESTED},
/* DCB feature configuration nested attributes. */
173 static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
174 [DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
175 [DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8},
176 [DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
177 [DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
180 static LIST_HEAD(dcb_app_list);
181 static LIST_HEAD(dcb_rewr_list);
182 static DEFINE_SPINLOCK(dcb_lock);
184 static enum ieee_attrs_app dcbnl_app_attr_type_get(u8 selector)
187 case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
188 case IEEE_8021QAZ_APP_SEL_STREAM:
189 case IEEE_8021QAZ_APP_SEL_DGRAM:
190 case IEEE_8021QAZ_APP_SEL_ANY:
191 case IEEE_8021QAZ_APP_SEL_DSCP:
192 return DCB_ATTR_IEEE_APP;
193 case DCB_APP_SEL_PCP:
194 return DCB_ATTR_DCB_APP;
196 return DCB_ATTR_IEEE_APP_UNSPEC;
200 static bool dcbnl_app_attr_type_validate(enum ieee_attrs_app type)
203 case DCB_ATTR_IEEE_APP:
204 case DCB_ATTR_DCB_APP:
211 static bool dcbnl_app_selector_validate(enum ieee_attrs_app type, u8 selector)
213 return dcbnl_app_attr_type_get(selector) == type;
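/* Example (sketch): dcbnl_app_attr_type_get() maps an APP selector to the
 * netlink attribute type that carries it, and the two validate helpers are
 * the inverse checks used while parsing.  These follow directly from the
 * switch statements above:
 *
 *	dcbnl_app_attr_type_get(IEEE_8021QAZ_APP_SEL_DSCP) == DCB_ATTR_IEEE_APP
 *	dcbnl_app_attr_type_get(DCB_APP_SEL_PCP)           == DCB_ATTR_DCB_APP
 *	dcbnl_app_selector_validate(DCB_ATTR_IEEE_APP,
 *				    IEEE_8021QAZ_APP_SEL_DSCP) == true
 *	dcbnl_app_selector_validate(DCB_ATTR_DCB_APP,
 *				    IEEE_8021QAZ_APP_SEL_DSCP) == false
 */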
216 static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
217 u32 flags, struct nlmsghdr **nlhp)
221 struct nlmsghdr *nlh;
223 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
227 nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
230 dcb = nlmsg_data(nlh);
231 dcb->dcb_family = AF_UNSPEC;
241 static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
242 u32 seq, struct nlattr **tb, struct sk_buff *skb)
244 /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
245 if (!netdev->dcbnl_ops->getstate)
248 return nla_put_u8(skb, DCB_ATTR_STATE,
249 netdev->dcbnl_ops->getstate(netdev));
252 static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
253 u32 seq, struct nlattr **tb, struct sk_buff *skb)
255 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
261 if (!tb[DCB_ATTR_PFC_CFG])
264 if (!netdev->dcbnl_ops->getpfccfg)
267 ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
268 tb[DCB_ATTR_PFC_CFG],
269 dcbnl_pfc_up_nest, NULL);
273 nest = nla_nest_start_noflag(skb, DCB_ATTR_PFC_CFG);
277 if (data[DCB_PFC_UP_ATTR_ALL])
280 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
281 if (!getall && !data[i])
284 netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
286 ret = nla_put_u8(skb, i, value);
288 nla_nest_cancel(skb, nest);
292 nla_nest_end(skb, nest);
297 static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
298 u32 seq, struct nlattr **tb, struct sk_buff *skb)
300 u8 perm_addr[MAX_ADDR_LEN];
302 if (!netdev->dcbnl_ops->getpermhwaddr)
305 memset(perm_addr, 0, sizeof(perm_addr));
306 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
308 return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
311 static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
312 u32 seq, struct nlattr **tb, struct sk_buff *skb)
314 struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
320 if (!tb[DCB_ATTR_CAP])
323 if (!netdev->dcbnl_ops->getcap)
326 ret = nla_parse_nested_deprecated(data, DCB_CAP_ATTR_MAX,
327 tb[DCB_ATTR_CAP], dcbnl_cap_nest,
332 nest = nla_nest_start_noflag(skb, DCB_ATTR_CAP);
336 if (data[DCB_CAP_ATTR_ALL])
339 for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
340 if (!getall && !data[i])
343 if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
344 ret = nla_put_u8(skb, i, value);
346 nla_nest_cancel(skb, nest);
351 nla_nest_end(skb, nest);
356 static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
357 u32 seq, struct nlattr **tb, struct sk_buff *skb)
359 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
365 if (!tb[DCB_ATTR_NUMTCS])
368 if (!netdev->dcbnl_ops->getnumtcs)
371 ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
373 dcbnl_numtcs_nest, NULL);
377 nest = nla_nest_start_noflag(skb, DCB_ATTR_NUMTCS);
381 if (data[DCB_NUMTCS_ATTR_ALL])
384 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
385 if (!getall && !data[i])
388 ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
390 ret = nla_put_u8(skb, i, value);
392 nla_nest_cancel(skb, nest);
398 nla_nest_end(skb, nest);
403 static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
404 u32 seq, struct nlattr **tb, struct sk_buff *skb)
406 struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
411 if (!tb[DCB_ATTR_NUMTCS])
414 if (!netdev->dcbnl_ops->setnumtcs)
417 ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
419 dcbnl_numtcs_nest, NULL);
423 for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
427 value = nla_get_u8(data[i]);
429 ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
434 return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
437 static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
438 u32 seq, struct nlattr **tb, struct sk_buff *skb)
440 if (!netdev->dcbnl_ops->getpfcstate)
443 return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
444 netdev->dcbnl_ops->getpfcstate(netdev));
447 static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
448 u32 seq, struct nlattr **tb, struct sk_buff *skb)
452 if (!tb[DCB_ATTR_PFC_STATE])
455 if (!netdev->dcbnl_ops->setpfcstate)
458 value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
460 netdev->dcbnl_ops->setpfcstate(netdev, value);
462 return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
465 static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
466 u32 seq, struct nlattr **tb, struct sk_buff *skb)
468 struct nlattr *app_nest;
469 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
474 if (!tb[DCB_ATTR_APP])
477 ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
478 tb[DCB_ATTR_APP], dcbnl_app_nest,
483 /* all must be non-null */
484 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
485 (!app_tb[DCB_APP_ATTR_ID]))
488 /* either by eth type or by socket number */
489 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
490 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
491 (idtype != DCB_APP_IDTYPE_PORTNUM))
494 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
496 if (netdev->dcbnl_ops->getapp) {
497 ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
503 struct dcb_app app = {
507 up = dcb_getapp(netdev, &app);
510 app_nest = nla_nest_start_noflag(skb, DCB_ATTR_APP);
514 ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
518 ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
522 ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
526 nla_nest_end(skb, app_nest);
531 nla_nest_cancel(skb, app_nest);
535 static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
536 u32 seq, struct nlattr **tb, struct sk_buff *skb)
541 struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
543 if (!tb[DCB_ATTR_APP])
546 ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
547 tb[DCB_ATTR_APP], dcbnl_app_nest,
552 /* all must be non-null */
553 if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
554 (!app_tb[DCB_APP_ATTR_ID]) ||
555 (!app_tb[DCB_APP_ATTR_PRIORITY]))
558 /* either by eth type or by socket number */
559 idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
560 if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
561 (idtype != DCB_APP_IDTYPE_PORTNUM))
564 id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
565 up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
567 if (netdev->dcbnl_ops->setapp) {
568 ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
573 app.selector = idtype;
576 ret = dcb_setapp(netdev, &app);
579 ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
580 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
585 static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
586 struct nlattr **tb, struct sk_buff *skb, int dir)
588 struct nlattr *pg_nest, *param_nest, *data;
589 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
590 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
591 u8 prio, pgid, tc_pct, up_map;
596 if (!tb[DCB_ATTR_PG_CFG])
599 if (!netdev->dcbnl_ops->getpgtccfgtx ||
600 !netdev->dcbnl_ops->getpgtccfgrx ||
601 !netdev->dcbnl_ops->getpgbwgcfgtx ||
602 !netdev->dcbnl_ops->getpgbwgcfgrx)
605 ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
606 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
611 pg_nest = nla_nest_start_noflag(skb, DCB_ATTR_PG_CFG);
615 if (pg_tb[DCB_PG_ATTR_TC_ALL])
618 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
619 if (!getall && !pg_tb[i])
622 if (pg_tb[DCB_PG_ATTR_TC_ALL])
623 data = pg_tb[DCB_PG_ATTR_TC_ALL];
626 ret = nla_parse_nested_deprecated(param_tb,
627 DCB_TC_ATTR_PARAM_MAX, data,
628 dcbnl_tc_param_nest, NULL);
632 param_nest = nla_nest_start_noflag(skb, i);
636 pgid = DCB_ATTR_VALUE_UNDEFINED;
637 prio = DCB_ATTR_VALUE_UNDEFINED;
638 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
639 up_map = DCB_ATTR_VALUE_UNDEFINED;
643 netdev->dcbnl_ops->getpgtccfgrx(netdev,
644 i - DCB_PG_ATTR_TC_0, &prio,
645 &pgid, &tc_pct, &up_map);
648 netdev->dcbnl_ops->getpgtccfgtx(netdev,
649 i - DCB_PG_ATTR_TC_0, &prio,
650 &pgid, &tc_pct, &up_map);
653 if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
654 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
655 ret = nla_put_u8(skb,
656 DCB_TC_ATTR_PARAM_PGID, pgid);
660 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
661 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
662 ret = nla_put_u8(skb,
663 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
667 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
668 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
669 ret = nla_put_u8(skb,
670 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
674 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
675 param_tb[DCB_TC_ATTR_PARAM_ALL]) {
676 ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
681 nla_nest_end(skb, param_nest);
684 if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
689 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
690 if (!getall && !pg_tb[i])
693 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
697 netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
698 i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
701 netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
702 i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
704 ret = nla_put_u8(skb, i, tc_pct);
709 nla_nest_end(skb, pg_nest);
714 nla_nest_cancel(skb, param_nest);
716 nla_nest_cancel(skb, pg_nest);
721 static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
722 u32 seq, struct nlattr **tb, struct sk_buff *skb)
724 return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
727 static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
728 u32 seq, struct nlattr **tb, struct sk_buff *skb)
730 return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
733 static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
734 u32 seq, struct nlattr **tb, struct sk_buff *skb)
738 if (!tb[DCB_ATTR_STATE])
741 if (!netdev->dcbnl_ops->setstate)
744 value = nla_get_u8(tb[DCB_ATTR_STATE]);
746 return nla_put_u8(skb, DCB_ATTR_STATE,
747 netdev->dcbnl_ops->setstate(netdev, value));
750 static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
751 u32 seq, struct nlattr **tb, struct sk_buff *skb)
753 struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
758 if (!tb[DCB_ATTR_PFC_CFG])
761 if (!netdev->dcbnl_ops->setpfccfg)
764 ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
765 tb[DCB_ATTR_PFC_CFG],
766 dcbnl_pfc_up_nest, NULL);
770 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
773 value = nla_get_u8(data[i]);
774 netdev->dcbnl_ops->setpfccfg(netdev,
775 data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
778 return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
781 static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
782 u32 seq, struct nlattr **tb, struct sk_buff *skb)
786 if (!tb[DCB_ATTR_SET_ALL])
789 if (!netdev->dcbnl_ops->setall)
792 ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
793 netdev->dcbnl_ops->setall(netdev));
794 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
799 static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
800 u32 seq, struct nlattr **tb, struct sk_buff *skb,
803 struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
804 struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
812 if (!tb[DCB_ATTR_PG_CFG])
815 if (!netdev->dcbnl_ops->setpgtccfgtx ||
816 !netdev->dcbnl_ops->setpgtccfgrx ||
817 !netdev->dcbnl_ops->setpgbwgcfgtx ||
818 !netdev->dcbnl_ops->setpgbwgcfgrx)
821 ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
822 tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
827 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
831 ret = nla_parse_nested_deprecated(param_tb,
832 DCB_TC_ATTR_PARAM_MAX,
834 dcbnl_tc_param_nest, NULL);
838 pgid = DCB_ATTR_VALUE_UNDEFINED;
839 prio = DCB_ATTR_VALUE_UNDEFINED;
840 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
841 up_map = DCB_ATTR_VALUE_UNDEFINED;
843 if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
845 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);
847 if (param_tb[DCB_TC_ATTR_PARAM_PGID])
848 pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);
850 if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
851 tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);
853 if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
855 nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);
857 /* dir: Tx = 0, Rx = 1 */
860 netdev->dcbnl_ops->setpgtccfgrx(netdev,
861 i - DCB_PG_ATTR_TC_0,
862 prio, pgid, tc_pct, up_map);
865 netdev->dcbnl_ops->setpgtccfgtx(netdev,
866 i - DCB_PG_ATTR_TC_0,
867 prio, pgid, tc_pct, up_map);
871 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
875 tc_pct = nla_get_u8(pg_tb[i]);
877 /* dir: Tx = 0, Rx = 1 */
880 netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
881 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
884 netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
885 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
889 return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
892 static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
893 u32 seq, struct nlattr **tb, struct sk_buff *skb)
895 return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
898 static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
899 u32 seq, struct nlattr **tb, struct sk_buff *skb)
901 return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
904 static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
905 u32 seq, struct nlattr **tb, struct sk_buff *skb)
907 struct nlattr *bcn_nest;
908 struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
915 if (!tb[DCB_ATTR_BCN])
918 if (!netdev->dcbnl_ops->getbcnrp ||
919 !netdev->dcbnl_ops->getbcncfg)
922 ret = nla_parse_nested_deprecated(bcn_tb, DCB_BCN_ATTR_MAX,
923 tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
928 bcn_nest = nla_nest_start_noflag(skb, DCB_ATTR_BCN);
932 if (bcn_tb[DCB_BCN_ATTR_ALL])
935 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
936 if (!getall && !bcn_tb[i])
939 netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
941 ret = nla_put_u8(skb, i, value_byte);
946 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
947 if (!getall && !bcn_tb[i])
950 netdev->dcbnl_ops->getbcncfg(netdev, i,
952 ret = nla_put_u32(skb, i, value_integer);
957 nla_nest_end(skb, bcn_nest);
962 nla_nest_cancel(skb, bcn_nest);
966 static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
967 u32 seq, struct nlattr **tb, struct sk_buff *skb)
969 struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
975 if (!tb[DCB_ATTR_BCN])
978 if (!netdev->dcbnl_ops->setbcncfg ||
979 !netdev->dcbnl_ops->setbcnrp)
982 ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX,
983 tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
988 for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
991 value_byte = nla_get_u8(data[i]);
992 netdev->dcbnl_ops->setbcnrp(netdev,
993 data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
996 for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
999 value_int = nla_get_u32(data[i]);
1000 netdev->dcbnl_ops->setbcncfg(netdev,
1004 return nla_put_u8(skb, DCB_ATTR_BCN, 0);
1007 static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
1008 int app_nested_type, int app_info_type,
1011 struct dcb_peer_app_info info;
1012 struct dcb_app *table = NULL;
1013 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
 * retrieve the peer app configuration from the driver. If the driver
 * handlers fail, exit without doing anything
1022 err = ops->peer_getappinfo(netdev, &info, &app_count);
1023 if (!err && app_count) {
1024 table = kmalloc_array(app_count, sizeof(struct dcb_app),
1029 err = ops->peer_getapptable(netdev, table);
1037 * build the message, from here on the only possible failure
1038 * is due to the skb size
1042 app = nla_nest_start_noflag(skb, app_nested_type);
1044 goto nla_put_failure;
1046 if (app_info_type &&
1047 nla_put(skb, app_info_type, sizeof(info), &info))
1048 goto nla_put_failure;
1050 for (i = 0; i < app_count; i++) {
1051 if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
1053 goto nla_put_failure;
1055 nla_nest_end(skb, app);
1064 static int dcbnl_getapptrust(struct net_device *netdev, struct sk_buff *skb)
1066 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1067 enum ieee_attrs_app type;
1068 struct nlattr *apptrust;
1069 int nselectors, err, i;
1072 selectors = kzalloc(IEEE_8021QAZ_APP_SEL_MAX + 1, GFP_KERNEL);
1076 err = ops->dcbnl_getapptrust(netdev, selectors, &nselectors);
1082 apptrust = nla_nest_start(skb, DCB_ATTR_DCB_APP_TRUST_TABLE);
1088 for (i = 0; i < nselectors; i++) {
1089 type = dcbnl_app_attr_type_get(selectors[i]);
1090 err = nla_put_u8(skb, type, selectors[i]);
1092 nla_nest_cancel(skb, apptrust);
1096 nla_nest_end(skb, apptrust);
1103 /* Set or delete APP table or rewrite table entries. The APP struct is validated
1104 * and the appropriate callback function is called.
1106 static int dcbnl_app_table_setdel(struct nlattr *attr,
1107 struct net_device *netdev,
1108 int (*setdel)(struct net_device *dev,
1109 struct dcb_app *app))
1111 struct dcb_app *app_data;
1112 enum ieee_attrs_app type;
1113 struct nlattr *attr_itr;
1116 nla_for_each_nested(attr_itr, attr, rem) {
1117 type = nla_type(attr_itr);
1119 if (!dcbnl_app_attr_type_validate(type))
1122 if (nla_len(attr_itr) < sizeof(struct dcb_app))
1125 app_data = nla_data(attr_itr);
1127 if (!dcbnl_app_selector_validate(type, app_data->selector))
1130 err = setdel(netdev, app_data);
1138 /* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
1139 static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1141 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1142 struct nlattr *ieee, *app, *rewr;
1143 struct dcb_app_type *itr;
1147 if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
1150 ieee = nla_nest_start_noflag(skb, DCB_ATTR_IEEE);
1154 if (ops->ieee_getets) {
1155 struct ieee_ets ets;
1156 memset(&ets, 0, sizeof(ets));
1157 err = ops->ieee_getets(netdev, &ets);
1159 nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
1163 if (ops->ieee_getmaxrate) {
1164 struct ieee_maxrate maxrate;
1165 memset(&maxrate, 0, sizeof(maxrate));
1166 err = ops->ieee_getmaxrate(netdev, &maxrate);
1168 err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
1169 sizeof(maxrate), &maxrate);
1175 if (ops->ieee_getqcn) {
1176 struct ieee_qcn qcn;
1178 memset(&qcn, 0, sizeof(qcn));
1179 err = ops->ieee_getqcn(netdev, &qcn);
1181 err = nla_put(skb, DCB_ATTR_IEEE_QCN,
1188 if (ops->ieee_getqcnstats) {
1189 struct ieee_qcn_stats qcn_stats;
1191 memset(&qcn_stats, 0, sizeof(qcn_stats));
1192 err = ops->ieee_getqcnstats(netdev, &qcn_stats);
1194 err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
1195 sizeof(qcn_stats), &qcn_stats);
1201 if (ops->ieee_getpfc) {
1202 struct ieee_pfc pfc;
1203 memset(&pfc, 0, sizeof(pfc));
1204 err = ops->ieee_getpfc(netdev, &pfc);
1206 nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
1210 if (ops->dcbnl_getbuffer) {
1211 struct dcbnl_buffer buffer;
1213 memset(&buffer, 0, sizeof(buffer));
1214 err = ops->dcbnl_getbuffer(netdev, &buffer);
1216 nla_put(skb, DCB_ATTR_DCB_BUFFER, sizeof(buffer), &buffer))
1220 app = nla_nest_start_noflag(skb, DCB_ATTR_IEEE_APP_TABLE);
1224 spin_lock_bh(&dcb_lock);
1225 list_for_each_entry(itr, &dcb_app_list, list) {
1226 if (itr->ifindex == netdev->ifindex) {
1227 enum ieee_attrs_app type =
1228 dcbnl_app_attr_type_get(itr->app.selector);
1229 err = nla_put(skb, type, sizeof(itr->app), &itr->app);
1231 spin_unlock_bh(&dcb_lock);
1237 if (netdev->dcbnl_ops->getdcbx)
1238 dcbx = netdev->dcbnl_ops->getdcbx(netdev);
1242 spin_unlock_bh(&dcb_lock);
1243 nla_nest_end(skb, app);
1245 rewr = nla_nest_start(skb, DCB_ATTR_DCB_REWR_TABLE);
1249 spin_lock_bh(&dcb_lock);
1250 list_for_each_entry(itr, &dcb_rewr_list, list) {
1251 if (itr->ifindex == netdev->ifindex) {
1252 enum ieee_attrs_app type =
1253 dcbnl_app_attr_type_get(itr->app.selector);
1254 err = nla_put(skb, type, sizeof(itr->app), &itr->app);
1256 spin_unlock_bh(&dcb_lock);
1257 nla_nest_cancel(skb, rewr);
1263 spin_unlock_bh(&dcb_lock);
1264 nla_nest_end(skb, rewr);
1266 if (ops->dcbnl_getapptrust) {
1267 err = dcbnl_getapptrust(netdev, skb);
1272 /* get peer info if available */
1273 if (ops->ieee_peer_getets) {
1274 struct ieee_ets ets;
1275 memset(&ets, 0, sizeof(ets));
1276 err = ops->ieee_peer_getets(netdev, &ets);
1278 nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
1282 if (ops->ieee_peer_getpfc) {
1283 struct ieee_pfc pfc;
1284 memset(&pfc, 0, sizeof(pfc));
1285 err = ops->ieee_peer_getpfc(netdev, &pfc);
1287 nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
1291 if (ops->peer_getappinfo && ops->peer_getapptable) {
1292 err = dcbnl_build_peer_app(netdev, skb,
1293 DCB_ATTR_IEEE_PEER_APP,
1294 DCB_ATTR_IEEE_APP_UNSPEC,
1300 nla_nest_end(skb, ieee);
1302 err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
1310 static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
1313 u8 pgid, up_map, prio, tc_pct;
1314 const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1315 int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
1316 struct nlattr *pg = nla_nest_start_noflag(skb, i);
1321 for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
1322 struct nlattr *tc_nest = nla_nest_start_noflag(skb, i);
1327 pgid = DCB_ATTR_VALUE_UNDEFINED;
1328 prio = DCB_ATTR_VALUE_UNDEFINED;
1329 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1330 up_map = DCB_ATTR_VALUE_UNDEFINED;
1333 ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
1334 &prio, &pgid, &tc_pct, &up_map);
1336 ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
1337 &prio, &pgid, &tc_pct, &up_map);
1339 if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
1340 nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
1341 nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
1342 nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
1344 nla_nest_end(skb, tc_nest);
1347 for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
1348 tc_pct = DCB_ATTR_VALUE_UNDEFINED;
1351 ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
1354 ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
1356 if (nla_put_u8(skb, i, tc_pct))
1359 nla_nest_end(skb, pg);
1363 static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1365 struct nlattr *cee, *app;
1366 struct dcb_app_type *itr;
1367 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1368 int dcbx, i, err = -EMSGSIZE;
1371 if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
1372 goto nla_put_failure;
1373 cee = nla_nest_start_noflag(skb, DCB_ATTR_CEE);
1375 goto nla_put_failure;
1378 if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
1379 err = dcbnl_cee_pg_fill(skb, netdev, 1);
1381 goto nla_put_failure;
1384 if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
1385 err = dcbnl_cee_pg_fill(skb, netdev, 0);
1387 goto nla_put_failure;
1391 if (ops->getpfccfg) {
1392 struct nlattr *pfc_nest = nla_nest_start_noflag(skb,
1396 goto nla_put_failure;
1398 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
1399 ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
1400 if (nla_put_u8(skb, i, value))
1401 goto nla_put_failure;
1403 nla_nest_end(skb, pfc_nest);
1407 spin_lock_bh(&dcb_lock);
1408 app = nla_nest_start_noflag(skb, DCB_ATTR_CEE_APP_TABLE);
1412 list_for_each_entry(itr, &dcb_app_list, list) {
1413 if (itr->ifindex == netdev->ifindex) {
1414 struct nlattr *app_nest = nla_nest_start_noflag(skb,
1419 err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
1424 err = nla_put_u16(skb, DCB_APP_ATTR_ID,
1429 err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
1434 nla_nest_end(skb, app_nest);
1437 nla_nest_end(skb, app);
1439 if (netdev->dcbnl_ops->getdcbx)
1440 dcbx = netdev->dcbnl_ops->getdcbx(netdev);
1444 spin_unlock_bh(&dcb_lock);
/* feature flags */
1447 if (ops->getfeatcfg) {
1448 struct nlattr *feat = nla_nest_start_noflag(skb,
1451 goto nla_put_failure;
1453 for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
1455 if (!ops->getfeatcfg(netdev, i, &value) &&
1456 nla_put_u8(skb, i, value))
1457 goto nla_put_failure;
1459 nla_nest_end(skb, feat);
1462 /* peer info if available */
1463 if (ops->cee_peer_getpg) {
1465 memset(&pg, 0, sizeof(pg));
1466 err = ops->cee_peer_getpg(netdev, &pg);
1468 nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
1469 goto nla_put_failure;
1472 if (ops->cee_peer_getpfc) {
1474 memset(&pfc, 0, sizeof(pfc));
1475 err = ops->cee_peer_getpfc(netdev, &pfc);
1477 nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
1478 goto nla_put_failure;
1481 if (ops->peer_getappinfo && ops->peer_getapptable) {
1482 err = dcbnl_build_peer_app(netdev, skb,
1483 DCB_ATTR_CEE_PEER_APP_TABLE,
1484 DCB_ATTR_CEE_PEER_APP_INFO,
1485 DCB_ATTR_CEE_PEER_APP);
1487 goto nla_put_failure;
1489 nla_nest_end(skb, cee);
1493 err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
1495 goto nla_put_failure;
1500 spin_unlock_bh(&dcb_lock);
1506 static int dcbnl_notify(struct net_device *dev, int event, int cmd,
1507 u32 seq, u32 portid, int dcbx_ver)
1509 struct net *net = dev_net(dev);
1510 struct sk_buff *skb;
1511 struct nlmsghdr *nlh;
1512 const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
1518 skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
1522 if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
1523 err = dcbnl_ieee_fill(skb, dev);
1525 err = dcbnl_cee_fill(skb, dev);
1528 /* Report error to broadcast listeners */
1530 rtnl_set_sk_err(net, RTNLGRP_DCB, err);
1532 /* End nlmsg and notify broadcast listeners */
1533 nlmsg_end(skb, nlh);
1534 rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
1540 int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
1541 u32 seq, u32 portid)
1543 return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
1545 EXPORT_SYMBOL(dcbnl_ieee_notify);
1547 int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
1548 u32 seq, u32 portid)
1550 return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
1552 EXPORT_SYMBOL(dcbnl_cee_notify);
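/* Example (sketch): a driver whose IEEE DCB configuration changes outside of
 * an RTM_SETDCB request (for instance after a firmware-driven LLDP exchange)
 * can inform listeners by re-sending the GET payload as a notification.  The
 * call below is illustrative; seq and portid of 0 denote an unsolicited
 * event.
 *
 *	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
 */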
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
 * If any requested operation cannot be completed,
 * the entire message is aborted and an error value is returned.
 * No attempt is made to reconcile the case where only part of the
 * command can be completed.
 */
1560 static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
1561 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1563 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1564 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1571 if (!tb[DCB_ATTR_IEEE])
1574 err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
1576 dcbnl_ieee_policy, NULL);
1580 if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
1581 struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
1582 err = ops->ieee_setets(netdev, ets);
1587 if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
1588 struct ieee_maxrate *maxrate =
1589 nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
1590 err = ops->ieee_setmaxrate(netdev, maxrate);
1595 if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
1596 struct ieee_qcn *qcn =
1597 nla_data(ieee[DCB_ATTR_IEEE_QCN]);
1599 err = ops->ieee_setqcn(netdev, qcn);
1604 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
1605 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
1606 err = ops->ieee_setpfc(netdev, pfc);
1611 if (ieee[DCB_ATTR_DCB_BUFFER] && ops->dcbnl_setbuffer) {
1612 struct dcbnl_buffer *buffer =
1613 nla_data(ieee[DCB_ATTR_DCB_BUFFER]);
1615 for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) {
1616 if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) {
1622 err = ops->dcbnl_setbuffer(netdev, buffer);
1627 if (ieee[DCB_ATTR_DCB_REWR_TABLE]) {
1628 err = dcbnl_app_table_setdel(ieee[DCB_ATTR_DCB_REWR_TABLE],
1630 ops->dcbnl_setrewr ?: dcb_setrewr);
1635 if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1636 err = dcbnl_app_table_setdel(ieee[DCB_ATTR_IEEE_APP_TABLE],
1637 netdev, ops->ieee_setapp ?:
1643 if (ieee[DCB_ATTR_DCB_APP_TRUST_TABLE]) {
1644 u8 selectors[IEEE_8021QAZ_APP_SEL_MAX + 1] = {0};
1645 struct nlattr *attr;
1649 if (!ops->dcbnl_setapptrust) {
1654 nla_for_each_nested(attr, ieee[DCB_ATTR_DCB_APP_TRUST_TABLE],
1656 enum ieee_attrs_app type = nla_type(attr);
1660 if (!dcbnl_app_attr_type_validate(type) ||
1661 nla_len(attr) != 1 ||
1662 nselectors >= sizeof(selectors)) {
1667 selector = nla_get_u8(attr);
1669 if (!dcbnl_app_selector_validate(type, selector)) {
1674 /* Duplicate selector ? */
1675 for (i = 0; i < nselectors; i++) {
1676 if (selectors[i] == selector) {
1682 selectors[nselectors++] = selector;
1685 err = ops->dcbnl_setapptrust(netdev, selectors, nselectors);
1691 err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
1692 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
1696 static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1697 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1699 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1704 return dcbnl_ieee_fill(skb, netdev);
1707 static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
1708 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1710 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1711 struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1717 if (!tb[DCB_ATTR_IEEE])
1720 err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
1722 dcbnl_ieee_policy, NULL);
1726 if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1727 err = dcbnl_app_table_setdel(ieee[DCB_ATTR_IEEE_APP_TABLE],
1728 netdev, ops->ieee_delapp ?:
1734 if (ieee[DCB_ATTR_DCB_REWR_TABLE]) {
1735 err = dcbnl_app_table_setdel(ieee[DCB_ATTR_DCB_REWR_TABLE],
1737 ops->dcbnl_delrewr ?: dcb_delrewr);
1743 err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
1744 dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
1749 /* DCBX configuration */
1750 static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1751 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1753 if (!netdev->dcbnl_ops->getdcbx)
1756 return nla_put_u8(skb, DCB_ATTR_DCBX,
1757 netdev->dcbnl_ops->getdcbx(netdev));
1760 static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
1761 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1765 if (!netdev->dcbnl_ops->setdcbx)
1768 if (!tb[DCB_ATTR_DCBX])
1771 value = nla_get_u8(tb[DCB_ATTR_DCBX]);
1773 return nla_put_u8(skb, DCB_ATTR_DCBX,
1774 netdev->dcbnl_ops->setdcbx(netdev, value));
1777 static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1778 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1780 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
1785 if (!netdev->dcbnl_ops->getfeatcfg)
1788 if (!tb[DCB_ATTR_FEATCFG])
1791 ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
1792 tb[DCB_ATTR_FEATCFG],
1793 dcbnl_featcfg_nest, NULL);
1797 nest = nla_nest_start_noflag(skb, DCB_ATTR_FEATCFG);
1801 if (data[DCB_FEATCFG_ATTR_ALL])
1804 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1805 if (!getall && !data[i])
1808 ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
1810 ret = nla_put_u8(skb, i, value);
1813 nla_nest_cancel(skb, nest);
1814 goto nla_put_failure;
1817 nla_nest_end(skb, nest);
1823 static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1824 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1826 struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1830 if (!netdev->dcbnl_ops->setfeatcfg)
1833 if (!tb[DCB_ATTR_FEATCFG])
1836 ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
1837 tb[DCB_ATTR_FEATCFG],
1838 dcbnl_featcfg_nest, NULL);
1843 for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1844 if (data[i] == NULL)
1847 value = nla_get_u8(data[i]);
1849 ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1855 ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
1860 /* Handle CEE DCBX GET commands. */
1861 static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
1862 u32 seq, struct nlattr **tb, struct sk_buff *skb)
1864 const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1869 return dcbnl_cee_fill(skb, netdev);
struct reply_func {
	/* reply netlink message type */
	int	type;
	/* function to fill message contents */
	int   (*cb)(struct net_device *, struct nlmsghdr *, u32,
		    struct nlattr **, struct sk_buff *);
};
1881 static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
1882 [DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate },
1883 [DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate },
1884 [DCB_CMD_PFC_GCFG] = { RTM_GETDCB, dcbnl_getpfccfg },
1885 [DCB_CMD_PFC_SCFG] = { RTM_SETDCB, dcbnl_setpfccfg },
1886 [DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr },
1887 [DCB_CMD_GCAP] = { RTM_GETDCB, dcbnl_getcap },
1888 [DCB_CMD_GNUMTCS] = { RTM_GETDCB, dcbnl_getnumtcs },
1889 [DCB_CMD_SNUMTCS] = { RTM_SETDCB, dcbnl_setnumtcs },
1890 [DCB_CMD_PFC_GSTATE] = { RTM_GETDCB, dcbnl_getpfcstate },
1891 [DCB_CMD_PFC_SSTATE] = { RTM_SETDCB, dcbnl_setpfcstate },
1892 [DCB_CMD_GAPP] = { RTM_GETDCB, dcbnl_getapp },
1893 [DCB_CMD_SAPP] = { RTM_SETDCB, dcbnl_setapp },
1894 [DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg },
1895 [DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg },
1896 [DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg },
1897 [DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg },
1898 [DCB_CMD_SET_ALL] = { RTM_SETDCB, dcbnl_setall },
1899 [DCB_CMD_BCN_GCFG] = { RTM_GETDCB, dcbnl_bcn_getcfg },
1900 [DCB_CMD_BCN_SCFG] = { RTM_SETDCB, dcbnl_bcn_setcfg },
1901 [DCB_CMD_IEEE_GET] = { RTM_GETDCB, dcbnl_ieee_get },
1902 [DCB_CMD_IEEE_SET] = { RTM_SETDCB, dcbnl_ieee_set },
1903 [DCB_CMD_IEEE_DEL] = { RTM_SETDCB, dcbnl_ieee_del },
1904 [DCB_CMD_GDCBX] = { RTM_GETDCB, dcbnl_getdcbx },
1905 [DCB_CMD_SDCBX] = { RTM_SETDCB, dcbnl_setdcbx },
1906 [DCB_CMD_GFEATCFG] = { RTM_GETDCB, dcbnl_getfeatcfg },
1907 [DCB_CMD_SFEATCFG] = { RTM_SETDCB, dcbnl_setfeatcfg },
1908 [DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get },
1911 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1912 struct netlink_ext_ack *extack)
1914 struct net *net = sock_net(skb->sk);
1915 struct net_device *netdev;
1916 struct dcbmsg *dcb = nlmsg_data(nlh);
1917 struct nlattr *tb[DCB_ATTR_MAX + 1];
1918 u32 portid = NETLINK_CB(skb).portid;
1920 struct sk_buff *reply_skb;
1921 struct nlmsghdr *reply_nlh = NULL;
1922 const struct reply_func *fn;
1924 if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
1927 ret = nlmsg_parse_deprecated(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
1928 dcbnl_rtnl_policy, extack);
1932 if (dcb->cmd > DCB_CMD_MAX)
1935 /* check if a reply function has been defined for the command */
1936 fn = &reply_funcs[dcb->cmd];
1939 if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN))
1942 if (!tb[DCB_ATTR_IFNAME])
1945 netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
1949 if (!netdev->dcbnl_ops)
1952 reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
1953 nlh->nlmsg_flags, &reply_nlh);
1957 ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
1959 nlmsg_free(reply_skb);
1963 nlmsg_end(reply_skb, reply_nlh);
1965 ret = rtnl_unicast(reply_skb, net, portid);
1970 static struct dcb_app_type *dcb_rewr_lookup(const struct dcb_app *app,
1971 int ifindex, int proto)
1973 struct dcb_app_type *itr;
1975 list_for_each_entry(itr, &dcb_rewr_list, list) {
1976 if (itr->app.selector == app->selector &&
1977 itr->app.priority == app->priority &&
1978 itr->ifindex == ifindex &&
1979 ((proto == -1) || itr->app.protocol == proto))
1986 static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
1987 int ifindex, int prio)
1989 struct dcb_app_type *itr;
1991 list_for_each_entry(itr, &dcb_app_list, list) {
1992 if (itr->app.selector == app->selector &&
1993 itr->app.protocol == app->protocol &&
1994 itr->ifindex == ifindex &&
1995 ((prio == -1) || itr->app.priority == prio))
2002 static int dcb_app_add(struct list_head *list, const struct dcb_app *app,
2005 struct dcb_app_type *entry;
2007 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
2011 memcpy(&entry->app, app, sizeof(*app));
2012 entry->ifindex = ifindex;
2013 list_add(&entry->list, list);
/**
 * dcb_getapp - retrieve the DCBX application user priority
 * @dev: network interface
 * @app: application to get user priority of
 *
 * On success returns a non-zero 802.1p user priority bitmap,
 * otherwise returns 0 as the invalid user priority bitmap to
 * indicate an error.
 */
2027 u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
2029 struct dcb_app_type *itr;
2032 spin_lock_bh(&dcb_lock);
2033 itr = dcb_app_lookup(app, dev->ifindex, -1);
2035 prio = itr->app.priority;
2036 spin_unlock_bh(&dcb_lock);
2040 EXPORT_SYMBOL(dcb_getapp);
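/* Example (sketch): querying the CEE APP priority stored for FCoE traffic.
 * The ethertype 0x8906 (FCoE) is only an illustration; any selector/protocol
 * pair recorded earlier with dcb_setapp() can be looked up the same way.
 *
 *	struct dcb_app app = {
 *		.selector = DCB_APP_IDTYPE_ETHTYPE,
 *		.protocol = 0x8906,
 *	};
 *	u8 up_map = dcb_getapp(netdev, &app);	// 0 if no entry exists
 */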
/**
 * dcb_setapp - add CEE dcb application data to app list
 * @dev: network interface
 * @new: application data to add
 *
 * Priority 0 is an invalid priority in the CEE spec. This routine
 * removes applications from the app list if the priority is
 * set to zero. Priority is expected to be an 8-bit 802.1p user priority bitmap.
 */
2051 int dcb_setapp(struct net_device *dev, struct dcb_app *new)
2053 struct dcb_app_type *itr;
2054 struct dcb_app_type event;
2057 event.ifindex = dev->ifindex;
2058 memcpy(&event.app, new, sizeof(event.app));
2059 if (dev->dcbnl_ops->getdcbx)
2060 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2062 spin_lock_bh(&dcb_lock);
2063 /* Search for existing match and replace */
2064 itr = dcb_app_lookup(new, dev->ifindex, -1);
2067 itr->app.priority = new->priority;
2069 list_del(&itr->list);
/* App type does not exist; add a new application type */
2076 err = dcb_app_add(&dcb_app_list, new, dev->ifindex);
2078 spin_unlock_bh(&dcb_lock);
2080 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2083 EXPORT_SYMBOL(dcb_setapp);
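/* Example (sketch): recording that FCoE (ethertype 0x8906, chosen purely for
 * illustration) should use priority 3.  In the CEE model the priority field
 * is a user-priority bitmap, so bit 3 is set; passing a priority of 0 would
 * instead delete the entry, per the comment above.
 *
 *	struct dcb_app app = {
 *		.selector = DCB_APP_IDTYPE_ETHTYPE,
 *		.protocol = 0x8906,
 *		.priority = 1 << 3,
 *	};
 *	int err = dcb_setapp(netdev, &app);
 */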
/**
 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
 * @dev: network interface
 * @app: application whose user priority mask to retrieve
 *
 * Helper routine which on success returns a non-zero 802.1Qaz user
 * priority bitmap, otherwise returns 0 to indicate the dcb_app was
 * not found in the APP list.
 */
2094 u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
2096 struct dcb_app_type *itr;
2099 spin_lock_bh(&dcb_lock);
2100 itr = dcb_app_lookup(app, dev->ifindex, -1);
2102 prio |= 1 << itr->app.priority;
2103 spin_unlock_bh(&dcb_lock);
2107 EXPORT_SYMBOL(dcb_ieee_getapp_mask);
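/* Example (sketch): collecting every priority that IEEE APP entries map to a
 * given protocol.  With the DSCP selector the protocol field holds the DSCP
 * value (46 here is just an illustration).
 *
 *	struct dcb_app app = {
 *		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
 *		.protocol = 46,
 *	};
 *	u8 prio_mask = dcb_ieee_getapp_mask(netdev, &app);
 */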
2109 /* Get protocol value from rewrite entry. */
2110 u16 dcb_getrewr(struct net_device *dev, struct dcb_app *app)
2112 struct dcb_app_type *itr;
2115 spin_lock_bh(&dcb_lock);
2116 itr = dcb_rewr_lookup(app, dev->ifindex, -1);
2118 proto = itr->app.protocol;
2119 spin_unlock_bh(&dcb_lock);
2123 EXPORT_SYMBOL(dcb_getrewr);
2125 /* Add rewrite entry to the rewrite list. */
2126 int dcb_setrewr(struct net_device *dev, struct dcb_app *new)
2130 spin_lock_bh(&dcb_lock);
2131 /* Search for existing match and abort if found. */
2132 if (dcb_rewr_lookup(new, dev->ifindex, new->protocol)) {
2137 err = dcb_app_add(&dcb_rewr_list, new, dev->ifindex);
2139 spin_unlock_bh(&dcb_lock);
2143 EXPORT_SYMBOL(dcb_setrewr);
2145 /* Delete rewrite entry from the rewrite list. */
2146 int dcb_delrewr(struct net_device *dev, struct dcb_app *del)
2148 struct dcb_app_type *itr;
2151 spin_lock_bh(&dcb_lock);
2152 /* Search for existing match and remove it. */
2153 itr = dcb_rewr_lookup(del, dev->ifindex, del->protocol);
2155 list_del(&itr->list);
2160 spin_unlock_bh(&dcb_lock);
2164 EXPORT_SYMBOL(dcb_delrewr);
/**
 * dcb_ieee_setapp - add IEEE dcb application data to app list
 * @dev: network interface
 * @new: application data to add
 *
 * This adds application data to the list. Multiple application
 * entries may exist for the same selector and protocol as long
 * as the priorities are different. Priority is expected to be a
 * 3-bit unsigned integer.
 */
2176 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
2178 struct dcb_app_type event;
2181 event.ifindex = dev->ifindex;
2182 memcpy(&event.app, new, sizeof(event.app));
2183 if (dev->dcbnl_ops->getdcbx)
2184 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2186 spin_lock_bh(&dcb_lock);
2187 /* Search for existing match and abort if found */
2188 if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
2193 err = dcb_app_add(&dcb_app_list, new, dev->ifindex);
2195 spin_unlock_bh(&dcb_lock);
2197 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2200 EXPORT_SYMBOL(dcb_ieee_setapp);
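/* Example (sketch): adding an IEEE APP entry that maps DSCP 46 to priority 5.
 * Unlike the CEE helper above, the priority here is a plain 3-bit value, and
 * a second call with the same selector/protocol/priority triple fails because
 * duplicates are rejected.
 *
 *	struct dcb_app app = {
 *		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
 *		.protocol = 46,
 *		.priority = 5,
 *	};
 *	int err = dcb_ieee_setapp(netdev, &app);
 */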
/**
 * dcb_ieee_delapp - delete IEEE dcb application data from list
 * @dev: network interface
 * @del: application data to delete
 *
 * This removes a matching APP entry from the APP list.
 */
2209 int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
2211 struct dcb_app_type *itr;
2212 struct dcb_app_type event;
2215 event.ifindex = dev->ifindex;
2216 memcpy(&event.app, del, sizeof(event.app));
2217 if (dev->dcbnl_ops->getdcbx)
2218 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
2220 spin_lock_bh(&dcb_lock);
2221 /* Search for existing match and remove it. */
2222 if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
2223 list_del(&itr->list);
2228 spin_unlock_bh(&dcb_lock);
2230 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
2233 EXPORT_SYMBOL(dcb_ieee_delapp);
2235 /* dcb_getrewr_prio_pcp_mask_map - For a given device, find mapping from
2236 * priorities to the PCP and DEI values assigned to that priority.
2238 void dcb_getrewr_prio_pcp_mask_map(const struct net_device *dev,
2239 struct dcb_rewr_prio_pcp_map *p_map)
2241 int ifindex = dev->ifindex;
2242 struct dcb_app_type *itr;
2245 memset(p_map->map, 0, sizeof(p_map->map));
2247 spin_lock_bh(&dcb_lock);
2248 list_for_each_entry(itr, &dcb_rewr_list, list) {
2249 if (itr->ifindex == ifindex &&
2250 itr->app.selector == DCB_APP_SEL_PCP &&
2251 itr->app.protocol < 16 &&
2252 itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
2253 prio = itr->app.priority;
2254 p_map->map[prio] |= 1 << itr->app.protocol;
2257 spin_unlock_bh(&dcb_lock);
2259 EXPORT_SYMBOL(dcb_getrewr_prio_pcp_mask_map);
2261 /* dcb_getrewr_prio_dscp_mask_map - For a given device, find mapping from
2262 * priorities to the DSCP values assigned to that priority.
2264 void dcb_getrewr_prio_dscp_mask_map(const struct net_device *dev,
2265 struct dcb_ieee_app_prio_map *p_map)
2267 int ifindex = dev->ifindex;
2268 struct dcb_app_type *itr;
2271 memset(p_map->map, 0, sizeof(p_map->map));
2273 spin_lock_bh(&dcb_lock);
2274 list_for_each_entry(itr, &dcb_rewr_list, list) {
2275 if (itr->ifindex == ifindex &&
2276 itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
2277 itr->app.protocol < 64 &&
2278 itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
2279 prio = itr->app.priority;
2280 p_map->map[prio] |= 1ULL << itr->app.protocol;
2283 spin_unlock_bh(&dcb_lock);
2285 EXPORT_SYMBOL(dcb_getrewr_prio_dscp_mask_map);
2288 * dcb_ieee_getapp_prio_dscp_mask_map - For a given device, find mapping from
2289 * priorities to the DSCP values assigned to that priority. Initialize p_map
2290 * such that each map element holds a bit mask of DSCP values configured for
2291 * that priority by APP entries.
2293 void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
2294 struct dcb_ieee_app_prio_map *p_map)
2296 int ifindex = dev->ifindex;
2297 struct dcb_app_type *itr;
2300 memset(p_map->map, 0, sizeof(p_map->map));
2302 spin_lock_bh(&dcb_lock);
2303 list_for_each_entry(itr, &dcb_app_list, list) {
2304 if (itr->ifindex == ifindex &&
2305 itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
2306 itr->app.protocol < 64 &&
2307 itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
2308 prio = itr->app.priority;
2309 p_map->map[prio] |= 1ULL << itr->app.protocol;
2312 spin_unlock_bh(&dcb_lock);
2314 EXPORT_SYMBOL(dcb_ieee_getapp_prio_dscp_mask_map);
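/* Example (sketch): a driver translating APP DSCP entries into hardware
 * configuration can fetch the whole priority-to-DSCP-mask table at once.
 * The loop below is illustrative only.
 *
 *	struct dcb_ieee_app_prio_map map;
 *	int prio;
 *
 *	dcb_ieee_getapp_prio_dscp_mask_map(netdev, &map);
 *	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
 *		if (map.map[prio])
 *			;	// program the DSCP values in map.map[prio]
 */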
2317 * dcb_ieee_getapp_dscp_prio_mask_map - For a given device, find mapping from
2318 * DSCP values to the priorities assigned to that DSCP value. Initialize p_map
2319 * such that each map element holds a bit mask of priorities configured for a
2320 * given DSCP value by APP entries.
2323 dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
2324 struct dcb_ieee_app_dscp_map *p_map)
2326 int ifindex = dev->ifindex;
2327 struct dcb_app_type *itr;
2329 memset(p_map->map, 0, sizeof(p_map->map));
2331 spin_lock_bh(&dcb_lock);
2332 list_for_each_entry(itr, &dcb_app_list, list) {
2333 if (itr->ifindex == ifindex &&
2334 itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
2335 itr->app.protocol < 64 &&
2336 itr->app.priority < IEEE_8021QAZ_MAX_TCS)
2337 p_map->map[itr->app.protocol] |= 1 << itr->app.priority;
2339 spin_unlock_bh(&dcb_lock);
2341 EXPORT_SYMBOL(dcb_ieee_getapp_dscp_prio_mask_map);
/* Per 802.1Q-2014, the selector value of 1 is used for matching on Ethernet
 * type, with valid PID values >= 1536. A special meaning is then assigned to
 * the protocol value of 0: "default priority. For use when priority is not
 * otherwise specified".
 *
 * dcb_ieee_getapp_default_prio_mask - For a given device, find all APP entries
 * of the form {$PRIO, ETHERTYPE, 0} and construct a bit mask of all default
 * priorities set by these entries.
 */
2353 u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev)
2355 int ifindex = dev->ifindex;
2356 struct dcb_app_type *itr;
2359 spin_lock_bh(&dcb_lock);
2360 list_for_each_entry(itr, &dcb_app_list, list) {
2361 if (itr->ifindex == ifindex &&
2362 itr->app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
2363 itr->app.protocol == 0 &&
2364 itr->app.priority < IEEE_8021QAZ_MAX_TCS)
2365 mask |= 1 << itr->app.priority;
2367 spin_unlock_bh(&dcb_lock);
2371 EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask);
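/* Example (sketch): with a single APP entry {priority 4, selector ETHERTYPE,
 * protocol 0} installed (the "default priority" form described above), the
 * helper returns a mask with only bit 4 set:
 *
 *	u8 mask = dcb_ieee_getapp_default_prio_mask(netdev);	// 0x10
 */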
2373 static void dcbnl_flush_dev(struct net_device *dev)
2375 struct dcb_app_type *itr, *tmp;
2377 spin_lock_bh(&dcb_lock);
2379 list_for_each_entry_safe(itr, tmp, &dcb_app_list, list) {
2380 if (itr->ifindex == dev->ifindex) {
2381 list_del(&itr->list);
2386 spin_unlock_bh(&dcb_lock);
2389 static int dcbnl_netdevice_event(struct notifier_block *nb,
2390 unsigned long event, void *ptr)
2392 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2395 case NETDEV_UNREGISTER:
2396 if (!dev->dcbnl_ops)
2399 dcbnl_flush_dev(dev);
2407 static struct notifier_block dcbnl_nb __read_mostly = {
2408 .notifier_call = dcbnl_netdevice_event,
2411 static int __init dcbnl_init(void)
2415 err = register_netdevice_notifier(&dcbnl_nb);
2419 rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0);
2420 rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0);
2424 device_initcall(dcbnl_init);
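/* Usage note (illustrative): the rtnetlink commands registered above are what
 * userspace DCB tooling talks to; for instance the iproute2 `dcb` utility
 * (e.g. "dcb ets show dev eth0", "dcb app show dev eth0") and lldpad's
 * dcbtool issue RTM_GETDCB/RTM_SETDCB messages that are dispatched through
 * dcb_doit().  Exact tool syntax may differ by version; this is a pointer,
 * not a specification.
 */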