// SPDX-License-Identifier: GPL-2.0-only

#include <linux/ethtool_netlink.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>	/* IANA_VXLAN_UDP_PORT */

#include "bitset.h"
#include "common.h"
#include "netlink.h"

static const struct nla_policy
ethtool_tunnel_info_policy[ETHTOOL_A_TUNNEL_INFO_MAX + 1] = {
	[ETHTOOL_A_TUNNEL_INFO_UNSPEC]	= { .type = NLA_REJECT },
	[ETHTOOL_A_TUNNEL_INFO_HEADER]	= { .type = NLA_NESTED },
};

static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN == ilog2(UDP_TUNNEL_TYPE_VXLAN));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE ==
	      ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE));
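/* Reply size of a single UDP tunnel port table: the nested _UDP_TABLE
 * attribute itself, its _UDP_TABLE_SIZE attribute and the tunnel type
 * bitset.
 */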
static ssize_t ethnl_udp_table_reply_size(unsigned int types, bool compact)
{
	ssize_t size;

	size = ethnl_bitset32_size(&types, NULL, __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				   udp_tunnel_type_names, compact);
	if (size < 0)
		return size;

	return size +
		nla_total_size(0) + /* _UDP_TABLE */
		nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */
}
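/* Account for one nested table per populated udp_tunnel_nic table, plus
 * an extra single-entry table when the device has the IANA-assigned
 * VXLAN port hard-coded (UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN).
 */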
static ssize_t
ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base,
			     struct netlink_ext_ack *extack)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct udp_tunnel_nic_info *info;
	unsigned int i;
	ssize_t ret;
	size_t size;

	info = req_base->dev->udp_tunnel_nic_info;
	if (!info) {
		NL_SET_ERR_MSG(extack,
			       "device does not report tunnel offload info");
		return -EOPNOTSUPP;
	}

	size = nla_total_size(0); /* _INFO_UDP_PORTS */

	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			break;

		ret = ethnl_udp_table_reply_size(info->tables[i].tunnel_types,
						 compact);
		if (ret < 0)
			return ret;
		size += ret;

		size += udp_tunnel_nic_dump_size(req_base->dev, i);
	}

	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
		ret = ethnl_udp_table_reply_size(0, compact);
		if (ret < 0)
			return ret;
		size += ret;

		size += nla_total_size(0) +		 /* _TABLE_ENTRY */
			nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
			nla_total_size(sizeof(u32));	 /* _ENTRY_TYPE */
	}

	return size;
}
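/* Emit the ETHTOOL_A_TUNNEL_INFO_UDP_PORTS nest; the layout must match
 * what ethnl_tunnel_info_reply_size() accounted for.
 */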
static int
ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base,
			     struct sk_buff *skb)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct udp_tunnel_nic_info *info;
	struct nlattr *ports, *table, *entry;
	unsigned int i;

	info = req_base->dev->udp_tunnel_nic_info;
	if (!info)
		return -EOPNOTSUPP;

	ports = nla_nest_start(skb, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS);
	if (!ports)
		return -EMSGSIZE;

	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			break;

		table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
		if (!table)
			goto err_cancel_ports;

		if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE,
				info->tables[i].n_entries))
			goto err_cancel_table;

		if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
				       &info->tables[i].tunnel_types, NULL,
				       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				       udp_tunnel_type_names, compact))
			goto err_cancel_table;

		if (udp_tunnel_nic_dump_write(req_base->dev, i, skb))
			goto err_cancel_table;

		nla_nest_end(skb, table);
	}

	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
		u32 zero = 0;

		table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
		if (!table)
			goto err_cancel_ports;

		if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, 1))
			goto err_cancel_table;

		if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
				       &zero, NULL,
				       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				       udp_tunnel_type_names, compact))
			goto err_cancel_table;

		entry = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);

		if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
				 htons(IANA_VXLAN_UDP_PORT)) ||
		    nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
				ilog2(UDP_TUNNEL_TYPE_VXLAN)))
			goto err_cancel_entry;

		nla_nest_end(skb, entry);
		nla_nest_end(skb, table);
	}

	nla_nest_end(skb, ports);

	return 0;

err_cancel_entry:
	nla_nest_cancel(skb, entry);
err_cancel_table:
	nla_nest_cancel(skb, table);
err_cancel_ports:
	nla_nest_cancel(skb, ports);
	return -EMSGSIZE;
}
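/* Parse the request header; require_dev is true for GET requests, where
 * a target device is mandatory, and false for dumps.
 */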
static int
ethnl_tunnel_info_req_parse(struct ethnl_req_info *req_info,
			    const struct nlmsghdr *nlhdr, struct net *net,
			    struct netlink_ext_ack *extack, bool require_dev)
{
	struct nlattr *tb[ETHTOOL_A_TUNNEL_INFO_MAX + 1];
	int ret;

	ret = nlmsg_parse(nlhdr, GENL_HDRLEN, tb, ETHTOOL_A_TUNNEL_INFO_MAX,
			  ethtool_tunnel_info_policy, extack);
	if (ret < 0)
		return ret;

	return ethnl_parse_header_dev_get(req_info,
					  tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
					  net, extack, require_dev);
}
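/* GET handler: compute the reply size, then allocate and fill the reply
 * for a single device.
 */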
int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct ethnl_req_info req_info = {};
	struct sk_buff *rskb;
	void *reply_payload;
	int reply_len;
	int ret;

	ret = ethnl_tunnel_info_req_parse(&req_info, info->nlhdr,
					  genl_info_net(info), info->extack,
					  true);
	if (ret < 0)
		return ret;

	rtnl_lock();
	ret = ethnl_tunnel_info_reply_size(&req_info, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;
	reply_len = ret + ethnl_reply_header_size();

	rskb = ethnl_reply_init(reply_len, req_info.dev,
				ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY,
				ETHTOOL_A_TUNNEL_INFO_HEADER,
				info, &reply_payload);
	if (!rskb) {
		ret = -ENOMEM;
		goto err_unlock_rtnl;
	}

	ret = ethnl_tunnel_info_fill_reply(&req_info, rskb);
	if (ret)
		goto err_free_msg;
	rtnl_unlock();
	dev_put(req_info.dev);
	genlmsg_end(rskb, reply_payload);

	return genlmsg_reply(rskb, info);

err_free_msg:
	nlmsg_free(rskb);
err_unlock_rtnl:
	rtnl_unlock();
	dev_put(req_info.dev);
	return ret;
}
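/* Dump iteration state kept in cb->ctx across dumpit invocations. */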
struct ethnl_tunnel_info_dump_ctx {
	struct ethnl_req_info	req_info;
	int			pos_hash;
	int			pos_idx;
};
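/* Dump setup: validate the request header, but drop any device
 * reference, the dump walks all devices itself.
 */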
int ethnl_tunnel_info_start(struct netlink_callback *cb)
{
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	int ret;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

	memset(ctx, 0, sizeof(*ctx));

	ret = ethnl_tunnel_info_req_parse(&ctx->req_info, cb->nlh,
					  sock_net(cb->skb->sk), cb->extack,
					  false);
	if (ctx->req_info.dev) {
		dev_put(ctx->req_info.dev);
		ctx->req_info.dev = NULL;
	}

	return ret;
}
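/* Dump handler: walk the per-netns device index hash, resuming from the
 * (hash bucket, index) position saved in the dump context.
 */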
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	int s_idx = ctx->pos_idx;
	int h, idx = 0;
	int ret = 0;
	void *ehdr;

	rtnl_lock();
	cb->seq = net->dev_base_seq;
	for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		struct hlist_head *head;
		struct net_device *dev;

		head = &net->dev_index_head[h];
		idx = 0;
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;

			ehdr = ethnl_dump_put(skb, cb,
					      ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY);
			if (!ehdr) {
				ret = -EMSGSIZE;
				goto out;
			}

			ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_TUNNEL_INFO_HEADER);
			if (ret < 0) {
				genlmsg_cancel(skb, ehdr);
				goto out;
			}

			ctx->req_info.dev = dev;
			ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb);
			ctx->req_info.dev = NULL;
			if (ret < 0) {
				genlmsg_cancel(skb, ehdr);
				if (ret == -EOPNOTSUPP)
					goto cont;
				goto out;
			}
			genlmsg_end(skb, ehdr);
cont:
			idx++;
		}
	}
out:
	rtnl_unlock();

	ctx->pos_hash = h;
	ctx->pos_idx = idx;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));

	if (ret == -EMSGSIZE && skb->len)
		return skb->len;
	return ret;
}