// SPDX-License-Identifier: GPL-2.0-only

#include <linux/ethtool_netlink.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>

#include "bitset.h"
#include "common.h"
#include "netlink.h"

const struct nla_policy ethnl_tunnel_info_get_policy[] = {
	[ETHTOOL_A_TUNNEL_INFO_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
};
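/* The ETHTOOL_UDP_TUNNEL_TYPE_* constants are defined as the bit positions
 * of the corresponding UDP_TUNNEL_TYPE_* flags; verify that mapping at
 * build time.
 */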
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN == ilog2(UDP_TUNNEL_TYPE_VXLAN));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE ==
	      ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE));
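/* Estimate the space needed for one port table: the tunnel types bitset
 * plus the table nest itself and its size attribute. Space for the table
 * entries is accounted for by the caller.
 */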
static ssize_t ethnl_udp_table_reply_size(unsigned int types, bool compact)
{
	ssize_t size;

	size = ethnl_bitset32_size(&types, NULL, __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				   udp_tunnel_type_names, compact);
	if (size < 0)
		return size;

	return size +
		nla_total_size(0) + /* _UDP_TABLE */
		nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */
}
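/* Compute an upper bound on the reply size for a TUNNEL_INFO_GET request,
 * covering every UDP port table the device advertises.
 */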
static ssize_t
ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base,
			     struct netlink_ext_ack *extack)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct udp_tunnel_nic_info *info;
	unsigned int i;
	ssize_t ret;
	size_t size;

	info = req_base->dev->udp_tunnel_nic_info;
	if (!info) {
		NL_SET_ERR_MSG(extack,
			       "device does not report tunnel offload info");
		return -EOPNOTSUPP;
	}

	size = nla_total_size(0); /* _INFO_UDP_PORTS */

	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			break;

		ret = ethnl_udp_table_reply_size(info->tables[i].tunnel_types,
						 compact);
		if (ret < 0)
			return ret;
		size += ret;

		size += udp_tunnel_nic_dump_size(req_base->dev, i);
	}

	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
		ret = ethnl_udp_table_reply_size(0, compact);
		if (ret < 0)
			return ret;
		size += ret;

		size += nla_total_size(0) +		 /* _TABLE_ENTRY */
			nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
			nla_total_size(sizeof(u32));	 /* _ENTRY_TYPE */
	}

	return size;
}
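/* Fill the nested _INFO_UDP_PORTS attribute: one _UDP_TABLE nest per device
 * port table, each carrying its size, the supported tunnel types bitset and
 * the currently programmed entries.
 */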
static int
ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base,
			     struct sk_buff *skb)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct udp_tunnel_nic_info *info;
	struct nlattr *ports, *table, *entry;
	unsigned int i;

	info = req_base->dev->udp_tunnel_nic_info;
	if (!info)
		return -EOPNOTSUPP;

	ports = nla_nest_start(skb, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS);
	if (!ports)
		return -EMSGSIZE;

	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			break;

		table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
		if (!table)
			goto err_cancel_ports;

		if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE,
				info->tables[i].n_entries))
			goto err_cancel_table;

		if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
				       &info->tables[i].tunnel_types, NULL,
				       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				       udp_tunnel_type_names, compact))
			goto err_cancel_table;

		if (udp_tunnel_nic_dump_write(req_base->dev, i, skb))
			goto err_cancel_table;

		nla_nest_end(skb, table);
	}
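	/* Devices which always listen on the IANA VXLAN port report it as an
	 * extra table with no configurable tunnel types and a single fixed
	 * entry, so the port is visible to user space but not changeable.
	 */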
	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
		u32 zero = 0;

		table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
		if (!table)
			goto err_cancel_ports;

		if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, 1))
			goto err_cancel_table;

		if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
				       &zero, NULL,
				       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				       udp_tunnel_type_names, compact))
			goto err_cancel_table;

		entry = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);
		if (!entry)
			goto err_cancel_entry;

		if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
				 htons(IANA_VXLAN_UDP_PORT)) ||
		    nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
				ilog2(UDP_TUNNEL_TYPE_VXLAN)))
			goto err_cancel_entry;

		nla_nest_end(skb, entry);
		nla_nest_end(skb, table);
	}
	nla_nest_end(skb, ports);

	return 0;

err_cancel_entry:
	nla_nest_cancel(skb, entry);
err_cancel_table:
	nla_nest_cancel(skb, table);
err_cancel_ports:
	nla_nest_cancel(skb, ports);
	return -EMSGSIZE;
}
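/* Handle a TUNNEL_INFO_GET request for a single device: size the reply,
 * allocate it and fill it in under the RTNL lock.
 */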
int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct ethnl_req_info req_info = {};
	struct nlattr **tb = info->attrs;
	struct sk_buff *rskb;
	void *reply_payload;
	int reply_len;
	int ret;

	ret = ethnl_parse_header_dev_get(&req_info,
					 tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;

	rtnl_lock();
	ret = ethnl_tunnel_info_reply_size(&req_info, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;
	reply_len = ret + ethnl_reply_header_size();

	rskb = ethnl_reply_init(reply_len, req_info.dev,
				ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY,
				ETHTOOL_A_TUNNEL_INFO_HEADER,
				info, &reply_payload);
	if (!rskb) {
		ret = -ENOMEM;
		goto err_unlock_rtnl;
	}

	ret = ethnl_tunnel_info_fill_reply(&req_info, rskb);
	if (ret)
		goto err_free_msg;
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info);
	genlmsg_end(rskb, reply_payload);

	return genlmsg_reply(rskb, info);

err_free_msg:
	nlmsg_free(rskb);
err_unlock_rtnl:
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info);
	return ret;
}
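/* Dump state kept in cb->ctx between ethnl_tunnel_info_dumpit() calls:
 * the parsed request plus the hash bucket and index of the next device.
 */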
struct ethnl_tunnel_info_dump_ctx {
	struct ethnl_req_info	req_info;
	int			pos_hash;
	int			pos_idx;
};
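/* Parse and validate the request header once, at the start of a dump.
 * Any device reference taken while parsing is dropped right away, since
 * the dump walks all devices itself.
 */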
int ethnl_tunnel_info_start(struct netlink_callback *cb)
{
	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	struct nlattr **tb = info->attrs;
	int ret;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

	memset(ctx, 0, sizeof(*ctx));

	ret = ethnl_parse_header_dev_get(&ctx->req_info,
					 tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
					 sock_net(cb->skb->sk), cb->extack,
					 false);
	if (ctx->req_info.dev) {
		ethnl_parse_header_dev_put(&ctx->req_info);
		ctx->req_info.dev = NULL;
	}

	return ret;
}
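/* Walk the per-netns device hash table and emit one reply message per
 * device that reports tunnel offload info, resuming from the position
 * saved in the dump context.
 */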
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	int s_idx = ctx->pos_idx;
	int h, idx = 0;
	int ret = 0;
	void *ehdr;

	rtnl_lock();
	cb->seq = net->dev_base_seq;
	for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
		struct hlist_head *head;
		struct net_device *dev;

		head = &net->dev_index_head[h];
		idx = 0;
		hlist_for_each_entry(dev, head, index_hlist) {
			if (idx < s_idx)
				goto cont;

			ehdr = ethnl_dump_put(skb, cb,
					      ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY);
			if (!ehdr) {
				ret = -EMSGSIZE;
				goto out;
			}

			ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_TUNNEL_INFO_HEADER);
			if (ret < 0) {
				genlmsg_cancel(skb, ehdr);
				goto out;
			}

			ctx->req_info.dev = dev;
			ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb);
			ctx->req_info.dev = NULL;
			if (ret < 0) {
				genlmsg_cancel(skb, ehdr);
				if (ret == -EOPNOTSUPP)
					goto cont;
				goto out;
			}
			genlmsg_end(skb, ehdr);
cont:
			idx++;
		}
	}
	ret = 0;

out:
	rtnl_unlock();
	ctx->pos_hash = h;
	ctx->pos_idx = idx;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));

	if (ret == -EMSGSIZE && skb->len)
		return skb->len;
	return ret;
}