// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020, Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/ip_tunnels.h>

#include "br_private.h"
#include "br_private_tunnel.h"

static bool __vlan_tun_put(struct sk_buff *skb, const struct net_bridge_vlan *v)
{
	__be32 tid = tunnel_id_to_key32(v->tinfo.tunnel_id);
	struct nlattr *nest;

	if (!v->tinfo.tunnel_dst)
		return true;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_TUNNEL_INFO);
	if (!nest)
		return false;
	if (nla_put_u32(skb, BRIDGE_VLANDB_TINFO_ID, be32_to_cpu(tid))) {
		nla_nest_cancel(skb, nest);
		return false;
	}
	nla_nest_end(skb, nest);

	return true;
}

static bool __vlan_tun_can_enter_range(const struct net_bridge_vlan *v_curr,
				       const struct net_bridge_vlan *range_end)
{
	return (!v_curr->tinfo.tunnel_dst && !range_end->tinfo.tunnel_dst) ||
	       vlan_tunid_inrange(v_curr, range_end);
}

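/* Illustrative note (values not from the original source): two adjacent
 * vlans, e.g. 10 and 11, can only be compressed into a single range entry
 * when their per-vlan options match - same STP state, same (or no) tunnel
 * mapping and same multicast router mode; any mismatch ends the range.
 */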
/* check if the options' state of v_curr allows it to enter the range */
bool br_vlan_opts_eq_range(const struct net_bridge_vlan *v_curr,
			   const struct net_bridge_vlan *range_end)
{
	u8 range_mc_rtr = br_vlan_multicast_router(range_end);
	u8 curr_mc_rtr = br_vlan_multicast_router(v_curr);

	return v_curr->state == range_end->state &&
	       __vlan_tun_can_enter_range(v_curr, range_end) &&
	       curr_mc_rtr == range_mc_rtr;
}

bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v)
{
	if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_STATE, br_vlan_get_state(v)) ||
	    !__vlan_tun_put(skb, v))
		return false;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_MCAST_ROUTER,
		       br_vlan_multicast_router(v)))
		return false;
#endif

	return true;
}

size_t br_vlan_opts_nl_size(void)
{
	return nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_STATE */
	       + nla_total_size(0) /* BRIDGE_VLANDB_ENTRY_TUNNEL_INFO */
	       + nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_TINFO_ID */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       + nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_MCAST_ROUTER */
#endif
	       + 0;
}

static int br_vlan_modify_state(struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *v,
				u8 state,
				bool *changed,
				struct netlink_ext_ack *extack)
{
	struct net_bridge *br;

	ASSERT_RTNL();

	if (state > BR_STATE_BLOCKING) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid vlan state");
		return -EINVAL;
	}

	if (br_vlan_is_brentry(v))
		br = v->br;
	else
		br = v->port->br;

	if (br->stp_enabled == BR_KERNEL_STP) {
		NL_SET_ERR_MSG_MOD(extack, "Can't modify vlan state when using kernel STP");
		return -EBUSY;
	}

	if (v->state == state)
		return 0;

	if (v->vid == br_get_pvid(vg))
		br_vlan_set_pvid_state(vg, state);

	br_vlan_set_state(v, state);
	*changed = true;

	return 0;
}

static const struct nla_policy br_vlandb_tinfo_pol[BRIDGE_VLANDB_TINFO_MAX + 1] = {
	[BRIDGE_VLANDB_TINFO_ID]	= { .type = NLA_U32 },
	[BRIDGE_VLANDB_TINFO_CMD]	= { .type = NLA_U32 },
};

static int br_vlan_modify_tunnel(const struct net_bridge_port *p,
				 struct net_bridge_vlan *v,
				 struct nlattr **tb,
				 bool *changed,
				 struct netlink_ext_ack *extack)
{
	struct nlattr *tun_tb[BRIDGE_VLANDB_TINFO_MAX + 1], *attr;
	struct bridge_vlan_info *vinfo;
	u32 tun_id = 0;
	int cmd, err;

	if (!p) {
		NL_SET_ERR_MSG_MOD(extack, "Can't modify tunnel mapping of non-port vlans");
		return -EINVAL;
	}
	if (!(p->flags & BR_VLAN_TUNNEL)) {
		NL_SET_ERR_MSG_MOD(extack, "Port doesn't have tunnel flag set");
		return -EINVAL;
	}

	attr = tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO];
	err = nla_parse_nested(tun_tb, BRIDGE_VLANDB_TINFO_MAX, attr,
			       br_vlandb_tinfo_pol, extack);
	if (err)
		return err;

	if (!tun_tb[BRIDGE_VLANDB_TINFO_CMD]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing tunnel command attribute");
		return -ENOENT;
	}
	cmd = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_CMD]);
	switch (cmd) {
	case RTM_SETLINK:
		if (!tun_tb[BRIDGE_VLANDB_TINFO_ID]) {
			NL_SET_ERR_MSG_MOD(extack, "Missing tunnel id attribute");
			return -ENOENT;
		}
		/* when working on vlan ranges this is the starting tunnel id */
		tun_id = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_ID]);
		/* vlan info attr is guaranteed by br_vlan_rtm_process_one */
		vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
		/* tunnel ids are mapped to each vlan in increasing order,
		 * the starting vlan is in BRIDGE_VLANDB_ENTRY_INFO and v is the
		 * current vlan, so we compute: tun_id + v - vinfo->vid
		 */
		tun_id += v->vid - vinfo->vid;
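		/* Illustrative example (values not from the original source):
		 * mapping vlans 100-102 starting at tunnel id 1000 gives
		 * 100 -> 1000, 101 -> 1001 and, for v->vid == 102,
		 * tun_id = 1000 + (102 - 100) = 1002.
		 */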
		break;
	case RTM_DELLINK:
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel command");
		return -EINVAL;
	}

	return br_vlan_tunnel_info(p, cmd, v->vid, tun_id, changed);
}

static int br_vlan_process_one_opts(const struct net_bridge *br,
				    const struct net_bridge_port *p,
				    struct net_bridge_vlan_group *vg,
				    struct net_bridge_vlan *v,
				    struct nlattr **tb,
				    bool *changed,
				    struct netlink_ext_ack *extack)
{
	int err;

	*changed = false;
	if (tb[BRIDGE_VLANDB_ENTRY_STATE]) {
		u8 state = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_STATE]);

		err = br_vlan_modify_state(vg, v, state, changed, extack);
		if (err)
			return err;
	}
	if (tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO]) {
		err = br_vlan_modify_tunnel(p, v, tb, changed, extack);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER]) {
		u8 val;

		val = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_MCAST_ROUTER]);
		err = br_multicast_set_vlan_router(v, val);
		if (err)
			return err;
		*changed = true;
	}
#endif

	return 0;
}

int br_vlan_process_options(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct net_bridge_vlan *range_start,
			    struct net_bridge_vlan *range_end,
			    struct nlattr **tb,
			    struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v, *curr_start = NULL, *curr_end = NULL;
	struct net_bridge_vlan_group *vg;
	int vid, err = 0;
	u16 pvid;

	if (p)
		vg = nbp_vlan_group(p);
	else
		vg = br_vlan_group(br);

	if (!range_start || !br_vlan_should_use(range_start)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan range start doesn't exist, can't process options");
		return -ENOENT;
	}
	if (!range_end || !br_vlan_should_use(range_end)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan range end doesn't exist, can't process options");
		return -ENOENT;
	}

	pvid = br_get_pvid(vg);
	for (vid = range_start->vid; vid <= range_end->vid; vid++) {
		bool changed = false;

		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v)) {
			NL_SET_ERR_MSG_MOD(extack, "Vlan in range doesn't exist, can't process options");
			err = -ENOENT;
			break;
		}

		err = br_vlan_process_one_opts(br, p, vg, v, tb, &changed,
					       extack);
		if (err)
			break;

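		/* Notifications are batched: consecutive vlans whose options
		 * changed and which can still form one range are announced
		 * with a single RTM_NEWVLAN. Illustrative example (values not
		 * from the original source): changing the state of vlans
		 * 10-20 where 15 is the pvid and the other options match
		 * produces notifications for 10-14, 15 and 16-20.
		 */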
		if (changed) {
			/* vlan options changed, check for range */
			if (!curr_start) {
				curr_start = v;
				curr_end = v;
				continue;
			}

			if (v->vid == pvid ||
			    !br_vlan_can_enter_range(v, curr_end)) {
				br_vlan_notify(br, p, curr_start->vid,
					       curr_end->vid, RTM_NEWVLAN);
				curr_start = v;
			}
			curr_end = v;
		} else {
			/* nothing changed and nothing to notify yet */
			if (!curr_start)
				continue;

			br_vlan_notify(br, p, curr_start->vid, curr_end->vid,
				       RTM_NEWVLAN);
			curr_start = NULL;
			curr_end = NULL;
		}
	}
	if (curr_start)
		br_vlan_notify(br, p, curr_start->vid, curr_end->vid,
			       RTM_NEWVLAN);

	return err;
}

bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr,
					 const struct net_bridge_vlan *r_end)
{
	return v_curr->vid - r_end->vid == 1 &&
	       ((v_curr->priv_flags ^ r_end->priv_flags) &
		BR_VLFLAG_GLOBAL_MCAST_ENABLED) == 0 &&
	       br_multicast_ctx_options_equal(&v_curr->br_mcast_ctx,
					      &r_end->br_mcast_ctx);
}

bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range,
			      const struct net_bridge_vlan *v_opts)
{
	struct nlattr *nest2 __maybe_unused;
	u64 clockval __maybe_unused;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_GLOBAL_OPTIONS);
	if (!nest)
		return false;

	if (nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_ID, vid))
		goto out_err;

	if (vid_range && vid < vid_range &&
	    nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_RANGE, vid_range))
		goto out_err;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING,
		       !!(v_opts->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED)) ||
	    nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION,
		       v_opts->br_mcast_ctx.multicast_igmp_version) ||
	    nla_put_u32(skb, BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT,
			v_opts->br_mcast_ctx.multicast_last_member_count) ||
	    nla_put_u32(skb, BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT,
			v_opts->br_mcast_ctx.multicast_startup_query_count) ||
	    nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER,
		       v_opts->br_mcast_ctx.multicast_querier) ||
	    br_multicast_dump_querier_state(skb, &v_opts->br_mcast_ctx,
					    BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_STATE))
		goto out_err;

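	/* Timer values are kept internally in jiffies but exported to
	 * userspace in clock_t (USER_HZ) units, hence the
	 * jiffies_to_clock_t() conversions below.
	 */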
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_membership_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_querier_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;

	if (br_rports_have_mc_router(&v_opts->br_mcast_ctx)) {
		nest2 = nla_nest_start(skb,
				       BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS);
		if (!nest2)
			goto out_err;

		rcu_read_lock();
		if (br_rports_fill_info(skb, &v_opts->br_mcast_ctx)) {
			rcu_read_unlock();
			nla_nest_cancel(skb, nest2);
			goto out_err;
		}
		rcu_read_unlock();

		nla_nest_end(skb, nest2);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION,
		       v_opts->br_mcast_ctx.multicast_mld_version))
		goto out_err;
#endif
#endif

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

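/* Worst-case size estimate for a global options notification, used when
 * allocating the skb in br_vlan_global_opts_notify() below.
 */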
static size_t rtnl_vlan_global_opts_nlmsg_size(const struct net_bridge_vlan *v)
{
	return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
		+ nla_total_size(0) /* BRIDGE_VLANDB_GLOBAL_OPTIONS */
		+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_GOPTS_ID */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING */
		+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION */
		+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION */
		+ nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT */
		+ nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL */
		+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERIER */
		+ br_multicast_querier_state_size() /* BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_STATE */
		+ nla_total_size(0) /* BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS */
		+ br_rports_size(&v->br_mcast_ctx) /* BRIDGE_VLANDB_GOPTS_MCAST_ROUTER_PORTS */
#endif
		+ nla_total_size(sizeof(u16)); /* BRIDGE_VLANDB_GOPTS_RANGE */
}

static void br_vlan_global_opts_notify(const struct net_bridge *br,
				       u16 vid, u16 vid_range)
{
	struct net_bridge_vlan *v;
	struct br_vlan_msg *bvm;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	/* right now notifications are done only with rtnl held */
	ASSERT_RTNL();

	/* need to find the vlan due to flags/options */
	v = br_vlan_find(br_vlan_group(br), vid);
	if (!v)
		return;

	skb = nlmsg_new(rtnl_vlan_global_opts_nlmsg_size(v), GFP_KERNEL);
	if (!skb)
		goto out_err;

	err = -EMSGSIZE;
	nlh = nlmsg_put(skb, 0, 0, RTM_NEWVLAN, sizeof(*bvm), 0);
	if (!nlh)
		goto out_err;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = AF_BRIDGE;
	bvm->ifindex = br->dev->ifindex;

	if (!br_vlan_global_opts_fill(skb, vid, vid_range, v))
		goto out_err;

	nlmsg_end(skb, nlh);
	rtnl_notify(skb, dev_net(br->dev), 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
	return;

out_err:
	rtnl_set_sk_err(dev_net(br->dev), RTNLGRP_BRVLAN, err);
	kfree_skb(skb);
}

static int br_vlan_process_global_one_opts(const struct net_bridge *br,
					   struct net_bridge_vlan_group *vg,
					   struct net_bridge_vlan *v,
					   struct nlattr **tb,
					   bool *changed,
					   struct netlink_ext_ack *extack)
{
	int err __maybe_unused;

	*changed = false;
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]) {
		u8 mc_snooping;

		mc_snooping = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]);
		if (br_multicast_toggle_global_vlan(v, !!mc_snooping))
			*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]) {
		u8 ver;

		ver = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(&v->br_mcast_ctx, ver);
		if (err)
			return err;
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]) {
		u32 cnt;

		cnt = nla_get_u32(tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]);
		v->br_mcast_ctx.multicast_last_member_count = cnt;
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]) {
		u32 cnt;

		cnt = nla_get_u32(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]);
		v->br_mcast_ctx.multicast_startup_query_count = cnt;
		*changed = true;
	}
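	/* The interval attributes handled below are supplied in clock_t
	 * (USER_HZ) units and converted to jiffies before being stored in
	 * the per-vlan multicast context.
	 */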
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]);
		v->br_mcast_ctx.multicast_last_member_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]);
		v->br_mcast_ctx.multicast_membership_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]);
		v->br_mcast_ctx.multicast_querier_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]);
		v->br_mcast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]);
		v->br_mcast_ctx.multicast_query_response_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]);
		v->br_mcast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]) {
		u8 val;

		val = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]);
		err = br_multicast_set_querier(&v->br_mcast_ctx, val);
		if (err)
			return err;
		*changed = true;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]) {
		u8 ver;

		ver = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(&v->br_mcast_ctx, ver);
		if (err)
			return err;
		*changed = true;
	}
#endif
#endif

	return 0;
}

static const struct nla_policy br_vlan_db_gpol[BRIDGE_VLANDB_GOPTS_MAX + 1] = {
	[BRIDGE_VLANDB_GOPTS_ID]	= { .type = NLA_U16 },
	[BRIDGE_VLANDB_GOPTS_RANGE]	= { .type = NLA_U16 },
	[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]	= { .type = NLA_U32 },
	[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]	= { .type = NLA_U32 },
	[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]	= { .type = NLA_U64 },
};

int br_vlan_rtm_process_global_options(struct net_device *dev,
				       const struct nlattr *attr,
				       int cmd,
				       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v, *curr_start = NULL, *curr_end = NULL;
	struct nlattr *tb[BRIDGE_VLANDB_GOPTS_MAX + 1];
	struct net_bridge_vlan_group *vg;
	u16 vid, vid_range = 0;
	struct net_bridge *br;
	int err = 0;

	if (cmd != RTM_NEWVLAN) {
		NL_SET_ERR_MSG_MOD(extack, "Global vlan options support only set operation");
		return -EINVAL;
	}
	if (!netif_is_bridge_master(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Global vlan options can only be set on bridge device");
		return -EINVAL;
	}
	br = netdev_priv(dev);
	vg = br_vlan_group(br);
	if (WARN_ON(!vg))
		return -ENODEV;

	err = nla_parse_nested(tb, BRIDGE_VLANDB_GOPTS_MAX, attr,
			       br_vlan_db_gpol, extack);
	if (err)
		return err;

	if (!tb[BRIDGE_VLANDB_GOPTS_ID]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry id");
		return -EINVAL;
	}
	vid = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_ID]);
	if (!br_vlan_valid_id(vid, extack))
		return -EINVAL;

	if (tb[BRIDGE_VLANDB_GOPTS_RANGE]) {
		vid_range = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_RANGE]);
		if (!br_vlan_valid_id(vid_range, extack))
			return -EINVAL;
		if (vid >= vid_range) {
			NL_SET_ERR_MSG_MOD(extack, "End vlan id is less than or equal to start vlan id");
			return -EINVAL;
		}
	} else {
		vid_range = vid;
	}

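	/* Without a BRIDGE_VLANDB_GOPTS_RANGE attribute only the single vlan
	 * given by BRIDGE_VLANDB_GOPTS_ID is processed (vid_range == vid).
	 */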
	for (; vid <= vid_range; vid++) {
		bool changed = false;

		v = br_vlan_find(vg, vid);
		if (!v) {
			NL_SET_ERR_MSG_MOD(extack, "Vlan in range doesn't exist, can't process global options");
			err = -ENOENT;
			break;
		}

		err = br_vlan_process_global_one_opts(br, vg, v, tb, &changed,
						      extack);
		if (err)
			break;

		if (changed) {
			/* vlan options changed, check for range */
			if (!curr_start) {
				curr_start = v;
				curr_end = v;
				continue;
			}

			if (!br_vlan_global_opts_can_enter_range(v, curr_end)) {
				br_vlan_global_opts_notify(br, curr_start->vid,
							   curr_end->vid);
				curr_start = v;
			}
			curr_end = v;
		} else {
			/* nothing changed and nothing to notify yet */
			if (!curr_start)
				continue;

			br_vlan_global_opts_notify(br, curr_start->vid,
						   curr_end->vid);
			curr_start = NULL;
			curr_end = NULL;
		}
	}
	if (curr_start)
		br_vlan_global_opts_notify(br, curr_start->vid, curr_end->vid);

	return err;
}