1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2014-2019 aQuantia Corporation. */
4 /* File aq_filters.c: RX filters related functions. */
6 #include "aq_filters.h"
/*
 * aq_rule_is_approve() - check whether the ethtool flow spec uses a flow
 * type this driver supports.  Rejects any spec with FLOW_MAC_EXT set,
 * then dispatches on the flow type (masking off FLOW_EXT); for user IP
 * flows it further dispatches on the raw L4 protocol field.
 * NOTE(review): the case labels and return statements of these switches
 * are not visible in this extract — confirm against the full file.
 */
8 static bool __must_check
9 aq_rule_is_approve(struct ethtool_rx_flow_spec *fsp)
11 if (fsp->flow_type & FLOW_MAC_EXT)
14 switch (fsp->flow_type & ~FLOW_EXT) {
26 switch (fsp->h_u.usr_ip4_spec.proto) {
36 switch (fsp->h_u.usr_ip6_spec.l4_proto) {
/*
 * aq_match_filter() - compare two ethtool flow specs for equality.
 * Two filters match when the flow type and all four header/mask unions
 * (h_u, h_ext, m_u, m_ext) are bytewise identical; location is
 * deliberately NOT compared, so duplicates at different slots are caught.
 * NOTE(review): the return statements are outside this extract.
 */
52 static bool __must_check
53 aq_match_filter(struct ethtool_rx_flow_spec *fsp1,
54 struct ethtool_rx_flow_spec *fsp2)
56 if (fsp1->flow_type != fsp2->flow_type ||
57 memcmp(&fsp1->h_u, &fsp2->h_u, sizeof(fsp2->h_u)) ||
58 memcmp(&fsp1->h_ext, &fsp2->h_ext, sizeof(fsp2->h_ext)) ||
59 memcmp(&fsp1->m_u, &fsp2->m_u, sizeof(fsp2->m_u)) ||
60 memcmp(&fsp1->m_ext, &fsp2->m_ext, sizeof(fsp2->m_ext)))
/*
 * aq_rule_already_exists() - scan the software filter list for a rule
 * identical to @fsp (same headers/masks, per aq_match_filter()).
 * A rule occupying the same location is skipped, so replacing a rule
 * in place is not reported as a duplicate.  Logs an error on match.
 */
66 static bool __must_check
67 aq_rule_already_exists(struct aq_nic_s *aq_nic,
68 struct ethtool_rx_flow_spec *fsp)
70 struct aq_rx_filter *rule;
71 struct hlist_node *aq_node2;
72 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
74 hlist_for_each_entry_safe(rule, aq_node2,
75 &rx_fltrs->filter_list, aq_node) {
/* same slot => this is an update of an existing rule, not a duplicate */
76 if (rule->aq_fsp.location == fsp->location)
78 if (aq_match_filter(&rule->aq_fsp, fsp)) {
79 netdev_err(aq_nic->ndev,
80 "ethtool: This filter is already set\n");
/*
 * aq_check_approve_fl3l4() - validate location and address-family
 * constraints for an L3/L4 filter.
 *
 * Valid locations are [AQ_RX_FIRST_LOC_FL3L4, AQ_RX_LAST_LOC_FL3L4 -
 * reserved_count].  IPv4 and IPv6 L3/L4 filters may not be mixed: if
 * the request's family conflicts with currently-active filters of the
 * other family, the is_ipv6 flag is reverted and an error is logged.
 * An IPv6 filter consumes four consecutive slots, so its location must
 * be the first slot of one of the two IPv6-capable groups.
 */
88 static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
89 struct aq_hw_rx_fltrs_s *rx_fltrs,
90 struct ethtool_rx_flow_spec *fsp)
92 u32 last_location = AQ_RX_LAST_LOC_FL3L4 -
93 aq_nic->aq_hw_rx_fltrs.fl3l4.reserved_count;
95 if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
96 fsp->location > last_location) {
97 netdev_err(aq_nic->ndev,
98 "ethtool: location must be in range [%d, %d]",
99 AQ_RX_FIRST_LOC_FL3L4, last_location);
/* caller set is_ipv6 = true but IPv4 filters are active: reject */
102 if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
103 rx_fltrs->fl3l4.is_ipv6 = false;
104 netdev_err(aq_nic->ndev,
105 "ethtool: mixing ipv4 and ipv6 is not allowed");
/* caller set is_ipv6 = false but IPv6 filters are active: reject */
107 } else if (!rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv6) {
108 rx_fltrs->fl3l4.is_ipv6 = true;
109 netdev_err(aq_nic->ndev,
110 "ethtool: mixing ipv4 and ipv6 is not allowed");
/* IPv6 spans 4 slots: only the two group-base locations are legal */
112 } else if (rx_fltrs->fl3l4.is_ipv6 &&
113 fsp->location != AQ_RX_FIRST_LOC_FL3L4 + 4 &&
114 fsp->location != AQ_RX_FIRST_LOC_FL3L4) {
115 netdev_err(aq_nic->ndev,
116 "ethtool: The specified location for ipv6 must be %d or %d",
117 AQ_RX_FIRST_LOC_FL3L4, AQ_RX_FIRST_LOC_FL3L4 + 4);
/*
 * aq_check_approve_fl2() - validate an L2 (ethertype) filter request.
 * Location must lie in [AQ_RX_FIRST_LOC_FETHERT, AQ_RX_LAST_LOC_FETHERT
 * - fet_reserved_count].  When the filter matches on VLAN priority
 * (mask == VLAN_PRIO_MASK), an ether_type must also be given — the
 * hardware L2 filter cannot match on priority alone.
 */
124 static int __must_check
125 aq_check_approve_fl2(struct aq_nic_s *aq_nic,
126 struct aq_hw_rx_fltrs_s *rx_fltrs,
127 struct ethtool_rx_flow_spec *fsp)
129 u32 last_location = AQ_RX_LAST_LOC_FETHERT -
130 aq_nic->aq_hw_rx_fltrs.fet_reserved_count;
132 if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
133 fsp->location > last_location) {
134 netdev_err(aq_nic->ndev,
135 "ethtool: location must be in range [%d, %d]",
136 AQ_RX_FIRST_LOC_FETHERT,
141 if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK &&
142 fsp->m_u.ether_spec.h_proto == 0U) {
143 netdev_err(aq_nic->ndev,
144 "ethtool: proto (ether_type) parameter must be specified");
/*
 * aq_check_approve_fvlan() - validate a VLAN-id filter request.
 *
 * Checks, in order:
 *  - location lies in [AQ_RX_FIRST_LOC_FVLANID, AQ_RX_LAST_LOC_FVLANID];
 *  - when HW VLAN filtering is enabled, the requested vlan-id is one the
 *    interface actually has (present in aq_nic->active_vlans);
 *  - the destination queue fits the device (num_rss_queues * tcs queues,
 *    valid ids are [0, num_rss_queues * tcs - 1]).
 *
 * Fix: the queue bound used '>', which accepted the out-of-range value
 * ring_cookie == num_rss_queues * tcs; '>=' matches both the error
 * message here and the identical check in aq_rule_is_not_correct().
 */
151 static int __must_check
152 aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
153 struct aq_hw_rx_fltrs_s *rx_fltrs,
154 struct ethtool_rx_flow_spec *fsp)
156 struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg;
158 if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
159 fsp->location > AQ_RX_LAST_LOC_FVLANID) {
160 netdev_err(aq_nic->ndev,
161 "ethtool: location must be in range [%d, %d]",
162 AQ_RX_FIRST_LOC_FVLANID,
163 AQ_RX_LAST_LOC_FVLANID);
/* with HW VLAN filtering on, only vlans known to the stack may be used */
167 if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
168 (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK,
169 aq_nic->active_vlans))) {
170 netdev_err(aq_nic->ndev,
171 "ethtool: unknown vlan-id specified");
/* queue ids are 0-based: num_rss_queues * tcs itself is out of range */
175 if (fsp->ring_cookie >= cfg->num_rss_queues * cfg->tcs) {
176 netdev_err(aq_nic->ndev,
177 "ethtool: queue number must be in range [0, %d]",
178 cfg->num_rss_queues * cfg->tcs - 1);
/*
 * aq_check_filter() - route a flow spec to the proper validator.
 * FLOW_EXT rules are classified by their VLAN TCI mask: a full VID mask
 * means a VLAN filter, a priority-only mask means an L2 (ethertype)
 * filter, anything else is rejected.  Non-EXT rules dispatch on flow
 * type: ethertype filters go to aq_check_approve_fl2(); IPv4/IPv6
 * flows set fl3l4.is_ipv6 accordingly and go to aq_check_approve_fl3l4().
 * NOTE(review): case labels between the visible lines are not shown here.
 */
184 static int __must_check
185 aq_check_filter(struct aq_nic_s *aq_nic,
186 struct ethtool_rx_flow_spec *fsp)
189 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
191 if (fsp->flow_type & FLOW_EXT) {
192 if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_VID_MASK) {
193 err = aq_check_approve_fvlan(aq_nic, rx_fltrs, fsp);
194 } else if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK) {
195 err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
197 netdev_err(aq_nic->ndev,
198 "ethtool: invalid vlan mask 0x%x specified",
199 be16_to_cpu(fsp->m_ext.vlan_tci));
203 switch (fsp->flow_type & ~FLOW_EXT) {
205 err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
/* IPv4 flow types fall through to the L3/L4 validator */
212 rx_fltrs->fl3l4.is_ipv6 = false;
213 err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
/* IPv6 flow types likewise, with the family flag set */
220 rx_fltrs->fl3l4.is_ipv6 = true;
221 err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
224 netdev_err(aq_nic->ndev,
225 "ethtool: unknown flow-type specified");
/*
 * aq_rule_is_not_support() - reject rules the driver cannot express.
 * Returns true (logging why) when: NETIF_F_NTUPLE is off on the netdev;
 * the flow type fails aq_rule_is_approve(); a non-ethertype rule asks
 * to match on IPv4 TOS / IPv6 traffic class (unsupported by HW); or
 * FLOW_MAC_EXT is requested.
 */
233 static bool __must_check
234 aq_rule_is_not_support(struct aq_nic_s *aq_nic,
235 struct ethtool_rx_flow_spec *fsp)
237 bool rule_is_not_support = false;
239 if (!(aq_nic->ndev->features & NETIF_F_NTUPLE)) {
240 netdev_err(aq_nic->ndev,
241 "ethtool: Please, to enable the RX flow control:\n"
242 "ethtool -K %s ntuple on\n", aq_nic->ndev->name);
243 rule_is_not_support = true;
244 } else if (!aq_rule_is_approve(fsp)) {
245 netdev_err(aq_nic->ndev,
246 "ethtool: The specified flow type is not supported\n");
247 rule_is_not_support = true;
/* tos/tclass share storage in the union, so either being set suffices */
248 } else if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW &&
249 (fsp->h_u.tcp_ip4_spec.tos ||
250 fsp->h_u.tcp_ip6_spec.tclass)) {
251 netdev_err(aq_nic->ndev,
252 "ethtool: The specified tos tclass are not supported\n");
253 rule_is_not_support = true;
254 } else if (fsp->flow_type & FLOW_MAC_EXT) {
255 netdev_err(aq_nic->ndev,
256 "ethtool: MAC_EXT is not supported");
257 rule_is_not_support = true;
260 return rule_is_not_support;
/*
 * aq_rule_is_not_correct() - sanity-check a rule's location and action.
 * Returns true when the location exceeds AQ_RX_MAX_RXNFC_LOC, the
 * type-specific validation in aq_check_filter() fails, or — for rules
 * that are not RX_CLS_FLOW_DISC (drop) — the target queue id is outside
 * [0, num_rss_queues * tcs - 1].
 * NOTE(review): the first branch's condition (before line 271) is not
 * visible in this extract.
 */
263 static bool __must_check
264 aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
265 struct ethtool_rx_flow_spec *fsp)
267 struct aq_nic_cfg_s *cfg = &aq_nic->aq_nic_cfg;
268 bool rule_is_not_correct = false;
271 rule_is_not_correct = true;
272 } else if (fsp->location > AQ_RX_MAX_RXNFC_LOC) {
273 netdev_err(aq_nic->ndev,
274 "ethtool: The specified number %u rule is invalid\n",
276 rule_is_not_correct = true;
277 } else if (aq_check_filter(aq_nic, fsp)) {
278 rule_is_not_correct = true;
279 } else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
280 if (fsp->ring_cookie >= cfg->num_rss_queues * cfg->tcs) {
281 netdev_err(aq_nic->ndev,
282 "ethtool: The specified action is invalid.\n"
283 "Maximum allowable value action is %u.\n",
284 cfg->num_rss_queues * cfg->tcs - 1);
285 rule_is_not_correct = true;
289 return rule_is_not_correct;
/*
 * aq_check_rule() - full validation pipeline for a new rxnfc rule:
 * correctness (location/action), driver support, then duplicate check.
 * NOTE(review): the error codes returned on each branch are outside
 * this extract.
 */
292 static int __must_check
293 aq_check_rule(struct aq_nic_s *aq_nic,
294 struct ethtool_rx_flow_spec *fsp)
298 if (aq_rule_is_not_correct(aq_nic, fsp))
300 else if (aq_rule_is_not_support(aq_nic, fsp))
302 else if (aq_rule_already_exists(aq_nic, fsp))
/*
 * aq_set_data_fl2() - translate an ethtool rule into the hardware L2
 * filter descriptor.  Location is rebased to the ethertype filter bank;
 * a non-discard ring_cookie becomes the target queue; ethertype and the
 * (shifted) VLAN user priority are taken from the spec.
 * NOTE(review): the use of @add between lines 319 and 323 is not
 * visible here — confirm it gates the match-field population.
 */
308 static void aq_set_data_fl2(struct aq_nic_s *aq_nic,
309 struct aq_rx_filter *aq_rx_fltr,
310 struct aq_rx_filter_l2 *data, bool add)
312 const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
314 memset(data, 0, sizeof(*data));
316 data->location = fsp->location - AQ_RX_FIRST_LOC_FETHERT;
318 if (fsp->ring_cookie != RX_CLS_FLOW_DISC)
319 data->queue = fsp->ring_cookie;
323 data->ethertype = be16_to_cpu(fsp->h_u.ether_spec.h_proto);
324 data->user_priority_en = be16_to_cpu(fsp->m_ext.vlan_tci)
326 data->user_priority = (be16_to_cpu(fsp->h_ext.vlan_tci)
327 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
/*
 * aq_add_del_fether() - program (add) or clear (!add) an ethertype
 * filter in hardware via the hw_ops callbacks.  Builds the descriptor
 * with aq_set_data_fl2() first and bails out if the required callback
 * is not implemented by this hardware generation.
 */
330 static int aq_add_del_fether(struct aq_nic_s *aq_nic,
331 struct aq_rx_filter *aq_rx_fltr, bool add)
333 struct aq_rx_filter_l2 data;
334 struct aq_hw_s *aq_hw = aq_nic->aq_hw;
335 const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
337 aq_set_data_fl2(aq_nic, aq_rx_fltr, &data, add);
339 if (unlikely(!aq_hw_ops->hw_filter_l2_set))
341 if (unlikely(!aq_hw_ops->hw_filter_l2_clear))
345 return aq_hw_ops->hw_filter_l2_set(aq_hw, &data);
347 return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data);
/*
 * aq_fvlan_is_busy() - return whether @vlan is already claimed by an
 * enabled VLAN filter slot that has a real queue assignment (i.e. one
 * created explicitly via ethtool, not auto-populated from active_vlans).
 */
350 static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan)
354 for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
355 if (aq_vlans[i].enable &&
356 aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
357 aq_vlans[i].vlan_id == vlan) {
365 /* Function rebuilds array of vlan filters so that filters with assigned
366 * queue have a precedence over just vlans on the interface.
 *
 * Slots already holding queue-assigned (ethtool) filters are kept as-is;
 * the remaining slots are refilled from the active_vlans bitmap, skipping
 * any vlan-id already claimed by a queue-assigned slot (aq_fvlan_is_busy).
 * Once the bitmap is exhausted (find_next_bit returns VLAN_N_VID) the
 * rest of the slots are disabled and cleared.
 */
368 static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic,
369 unsigned long *active_vlans,
370 struct aq_rx_filter_vlan *aq_vlans)
372 bool vlan_busy = false;
376 for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
/* leave explicit ethtool filters (with a queue) untouched */
377 if (aq_vlans[i].enable &&
378 aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
381 vlan = find_next_bit(active_vlans,
/* no more interface vlans: disable and clear this slot */
384 if (vlan == VLAN_N_VID) {
385 aq_vlans[i].enable = 0U;
386 aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
387 aq_vlans[i].vlan_id = 0;
391 vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan);
393 aq_vlans[i].enable = 1U;
394 aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
395 aq_vlans[i].vlan_id = vlan;
397 } while (vlan_busy && vlan != VLAN_N_VID);
/*
 * aq_set_data_fvlan() - fill the VLAN filter table slot for a rule.
 * The slot index is the rule location rebased to the VLAN filter bank.
 * The slot is cleared first; on add, any auto-populated (queue-less)
 * entry for the same vlan-id is disabled so the explicit rule takes
 * precedence, then the slot gets the vlan-id, queue (low 5 bits of
 * ring_cookie) and enable flag.
 * NOTE(review): the early-return on !add (between lines 409 and 414)
 * is not visible in this extract.
 */
401 static int aq_set_data_fvlan(struct aq_nic_s *aq_nic,
402 struct aq_rx_filter *aq_rx_fltr,
403 struct aq_rx_filter_vlan *aq_vlans, bool add)
405 const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
406 int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID;
409 memset(&aq_vlans[location], 0, sizeof(aq_vlans[location]));
414 /* remove vlan if it was in table without queue assignment */
415 for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
416 if (aq_vlans[i].vlan_id ==
417 (be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) {
418 aq_vlans[i].enable = false;
422 aq_vlans[location].location = location;
423 aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci)
/* hardware queue field is 5 bits wide */
425 aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
426 aq_vlans[location].enable = 1U;
/*
 * aq_del_fvlan_by_vlan() - remove the ethtool VLAN rule matching
 * @vlan_id, if one exists.  Scans the software filter list; on a hit
 * of type aq_rx_filter_vlan, deletes it through the normal
 * aq_del_rxnfc_rule() path using a synthetic ethtool_rxnfc whose only
 * consumed field is fs.location.
 */
431 int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
433 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
434 struct aq_rx_filter *rule = NULL;
435 struct hlist_node *aq_node2;
437 hlist_for_each_entry_safe(rule, aq_node2,
438 &rx_fltrs->filter_list, aq_node) {
439 if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
/* re-check: the loop may have ended without a match */
442 if (rule && rule->type == aq_rx_filter_vlan &&
443 be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
444 struct ethtool_rxnfc cmd;
446 cmd.fs.location = rule->aq_fsp.location;
447 return aq_del_rxnfc_rule(aq_nic, &cmd);
/*
 * aq_add_del_fvlan() - update the VLAN filter table for a rule and push
 * the whole table to hardware via aq_filters_vlans_update().  Fails
 * early if the hardware does not implement hw_filter_vlan_set.
 */
453 static int aq_add_del_fvlan(struct aq_nic_s *aq_nic,
454 struct aq_rx_filter *aq_rx_fltr, bool add)
456 const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
458 if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
461 aq_set_data_fvlan(aq_nic,
463 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans,
466 return aq_filters_vlans_update(aq_nic);
/*
 * aq_set_data_fl3l4() - build the hardware L3/L4 filter descriptor from
 * an ethtool rule, and maintain the active_ipv4/active_ipv6 bookkeeping
 * bitmaps in rx_fltrs.
 *
 * On delete (!add) the location's bit is cleared from the relevant
 * bitmap (IPv6 entries occupy 4 slots, hence location/4).  On add, the
 * cmd word accumulates: enable bit, L4 protocol compare (TCP/UDP/SCTP),
 * source/destination IP compares (IPv4 single word, IPv6 four words),
 * port compares for non-IP_USER flows, and finally the action — either
 * host-with-queue (from ring_cookie) or discard.
 * NOTE(review): several case labels, the delete early-return and the
 * cmd checks around lines 544-551 are not visible in this extract.
 */
469 static int aq_set_data_fl3l4(struct aq_nic_s *aq_nic,
470 struct aq_rx_filter *aq_rx_fltr,
471 struct aq_rx_filter_l3l4 *data, bool add)
473 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
474 const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
476 memset(data, 0, sizeof(*data));
478 data->is_ipv6 = rx_fltrs->fl3l4.is_ipv6;
479 data->location = HW_ATL_GET_REG_LOCATION_FL3L4(fsp->location);
/* delete path: drop this location from the active-family bitmap */
483 rx_fltrs->fl3l4.active_ipv4 &= ~BIT(data->location);
485 rx_fltrs->fl3l4.active_ipv6 &=
486 ~BIT((data->location) / 4);
491 data->cmd |= HW_ATL_RX_ENABLE_FLTR_L3L4;
493 switch (fsp->flow_type) {
496 data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
500 data->cmd |= HW_ATL_RX_UDP;
501 data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
505 data->cmd |= HW_ATL_RX_SCTP;
506 data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
512 if (!data->is_ipv6) {
514 ntohl(fsp->h_u.tcp_ip4_spec.ip4src);
516 ntohl(fsp->h_u.tcp_ip4_spec.ip4dst);
517 rx_fltrs->fl3l4.active_ipv4 |= BIT(data->location);
/* IPv6 consumes a 4-slot group: bitmap indexed by location/4 */
521 rx_fltrs->fl3l4.active_ipv6 |= BIT((data->location) / 4);
522 for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
524 ntohl(fsp->h_u.tcp_ip6_spec.ip6dst[i]);
526 ntohl(fsp->h_u.tcp_ip6_spec.ip6src[i]);
528 data->cmd |= HW_ATL_RX_ENABLE_L3_IPV6;
/* user IP flows carry no L4 ports; all others do */
530 if (fsp->flow_type != IP_USER_FLOW &&
531 fsp->flow_type != IPV6_USER_FLOW) {
532 if (!data->is_ipv6) {
534 ntohs(fsp->h_u.tcp_ip4_spec.pdst);
536 ntohs(fsp->h_u.tcp_ip4_spec.psrc);
539 ntohs(fsp->h_u.tcp_ip6_spec.pdst);
541 ntohs(fsp->h_u.tcp_ip6_spec.psrc);
544 if (data->ip_src[0] && !data->is_ipv6)
545 data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3;
546 if (data->ip_dst[0] && !data->is_ipv6)
547 data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3;
549 data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4;
551 data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4;
552 if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
553 data->cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT;
554 data->cmd |= fsp->ring_cookie << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
555 data->cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4;
557 data->cmd |= HW_ATL_RX_DISCARD << HW_ATL_RX_ACTION_FL3F4_SHIFT;
/*
 * aq_set_fl3l4() - thin wrapper: push an L3/L4 descriptor to hardware
 * through hw_filter_l3l4_set, guarding against an unimplemented op.
 */
563 static int aq_set_fl3l4(struct aq_hw_s *aq_hw,
564 const struct aq_hw_ops *aq_hw_ops,
565 struct aq_rx_filter_l3l4 *data)
567 if (unlikely(!aq_hw_ops->hw_filter_l3l4_set))
570 return aq_hw_ops->hw_filter_l3l4_set(aq_hw, data);
/*
 * aq_add_del_fl3l4() - validate the rule's location against the L3/L4
 * bank bounds, build the descriptor (aq_set_data_fl3l4) and program it.
 * The combined unlikely() check treats any sub-step failure as an error.
 */
573 static int aq_add_del_fl3l4(struct aq_nic_s *aq_nic,
574 struct aq_rx_filter *aq_rx_fltr, bool add)
576 const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
577 struct aq_hw_s *aq_hw = aq_nic->aq_hw;
578 struct aq_rx_filter_l3l4 data;
580 if (unlikely(aq_rx_fltr->aq_fsp.location < AQ_RX_FIRST_LOC_FL3L4 ||
581 aq_rx_fltr->aq_fsp.location > AQ_RX_LAST_LOC_FL3L4 ||
582 aq_set_data_fl3l4(aq_nic, aq_rx_fltr, &data, add)))
585 return aq_set_fl3l4(aq_hw, aq_hw_ops, &data);
/*
 * aq_add_del_rule() - dispatch a rule to the right add/delete backend
 * and record its type on the filter object.  FLOW_EXT rules with a full
 * VID mask become VLAN filters, priority-mask rules become ethertype
 * filters; non-EXT rules dispatch on flow type to the ethertype or
 * L3/L4 backend.
 * NOTE(review): the mask comparisons (lines 594/598) and intermediate
 * case labels are split across lines missing from this extract.
 */
588 static int aq_add_del_rule(struct aq_nic_s *aq_nic,
589 struct aq_rx_filter *aq_rx_fltr, bool add)
593 if (aq_rx_fltr->aq_fsp.flow_type & FLOW_EXT) {
594 if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
596 aq_rx_fltr->type = aq_rx_filter_vlan;
597 err = aq_add_del_fvlan(aq_nic, aq_rx_fltr, add);
598 } else if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
600 aq_rx_fltr->type = aq_rx_filter_ethertype;
601 err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
604 switch (aq_rx_fltr->aq_fsp.flow_type & ~FLOW_EXT) {
606 aq_rx_fltr->type = aq_rx_filter_ethertype;
607 err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
617 aq_rx_fltr->type = aq_rx_filter_l3l4;
618 err = aq_add_del_fl3l4(aq_nic, aq_rx_fltr, add);
/*
 * aq_update_table_filters() - insert @aq_rx_fltr into the
 * location-sorted software filter list at @index, replacing any rule
 * already at that location (which is unprogrammed from hardware and
 * freed first).  @parent tracks the node to insert behind so ordering
 * by location is preserved; the active filter count is kept in sync.
 * NOTE(review): the loop body updating @parent and the break condition
 * interior are not fully visible in this extract.
 */
629 static int aq_update_table_filters(struct aq_nic_s *aq_nic,
630 struct aq_rx_filter *aq_rx_fltr, u16 index,
631 struct ethtool_rxnfc *cmd)
633 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
634 struct aq_rx_filter *rule = NULL, *parent = NULL;
635 struct hlist_node *aq_node2;
638 hlist_for_each_entry_safe(rule, aq_node2,
639 &rx_fltrs->filter_list, aq_node) {
640 if (rule->aq_fsp.location >= index)
/* exact location match: remove the old rule before inserting */
645 if (rule && rule->aq_fsp.location == index) {
646 err = aq_add_del_rule(aq_nic, rule, false);
647 hlist_del(&rule->aq_node);
649 --rx_fltrs->active_filters;
/* NULL aq_rx_fltr means pure deletion; nothing to insert */
652 if (unlikely(!aq_rx_fltr))
655 INIT_HLIST_NODE(&aq_rx_fltr->aq_node);
658 hlist_add_behind(&aq_rx_fltr->aq_node, &parent->aq_node);
660 hlist_add_head(&aq_rx_fltr->aq_node, &rx_fltrs->filter_list);
662 ++rx_fltrs->active_filters;
/* aq_get_rxnfc_count_all_rules() - number of rules currently in the
 * software filter list (maintained by add/delete paths).
 */
667 u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic)
669 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
671 return rx_fltrs->active_filters;
/* aq_get_hw_rx_fltrs() - accessor for the nic's RX filter state. */
674 struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic)
676 return &aq_nic->aq_hw_rx_fltrs;
/*
 * aq_add_rxnfc_rule() - ethtool entry point to add an RX classification
 * rule.  Validates via aq_check_rule(), allocates the software filter,
 * copies the spec, links it into the sorted list, then programs the
 * hardware; on hardware failure the list insertion is rolled back
 * (unlink + count decrement).  The cast drops const because the spec is
 * stored by copy only.
 */
679 int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
681 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
682 struct ethtool_rx_flow_spec *fsp =
683 (struct ethtool_rx_flow_spec *)&cmd->fs;
684 struct aq_rx_filter *aq_rx_fltr;
687 err = aq_check_rule(aq_nic, fsp);
691 aq_rx_fltr = kzalloc(sizeof(*aq_rx_fltr), GFP_KERNEL);
692 if (unlikely(!aq_rx_fltr)) {
697 memcpy(&aq_rx_fltr->aq_fsp, fsp, sizeof(*fsp));
699 err = aq_update_table_filters(aq_nic, aq_rx_fltr, fsp->location, NULL);
703 err = aq_add_del_rule(aq_nic, aq_rx_fltr, true);
/* hardware programming failed: roll back the list insertion */
705 hlist_del(&aq_rx_fltr->aq_node);
706 --rx_fltrs->active_filters;
/*
 * aq_del_rxnfc_rule() - ethtool entry point to delete the rule at
 * cmd->fs.location.  Finds it in the software list, unprograms it from
 * hardware (aq_add_del_rule with add=false), unlinks it and decrements
 * the active filter count.  Only cmd->fs.location is consumed.
 */
718 int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
720 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
721 struct aq_rx_filter *rule = NULL;
722 struct hlist_node *aq_node2;
725 hlist_for_each_entry_safe(rule, aq_node2,
726 &rx_fltrs->filter_list, aq_node) {
727 if (rule->aq_fsp.location == cmd->fs.location)
/* re-check: the loop may finish without finding the location */
731 if (rule && rule->aq_fsp.location == cmd->fs.location) {
732 err = aq_add_del_rule(aq_nic, rule, false);
733 hlist_del(&rule->aq_node);
735 --rx_fltrs->active_filters;
/*
 * aq_get_rxnfc_rule() - ethtool entry point to read back one rule.
 * Walks the location-sorted list to the first rule at or past the
 * requested location; only an exact location match is returned (the
 * spec is copied out into cmd->fs), otherwise an error.
 */
740 int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd)
742 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
743 struct ethtool_rx_flow_spec *fsp =
744 (struct ethtool_rx_flow_spec *)&cmd->fs;
745 struct aq_rx_filter *rule = NULL;
746 struct hlist_node *aq_node2;
748 hlist_for_each_entry_safe(rule, aq_node2,
749 &rx_fltrs->filter_list, aq_node)
750 if (fsp->location <= rule->aq_fsp.location)
753 if (unlikely(!rule || fsp->location != rule->aq_fsp.location))
756 memcpy(fsp, &rule->aq_fsp, sizeof(*fsp));
/*
 * aq_get_rxnfc_all_rules() - ethtool entry point: report the total rule
 * count in cmd->data and fill @rule_locs with up to cmd->rule_cnt
 * locations, updating cmd->rule_cnt to the number actually written.
 */
761 int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
764 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
765 struct hlist_node *aq_node2;
766 struct aq_rx_filter *rule;
769 cmd->data = aq_get_rxnfc_count_all_rules(aq_nic);
771 hlist_for_each_entry_safe(rule, aq_node2,
772 &rx_fltrs->filter_list, aq_node) {
/* caller's buffer is full: stop copying locations */
773 if (unlikely(count == cmd->rule_cnt))
776 rule_locs[count++] = rule->aq_fsp.location;
779 cmd->rule_cnt = count;
/*
 * aq_clear_rxnfc_all_rules() - unprogram every rule from hardware and
 * empty the software filter list; uses the _safe iterator because each
 * node is unlinked during the walk.
 */
784 int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic)
786 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
787 struct hlist_node *aq_node2;
788 struct aq_rx_filter *rule;
791 hlist_for_each_entry_safe(rule, aq_node2,
792 &rx_fltrs->filter_list, aq_node) {
793 err = aq_add_del_rule(aq_nic, rule, false);
796 hlist_del(&rule->aq_node);
798 --rx_fltrs->active_filters;
/*
 * aq_reapply_rxnfc_all_rules() - re-program every software-list rule
 * into hardware (used after a device reset/reconfiguration, when the
 * hardware tables have been wiped but the software list survives).
 */
805 int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic)
807 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
808 struct hlist_node *aq_node2;
809 struct aq_rx_filter *rule;
812 hlist_for_each_entry_safe(rule, aq_node2,
813 &rx_fltrs->filter_list, aq_node) {
814 err = aq_add_del_rule(aq_nic, rule, true);
/*
 * aq_filters_vlans_update() - rebuild the VLAN filter table from the
 * active_vlans bitmap plus explicit rules, push it to hardware, and set
 * the VLAN promiscuous state: filtering is enabled only when CTAG
 * filtering is on, the active vlan count fits in AQ_VLAN_MAX_FILTERS
 * (and is non-zero), and the interface is not in IFF_PROMISC; otherwise
 * the device is left VLAN-promiscuous (is_vlan_force_promisc = true).
 */
823 int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
825 const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
826 struct aq_hw_s *aq_hw = aq_nic->aq_hw;
830 if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
832 if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
835 aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
836 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
/* disable filtering while the table is being rewritten */
838 if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
839 hweight = bitmap_weight(aq_nic->active_vlans, VLAN_N_VID);
841 err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
846 err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
847 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
852 if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
853 if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) {
854 err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
855 !(aq_nic->packet_filter & IFF_PROMISC));
856 aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
858 /* otherwise left in promiscue mode */
859 aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
866 int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
868 const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
869 struct aq_hw_s *aq_hw = aq_nic->aq_hw;
872 bitmap_zero(aq_nic->active_vlans, VLAN_N_VID);
873 aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
874 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
876 if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
878 if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
881 aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
882 err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
885 err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
886 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans