1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
4 #include <linux/netdevice.h>
5 #include <linux/dynamic_debug.h>
6 #include <linux/etherdevice.h>
7 #include <linux/list.h>
10 #include "ionic_lif.h"
11 #include "ionic_rx_filter.h"
/* Unlink an rx filter from the lif's lookup lists and release it.
 *
 * NOTE(review): this chunk is elided — the by_id unlink and the
 * devm free of @f (which is presumably why @dev is fetched) are not
 * visible here; confirm against the full file.
 * NOTE(review): visible callers all hold lif->rx_filters.lock around
 * this call — looks like the lock is a precondition; confirm.
 */
13 void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f)
15 struct device *dev = lif->ionic->dev;
/* drop the filter from its by_hash bucket */
18 hlist_del(&f->by_hash);
/* Re-post every saved rx filter to the device (e.g. after a reset).
 *
 * Walks all by_id buckets, replays each filter's saved add-command
 * through the adminq, logs and frees filters the device rejects, and
 * collects the survivors on a temporary list so the by_id buckets can
 * be rebuilt from the new device-assigned filter ids.
 *
 * NOTE(review): several lines are elided in this chunk — locals
 * (i, key, err), the error-branch braces, the hlist_del at the old id,
 * and the error codes in the netdev_info calls; confirm in full file.
 */
22 void ionic_rx_filter_replay(struct ionic_lif *lif)
24 struct ionic_rx_filter_add_cmd *ac;
25 struct hlist_head new_id_list;
26 struct ionic_admin_ctx ctx;
27 struct ionic_rx_filter *f;
28 struct hlist_head *head;
29 struct hlist_node *tmp;
/* successes are parked here until every new id is known */
34 INIT_HLIST_HEAD(&new_id_list);
35 ac = &ctx.cmd.rx_filter_add;
37 for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
38 head = &lif->rx_filters.by_id[i];
39 hlist_for_each_entry_safe(f, tmp, head, by_id) {
/* fresh on-stack completion for each replayed command */
40 ctx.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work);
/* replay exactly the command that was saved at add time */
41 memcpy(ac, &f->cmd, sizeof(f->cmd));
42 dev_dbg(&lif->netdev->dev, "replay filter command:\n");
43 dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
44 &ctx.cmd, sizeof(ctx.cmd), true);
46 err = ionic_adminq_post_wait(lif, &ctx);
/* on failure: log per match type, then drop the stale filter
 * (NOTE(review): the enclosing if (err) { ... } braces and the
 * break statements appear to be elided here)
 */
48 switch (le16_to_cpu(ac->match)) {
49 case IONIC_RX_FILTER_MATCH_VLAN:
50 netdev_info(lif->netdev, "Replay failed - %d: vlan %d\n",
52 le16_to_cpu(ac->vlan.vlan));
54 case IONIC_RX_FILTER_MATCH_MAC:
55 netdev_info(lif->netdev, "Replay failed - %d: mac %pM\n",
58 case IONIC_RX_FILTER_MATCH_MAC_VLAN:
59 netdev_info(lif->netdev, "Replay failed - %d: vlan %d mac %pM\n",
61 le16_to_cpu(ac->vlan.vlan),
65 spin_lock_bh(&lif->rx_filters.lock);
66 ionic_rx_filter_free(lif, f);
67 spin_unlock_bh(&lif->rx_filters.lock);
72 /* remove from old id list, save new id in tmp list */
73 spin_lock_bh(&lif->rx_filters.lock);
75 spin_unlock_bh(&lif->rx_filters.lock);
/* adopt the id the device assigned on this replay */
76 f->filter_id = le32_to_cpu(ctx.comp.rx_filter_add.filter_id);
77 hlist_add_head(&f->by_id, &new_id_list);
81 /* rebuild the by_id hash lists with the new filter ids */
82 spin_lock_bh(&lif->rx_filters.lock);
83 hlist_for_each_entry_safe(f, tmp, &new_id_list, by_id) {
84 key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
85 head = &lif->rx_filters.by_id[key];
86 hlist_add_head(&f->by_id, head);
88 spin_unlock_bh(&lif->rx_filters.lock);
/* Initialize the lif's rx filter tracking: the spinlock and the
 * empty by_hash / by_id bucket lists.
 * Returns 0 (return statement elided in this chunk — confirm).
 */
91 int ionic_rx_filters_init(struct ionic_lif *lif)
95 spin_lock_init(&lif->rx_filters.lock);
97 spin_lock_bh(&lif->rx_filters.lock);
98 for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
99 INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]);
100 INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]);
102 spin_unlock_bh(&lif->rx_filters.lock);
/* Tear down rx filter tracking: free every remaining filter.
 * Walking the by_id lists is sufficient since each filter is linked
 * on both by_id and by_hash and ionic_rx_filter_free unlinks both.
 */
107 void ionic_rx_filters_deinit(struct ionic_lif *lif)
109 struct ionic_rx_filter *f;
110 struct hlist_head *head;
111 struct hlist_node *tmp;
114 spin_lock_bh(&lif->rx_filters.lock);
115 for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
116 head = &lif->rx_filters.by_id[i];
/* _safe variant because the free unlinks the current node */
117 hlist_for_each_entry_safe(f, tmp, head, by_id)
118 ionic_rx_filter_free(lif, f);
120 spin_unlock_bh(&lif->rx_filters.lock);
/* Record a filter add-command so it can be looked up, deleted, and
 * replayed later: allocate an ionic_rx_filter, copy the command into
 * it, and link it on both the by_hash and by_id bucket lists.
 *
 * @flow_id/@rxq_index/@state/@ctx: saved into the new entry; @ctx
 * carries the add command and its completion (new filter_id).
 * Returns 0 on success (return statements elided in this chunk).
 *
 * NOTE(review): visible callers invoke this with rx_filters.lock
 * held (hence GFP_ATOMIC below) — confirm that as the contract.
 * NOTE(review): break statements, the NULL-alloc check, and the
 * f->state / f->lif assignments appear elided here.
 */
123 int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
124 u32 hash, struct ionic_admin_ctx *ctx,
125 enum ionic_filter_state state)
127 struct device *dev = lif->ionic->dev;
128 struct ionic_rx_filter_add_cmd *ac;
129 struct ionic_rx_filter *f = NULL;
130 struct hlist_head *head;
133 ac = &ctx->cmd.rx_filter_add;
/* pick the hash key per match type and look for an existing entry */
135 switch (le16_to_cpu(ac->match)) {
136 case IONIC_RX_FILTER_MATCH_VLAN:
137 key = le16_to_cpu(ac->vlan.vlan);
138 f = ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
140 case IONIC_RX_FILTER_MATCH_MAC:
/* key is the first 4 bytes of the MAC address */
141 key = *(u32 *)ac->mac.addr;
142 f = ionic_rx_filter_by_addr(lif, ac->mac.addr);
144 case IONIC_RX_FILTER_MATCH_MAC_VLAN:
145 key = le16_to_cpu(ac->mac_vlan.vlan);
147 case IONIC_RX_FILTER_STEER_PKTCLASS:
155 /* remove from current linking so we can refresh it */
156 hlist_del(&f->by_id);
157 hlist_del(&f->by_hash);
/* GFP_ATOMIC: allocation happens under the rx_filters spinlock */
159 f = devm_kzalloc(dev, sizeof(*f), GFP_ATOMIC);
164 f->flow_id = flow_id;
/* keep the device-assigned id from the add completion */
165 f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id);
167 f->rxq_index = rxq_index;
/* save the command itself so the filter can be replayed verbatim */
168 memcpy(&f->cmd, ac, sizeof(f->cmd));
169 netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id);
171 INIT_HLIST_NODE(&f->by_hash);
172 INIT_HLIST_NODE(&f->by_id);
/* link into by_hash (keyed on match data) ... */
174 key = hash_32(key, IONIC_RX_FILTER_HASH_BITS);
175 head = &lif->rx_filters.by_hash[key];
176 hlist_add_head(&f->by_hash, head);
/* ... and by_id (keyed on the device filter id) */
178 key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
179 head = &lif->rx_filters.by_id[key];
180 hlist_add_head(&f->by_id, head);
/* Find the VLAN-match filter for @vid, or NULL if none is saved
 * (the return statements are elided in this chunk).
 * NOTE(review): expects rx_filters.lock held by the caller — all
 * visible callers hold it; confirm.
 */
185 struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid)
187 struct ionic_rx_filter *f;
188 struct hlist_head *head;
/* same key derivation as ionic_rx_filter_save uses for VLAN */
191 key = hash_32(vid, IONIC_RX_FILTER_HASH_BITS);
192 head = &lif->rx_filters.by_hash[key];
194 hlist_for_each_entry(f, head, by_hash) {
195 if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_VLAN)
197 if (le16_to_cpu(f->cmd.vlan.vlan) == vid)
/* Find the MAC-match filter for @addr, or NULL if none is saved
 * (the return statements are elided in this chunk).
 * NOTE(review): expects rx_filters.lock held by the caller; confirm.
 */
204 struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif,
207 struct ionic_rx_filter *f;
208 struct hlist_head *head;
/* hash key is the first 4 bytes of the MAC, matching the save path */
211 key = hash_32(*(u32 *)addr, IONIC_RX_FILTER_HASH_BITS);
212 head = &lif->rx_filters.by_hash[key];
214 hlist_for_each_entry(f, head, by_hash) {
215 if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_MAC)
/* full 6-byte compare to resolve hash-bucket collisions */
217 if (memcmp(addr, f->cmd.mac.addr, ETH_ALEN) == 0)
/* Find the packet-class rx-steering filter, or NULL if none is saved
 * (the return statements are elided in this chunk).
 * Steering filters are all hashed under key 0, so only that bucket
 * needs to be scanned.
 */
224 struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif)
226 struct ionic_rx_filter *f;
227 struct hlist_head *head;
230 key = hash_32(0, IONIC_RX_FILTER_HASH_BITS);
231 head = &lif->rx_filters.by_hash[key];
233 hlist_for_each_entry(f, head, by_hash) {
234 if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_STEER_PKTCLASS)
/* Dispatch an add-command to the matching lookup helper: VLAN or MAC.
 * Any other match type is logged as unsupported (the default-case
 * label and the return after the netdev_err are elided in this chunk).
 */
242 static struct ionic_rx_filter *ionic_rx_filter_find(struct ionic_lif *lif,
243 struct ionic_rx_filter_add_cmd *ac)
245 switch (le16_to_cpu(ac->match)) {
246 case IONIC_RX_FILTER_MATCH_VLAN:
247 return ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
248 case IONIC_RX_FILTER_MATCH_MAC:
249 return ionic_rx_filter_by_addr(lif, ac->mac.addr);
251 netdev_err(lif->netdev, "unsupported filter match %d",
252 le16_to_cpu(ac->match));
/* Record a MAC add/delete request for later processing by the
 * filter-sync path, rather than touching the device directly.
 *
 * @mode: ADD_ADDR or DEL_ADDR.
 * State transitions:
 *   ADD + no entry  -> save a NEW filter entry
 *   ADD + OLD entry -> revive it to SYNCED (delete was never sent)
 *   DEL + NEW entry -> free it (add was never sent to the device)
 *   DEL + SYNCED    -> mark OLD so the sync path sends the delete
 *   DEL + no entry  -> nothing to do, return early
 * Finishes by flagging IONIC_LIF_F_FILTER_SYNC_NEEDED.
 * (return statements and some braces are elided in this chunk)
 */
257 int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode)
259 struct ionic_rx_filter *f;
262 spin_lock_bh(&lif->rx_filters.lock);
264 f = ionic_rx_filter_by_addr(lif, addr);
265 if (mode == ADD_ADDR && !f) {
266 struct ionic_admin_ctx ctx = {
267 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
268 .cmd.rx_filter_add = {
269 .opcode = IONIC_CMD_RX_FILTER_ADD,
270 .lif_index = cpu_to_le16(lif->index),
271 .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
275 memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
/* saved as NEW: the adminq post happens later in the sync path */
276 err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
277 IONIC_FILTER_STATE_NEW);
279 spin_unlock_bh(&lif->rx_filters.lock);
283 } else if (mode == ADD_ADDR && f) {
/* a pending delete is cancelled by re-adding the address */
284 if (f->state == IONIC_FILTER_STATE_OLD)
285 f->state = IONIC_FILTER_STATE_SYNCED;
287 } else if (mode == DEL_ADDR && f) {
/* never reached the device, so just drop the local entry */
288 if (f->state == IONIC_FILTER_STATE_NEW)
289 ionic_rx_filter_free(lif, f);
290 else if (f->state == IONIC_FILTER_STATE_SYNCED)
291 f->state = IONIC_FILTER_STATE_OLD;
292 } else if (mode == DEL_ADDR && !f) {
293 spin_unlock_bh(&lif->rx_filters.lock);
297 spin_unlock_bh(&lif->rx_filters.lock);
/* tell the sync worker there is pending filter work */
299 set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
/* Push a MAC filter add to the device and reconcile the saved entry.
 *
 * Saves/marks the filter SYNCED up front (under the lock) to block
 * parallel attempts, posts the add command to the adminq without the
 * lock held, then re-checks the entry afterwards: a failure reverts
 * it to NEW for retry, and a delete that raced in (state OLD) gets
 * the completed filter info saved so the delete can be sent next sync.
 *
 * NOTE(review): several lines are elided in this chunk — error-path
 * braces/returns, the nucast/nmcast increments after a successful
 * post, and the overflow-logging branch around the nfilters check.
 */
304 int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
306 struct ionic_admin_ctx ctx = {
307 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
308 .cmd.rx_filter_add = {
309 .opcode = IONIC_CMD_RX_FILTER_ADD,
310 .lif_index = cpu_to_le16(lif->index),
311 .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
314 int nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
315 bool mc = is_multicast_ether_addr(addr);
316 struct ionic_rx_filter *f;
319 memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
321 spin_lock_bh(&lif->rx_filters.lock);
322 f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
324 /* don't bother if we already have it and it is sync'd */
325 if (f->state == IONIC_FILTER_STATE_SYNCED) {
326 spin_unlock_bh(&lif->rx_filters.lock);
330 /* mark preemptively as sync'd to block any parallel attempts */
331 f->state = IONIC_FILTER_STATE_SYNCED;
333 /* save as SYNCED to catch any DEL requests while processing */
334 err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
335 IONIC_FILTER_STATE_SYNCED);
337 spin_unlock_bh(&lif->rx_filters.lock);
341 netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);
343 /* Don't bother with the write to FW if we know there's no room,
344 * we can try again on the next sync attempt.
346 if ((lif->nucast + lif->nmcast) >= nfilters)
/* post outside the spinlock: adminq wait can sleep */
349 err = ionic_adminq_post_wait(lif, &ctx);
351 spin_lock_bh(&lif->rx_filters.lock);
352 if (err && err != -EEXIST) {
353 /* set the state back to NEW so we can try again later */
354 f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
355 if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
356 f->state = IONIC_FILTER_STATE_NEW;
357 set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
360 spin_unlock_bh(&lif->rx_filters.lock);
/* re-find: the entry may have changed while the lock was dropped */
373 f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
374 if (f && f->state == IONIC_FILTER_STATE_OLD) {
375 /* Someone requested a delete while we were adding
376 * so update the filter info with the results from the add
377 * and the data will be there for the delete on the next
380 err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
381 IONIC_FILTER_STATE_OLD);
383 err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
384 IONIC_FILTER_STATE_SYNCED);
387 spin_unlock_bh(&lif->rx_filters.lock);
/* Remove a MAC filter: drop the local entry under the lock and, if
 * the filter had actually been pushed to the device (state was not
 * NEW), send the delete command through the adminq.
 *
 * NOTE(review): elided in this chunk — the capture of f->state into
 * the local `state` (presumably before the free), the nmcast/nucast
 * decrements, and the return statements; confirm against full file.
 */
392 int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
394 struct ionic_admin_ctx ctx = {
395 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
396 .cmd.rx_filter_del = {
397 .opcode = IONIC_CMD_RX_FILTER_DEL,
398 .lif_index = cpu_to_le16(lif->index),
401 struct ionic_rx_filter *f;
405 spin_lock_bh(&lif->rx_filters.lock);
406 f = ionic_rx_filter_by_addr(lif, addr);
/* nothing saved for this address — early out */
408 spin_unlock_bh(&lif->rx_filters.lock);
412 netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
/* grab the device id before the entry is freed */
416 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
417 ionic_rx_filter_free(lif, f);
419 if (is_multicast_ether_addr(addr) && lif->nmcast)
421 else if (!is_multicast_ether_addr(addr) && lif->nucast)
424 spin_unlock_bh(&lif->rx_filters.lock);
/* NEW filters were never sent to the device: nothing to delete there */
426 if (state != IONIC_FILTER_STATE_NEW) {
427 err = ionic_adminq_post_wait(lif, &ctx);
428 if (err && err != -EEXIST)
/* NOTE(review): fields of a local helper struct (its declaration line,
 * presumably `struct sync_item {`, is elided in this chunk) — a list
 * node plus a snapshot copy of a filter, used by ionic_rx_filter_sync
 * below to process adds/deletes without holding the filters lock.
 */
436 struct list_head list;
437 struct ionic_rx_filter f;
/* Bring the device's filters in line with the locally saved state.
 *
 * Snapshots every filter in NEW (needs add) or OLD (needs delete)
 * state into local lists under the lock, then drops the lock and
 * issues the deletes first, then the adds. Failures leave the entry
 * un-sync'd so the next sync pass retries them.
 *
 * NOTE(review): elided in this chunk — the GFP flag / NULL check on
 * the sync_item allocation, the memcpy of *f into sync_item->f, and
 * the end of the function past the add loop; confirm in full file.
 */
440 void ionic_rx_filter_sync(struct ionic_lif *lif)
442 struct device *dev = lif->ionic->dev;
443 struct list_head sync_add_list;
444 struct list_head sync_del_list;
445 struct sync_item *sync_item;
446 struct ionic_rx_filter *f;
447 struct hlist_head *head;
448 struct hlist_node *tmp;
449 struct sync_item *spos;
452 INIT_LIST_HEAD(&sync_add_list);
453 INIT_LIST_HEAD(&sync_del_list);
/* clear first: work requested after this point triggers a new sync */
455 clear_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
457 /* Copy the filters to be added and deleted
458 * into a separate local list that needs no locking.
460 spin_lock_bh(&lif->rx_filters.lock);
461 for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
462 head = &lif->rx_filters.by_id[i];
463 hlist_for_each_entry_safe(f, tmp, head, by_id) {
464 if (f->state == IONIC_FILTER_STATE_NEW ||
465 f->state == IONIC_FILTER_STATE_OLD) {
466 sync_item = devm_kzalloc(dev, sizeof(*sync_item),
473 if (f->state == IONIC_FILTER_STATE_NEW)
474 list_add(&sync_item->list, &sync_add_list);
476 list_add(&sync_item->list, &sync_del_list);
481 spin_unlock_bh(&lif->rx_filters.lock);
483 /* If the add or delete fails, it won't get marked as sync'd
484 * and will be tried again in the next sync action.
485 * Do the deletes first in case we're in an overflow state and
486 * they can clear room for some new filters
488 list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) {
489 (void)ionic_lif_addr_del(lif, sync_item->f.cmd.mac.addr);
491 list_del(&sync_item->list);
492 devm_kfree(dev, sync_item);
495 list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
496 (void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr);
498 list_del(&sync_item->list);
499 devm_kfree(dev, sync_item);