ionic: add generic filter search
[platform/kernel/linux-starfive.git] / drivers / net / ethernet / pensando / ionic / ionic_rx_filter.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3
4 #include <linux/netdevice.h>
5 #include <linux/dynamic_debug.h>
6 #include <linux/etherdevice.h>
7 #include <linux/list.h>
8
9 #include "ionic.h"
10 #include "ionic_lif.h"
11 #include "ionic_rx_filter.h"
12
13 void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f)
14 {
15         struct device *dev = lif->ionic->dev;
16
17         hlist_del(&f->by_id);
18         hlist_del(&f->by_hash);
19         devm_kfree(dev, f);
20 }
21
/* Re-post every saved filter to the device, e.g. after a FW reset.
 * Filters the FW rejects are logged and dropped from our lists;
 * accepted filters pick up their new FW-assigned filter_id and are
 * re-hashed into the by_id buckets at the end.
 */
void ionic_rx_filter_replay(struct ionic_lif *lif)
{
	struct ionic_rx_filter_add_cmd *ac;
	struct hlist_head new_id_list;
	struct ionic_admin_ctx ctx;
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	unsigned int key;
	unsigned int i;
	int err;

	INIT_HLIST_HEAD(&new_id_list);
	ac = &ctx.cmd.rx_filter_add;

	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id) {
			/* rebuild the add command from the copy saved
			 * when the filter was first created
			 */
			ctx.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work);
			memcpy(ac, &f->cmd, sizeof(f->cmd));
			dev_dbg(&lif->netdev->dev, "replay filter command:\n");
			dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
					 &ctx.cmd, sizeof(ctx.cmd), true);

			err = ionic_adminq_post_wait(lif, &ctx);
			if (err) {
				/* FW refused this filter: log what it was,
				 * then drop our record of it
				 */
				switch (le16_to_cpu(ac->match)) {
				case IONIC_RX_FILTER_MATCH_VLAN:
					netdev_info(lif->netdev, "Replay failed - %d: vlan %d\n",
						    err,
						    le16_to_cpu(ac->vlan.vlan));
					break;
				case IONIC_RX_FILTER_MATCH_MAC:
					netdev_info(lif->netdev, "Replay failed - %d: mac %pM\n",
						    err, ac->mac.addr);
					break;
				case IONIC_RX_FILTER_MATCH_MAC_VLAN:
					netdev_info(lif->netdev, "Replay failed - %d: vlan %d mac %pM\n",
						    err,
						    le16_to_cpu(ac->vlan.vlan),
						    ac->mac.addr);
					break;
				}
				spin_lock_bh(&lif->rx_filters.lock);
				ionic_rx_filter_free(lif, f);
				spin_unlock_bh(&lif->rx_filters.lock);

				continue;
			}

			/* remove from old id list, save new id in tmp list */
			spin_lock_bh(&lif->rx_filters.lock);
			hlist_del(&f->by_id);
			spin_unlock_bh(&lif->rx_filters.lock);
			f->filter_id = le32_to_cpu(ctx.comp.rx_filter_add.filter_id);
			hlist_add_head(&f->by_id, &new_id_list);
		}
	}

	/* rebuild the by_id hash lists with the new filter ids */
	spin_lock_bh(&lif->rx_filters.lock);
	hlist_for_each_entry_safe(f, tmp, &new_id_list, by_id) {
		key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
		head = &lif->rx_filters.by_id[key];
		hlist_add_head(&f->by_id, head);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
}
90
91 int ionic_rx_filters_init(struct ionic_lif *lif)
92 {
93         unsigned int i;
94
95         spin_lock_init(&lif->rx_filters.lock);
96
97         spin_lock_bh(&lif->rx_filters.lock);
98         for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
99                 INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]);
100                 INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]);
101         }
102         spin_unlock_bh(&lif->rx_filters.lock);
103
104         return 0;
105 }
106
107 void ionic_rx_filters_deinit(struct ionic_lif *lif)
108 {
109         struct ionic_rx_filter *f;
110         struct hlist_head *head;
111         struct hlist_node *tmp;
112         unsigned int i;
113
114         spin_lock_bh(&lif->rx_filters.lock);
115         for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
116                 head = &lif->rx_filters.by_id[i];
117                 hlist_for_each_entry_safe(f, tmp, head, by_id)
118                         ionic_rx_filter_free(lif, f);
119         }
120         spin_unlock_bh(&lif->rx_filters.lock);
121 }
122
/* Record (or refresh) a local filter entry from the add command in ctx,
 * linking it into both the by_hash and by_id lookup lists.
 *
 * Caller holds lif->rx_filters.lock (see ionic_lif_list_addr() and
 * ionic_lif_addr_add()), so the list surgery here is safe.
 *
 * Returns 0 on success, -EINVAL for an unknown match type,
 * -ENOMEM if a new filter struct can't be allocated.
 */
int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
			 u32 hash, struct ionic_admin_ctx *ctx,
			 enum ionic_filter_state state)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_rx_filter_add_cmd *ac;
	struct ionic_rx_filter *f = NULL;
	struct hlist_head *head;
	unsigned int key;

	ac = &ctx->cmd.rx_filter_add;

	/* derive the hash key from the match criteria, and look for an
	 * existing entry where a lookup helper exists
	 */
	switch (le16_to_cpu(ac->match)) {
	case IONIC_RX_FILTER_MATCH_VLAN:
		key = le16_to_cpu(ac->vlan.vlan);
		f = ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
		break;
	case IONIC_RX_FILTER_MATCH_MAC:
		/* key is the first 32 bits of the MAC address; this
		 * u32 load must stay in sync with the one in
		 * ionic_rx_filter_by_addr()
		 */
		key = *(u32 *)ac->mac.addr;
		f = ionic_rx_filter_by_addr(lif, ac->mac.addr);
		break;
	case IONIC_RX_FILTER_MATCH_MAC_VLAN:
		key = le16_to_cpu(ac->mac_vlan.vlan);
		break;
	case IONIC_RX_FILTER_STEER_PKTCLASS:
		key = 0;
		break;
	default:
		return -EINVAL;
	}

	if (f) {
		/* remove from current linking so we can refresh it */
		hlist_del(&f->by_id);
		hlist_del(&f->by_hash);
	} else {
		/* GFP_ATOMIC: we're under a bh spinlock */
		f = devm_kzalloc(dev, sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;
	}

	f->flow_id = flow_id;
	f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id);
	f->state = state;
	f->rxq_index = rxq_index;
	/* keep a copy of the add command for replay after FW reset */
	memcpy(&f->cmd, ac, sizeof(f->cmd));
	netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id);

	INIT_HLIST_NODE(&f->by_hash);
	INIT_HLIST_NODE(&f->by_id);

	key = hash_32(key, IONIC_RX_FILTER_HASH_BITS);
	head = &lif->rx_filters.by_hash[key];
	hlist_add_head(&f->by_hash, head);

	key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
	head = &lif->rx_filters.by_id[key];
	hlist_add_head(&f->by_id, head);

	return 0;
}
184
185 struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid)
186 {
187         struct ionic_rx_filter *f;
188         struct hlist_head *head;
189         unsigned int key;
190
191         key = hash_32(vid, IONIC_RX_FILTER_HASH_BITS);
192         head = &lif->rx_filters.by_hash[key];
193
194         hlist_for_each_entry(f, head, by_hash) {
195                 if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_VLAN)
196                         continue;
197                 if (le16_to_cpu(f->cmd.vlan.vlan) == vid)
198                         return f;
199         }
200
201         return NULL;
202 }
203
204 struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif,
205                                                 const u8 *addr)
206 {
207         struct ionic_rx_filter *f;
208         struct hlist_head *head;
209         unsigned int key;
210
211         key = hash_32(*(u32 *)addr, IONIC_RX_FILTER_HASH_BITS);
212         head = &lif->rx_filters.by_hash[key];
213
214         hlist_for_each_entry(f, head, by_hash) {
215                 if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_MATCH_MAC)
216                         continue;
217                 if (memcmp(addr, f->cmd.mac.addr, ETH_ALEN) == 0)
218                         return f;
219         }
220
221         return NULL;
222 }
223
224 struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif)
225 {
226         struct ionic_rx_filter *f;
227         struct hlist_head *head;
228         unsigned int key;
229
230         key = hash_32(0, IONIC_RX_FILTER_HASH_BITS);
231         head = &lif->rx_filters.by_hash[key];
232
233         hlist_for_each_entry(f, head, by_hash) {
234                 if (le16_to_cpu(f->cmd.match) != IONIC_RX_FILTER_STEER_PKTCLASS)
235                         continue;
236                 return f;
237         }
238
239         return NULL;
240 }
241
242 static struct ionic_rx_filter *ionic_rx_filter_find(struct ionic_lif *lif,
243                                                     struct ionic_rx_filter_add_cmd *ac)
244 {
245         switch (le16_to_cpu(ac->match)) {
246         case IONIC_RX_FILTER_MATCH_VLAN:
247                 return ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
248         case IONIC_RX_FILTER_MATCH_MAC:
249                 return ionic_rx_filter_by_addr(lif, ac->mac.addr);
250         default:
251                 netdev_err(lif->netdev, "unsupported filter match %d",
252                            le16_to_cpu(ac->match));
253                 return NULL;
254         }
255 }
256
/* Queue a MAC address add or delete by updating only the local filter
 * state; no device command is posted here.  The FW work is deferred to
 * ionic_rx_filter_sync() via the FILTER_SYNC_NEEDED bit.
 *
 * mode: ADD_ADDR or DEL_ADDR.
 * Returns 0 on success, -ENOMEM if a new filter can't be saved,
 * -ENOENT on a delete of an address we don't have.
 */
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode)
{
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_addr(lif, addr);
	if (mode == ADD_ADDR && !f) {
		/* brand new address: save it in NEW state so the sync
		 * pass will push it to the FW
		 */
		struct ionic_admin_ctx ctx = {
			.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
			.cmd.rx_filter_add = {
				.opcode = IONIC_CMD_RX_FILTER_ADD,
				.lif_index = cpu_to_le16(lif->index),
				.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
			},
		};

		memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_NEW);
		if (err) {
			spin_unlock_bh(&lif->rx_filters.lock);
			return err;
		}

	} else if (mode == ADD_ADDR && f) {
		/* re-added before the pending delete was synced:
		 * cancel the delete by marking it synced again
		 */
		if (f->state == IONIC_FILTER_STATE_OLD)
			f->state = IONIC_FILTER_STATE_SYNCED;

	} else if (mode == DEL_ADDR && f) {
		/* never made it to the FW: just drop it locally;
		 * otherwise mark OLD so sync will delete it in FW
		 */
		if (f->state == IONIC_FILTER_STATE_NEW)
			ionic_rx_filter_free(lif, f);
		else if (f->state == IONIC_FILTER_STATE_SYNCED)
			f->state = IONIC_FILTER_STATE_OLD;
	} else if (mode == DEL_ADDR && !f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	spin_unlock_bh(&lif->rx_filters.lock);

	set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);

	return 0;
}
303
/* Add a MAC filter in the device and record it locally.
 *
 * Takes lif->rx_filters.lock around the list work, but drops it
 * across the adminq post since that sleeps; the filter state is set
 * to SYNCED before the post to block parallel add attempts, and
 * re-checked afterward to catch a delete that raced in.
 */
int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
		},
	};
	/* NOTE(review): the cap below compares ucast+mcast totals against
	 * max_ucast_filters only - confirm mcast shares the same FW table
	 */
	int nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
	bool mc = is_multicast_ether_addr(addr);
	struct ionic_rx_filter *f;
	int err = 0;

	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
	if (f) {
		/* don't bother if we already have it and it is sync'd */
		if (f->state == IONIC_FILTER_STATE_SYNCED) {
			spin_unlock_bh(&lif->rx_filters.lock);
			return 0;
		}

		/* mark preemptively as sync'd to block any parallel attempts */
		f->state = IONIC_FILTER_STATE_SYNCED;
	} else {
		/* save as SYNCED to catch any DEL requests while processing */
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_SYNCED);
	}
	spin_unlock_bh(&lif->rx_filters.lock);
	if (err)
		return err;

	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);

	/* Don't bother with the write to FW if we know there's no room,
	 * we can try again on the next sync attempt.
	 */
	if ((lif->nucast + lif->nmcast) >= nfilters)
		err = -ENOSPC;
	else
		err = ionic_adminq_post_wait(lif, &ctx);

	spin_lock_bh(&lif->rx_filters.lock);
	if (err && err != -EEXIST) {
		/* set the state back to NEW so we can try again later */
		f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
		if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
			f->state = IONIC_FILTER_STATE_NEW;
			set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
		}

		spin_unlock_bh(&lif->rx_filters.lock);

		/* -ENOSPC is not fatal here; the sync pass will retry */
		if (err == -ENOSPC)
			return 0;
		else
			return err;
	}

	/* NOTE(review): on -EEXIST this counter is still incremented -
	 * verify the FW doesn't report EEXIST for an already-counted filter
	 */
	if (mc)
		lif->nmcast++;
	else
		lif->nucast++;

	f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
	if (f && f->state == IONIC_FILTER_STATE_OLD) {
		/* Someone requested a delete while we were adding
		 * so update the filter info with the results from the add
		 * and the data will be there for the delete on the next
		 * sync cycle.
		 */
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_OLD);
	} else {
		err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
					   IONIC_FILTER_STATE_SYNCED);
	}

	spin_unlock_bh(&lif->rx_filters.lock);

	return err;
}
391
/* Delete a MAC filter from the device and drop the local record.
 *
 * The local entry is freed under the lock before the adminq post;
 * the filter_id and state are captured first since the post must be
 * done outside the lock.  A filter still in NEW state was never
 * pushed to the FW, so no device command is needed for it.
 *
 * Returns 0 on success (-EEXIST from the FW means it's already gone
 * and is treated as success), -ENOENT if we have no such address.
 */
int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int state;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
		   addr, f->filter_id);

	/* capture what we need before the free invalidates f */
	state = f->state;
	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);

	if (is_multicast_ether_addr(addr) && lif->nmcast)
		lif->nmcast--;
	else if (!is_multicast_ether_addr(addr) && lif->nucast)
		lif->nucast--;

	spin_unlock_bh(&lif->rx_filters.lock);

	if (state != IONIC_FILTER_STATE_NEW) {
		err = ionic_adminq_post_wait(lif, &ctx);
		if (err && err != -EEXIST)
			return err;
	}

	return 0;
}
434
/* Temporary holder used by ionic_rx_filter_sync() to carry a copy of
 * a filter on a local list while working outside the filter lock.
 */
struct sync_item {
	struct list_head list;
	struct ionic_rx_filter f;
};
439
/* Push any deferred filter adds (NEW) and deletes (OLD) to the device.
 *
 * Phase 1 copies the pending filters onto local lists under the lock;
 * phase 2 posts the FW commands lock-free via ionic_lif_addr_del/add(),
 * which take the lock themselves.  Deletes run first to free up FW
 * table space for the adds.
 */
void ionic_rx_filter_sync(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct list_head sync_add_list;
	struct list_head sync_del_list;
	struct sync_item *sync_item;
	struct ionic_rx_filter *f;
	struct hlist_head *head;
	struct hlist_node *tmp;
	struct sync_item *spos;
	unsigned int i;

	INIT_LIST_HEAD(&sync_add_list);
	INIT_LIST_HEAD(&sync_del_list);

	clear_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);

	/* Copy the filters to be added and deleted
	 * into a separate local list that needs no locking.
	 */
	spin_lock_bh(&lif->rx_filters.lock);
	for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
		head = &lif->rx_filters.by_id[i];
		hlist_for_each_entry_safe(f, tmp, head, by_id) {
			if (f->state == IONIC_FILTER_STATE_NEW ||
			    f->state == IONIC_FILTER_STATE_OLD) {
				/* GFP_ATOMIC: we're under a bh spinlock */
				sync_item = devm_kzalloc(dev, sizeof(*sync_item),
							 GFP_ATOMIC);
				/* on OOM, sync whatever we've gathered so far;
				 * the rest will be retried on a later pass
				 */
				if (!sync_item)
					goto loop_out;

				sync_item->f = *f;

				if (f->state == IONIC_FILTER_STATE_NEW)
					list_add(&sync_item->list, &sync_add_list);
				else
					list_add(&sync_item->list, &sync_del_list);
			}
		}
	}
loop_out:
	spin_unlock_bh(&lif->rx_filters.lock);

	/* If the add or delete fails, it won't get marked as sync'd
	 * and will be tried again in the next sync action.
	 * Do the deletes first in case we're in an overflow state and
	 * they can clear room for some new filters
	 */
	list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) {
		(void)ionic_lif_addr_del(lif, sync_item->f.cmd.mac.addr);

		list_del(&sync_item->list);
		devm_kfree(dev, sync_item);
	}

	list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
		(void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr);

		list_del(&sync_item->list);
		devm_kfree(dev, sync_item);
	}
}