// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <trace/events/mlxsw.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"

size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        return ops->priv_size;
}

#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */

int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
                           struct mlxsw_sp_acl_tcam *tcam)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        u64 max_tcam_regions;
        u64 max_regions;
        u64 max_groups;
        size_t alloc_size;
        int err;

        mutex_init(&tcam->lock);
        tcam->vregion_rehash_intrvl =
                        MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
        INIT_LIST_HEAD(&tcam->vregion_list);

        max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                              ACL_MAX_TCAM_REGIONS);
        max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

        /* Use 1:1 mapping between ACL region and TCAM region */
        if (max_tcam_regions < max_regions)
                max_regions = max_tcam_regions;

        alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
        tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
        if (!tcam->used_regions)
                return -ENOMEM;
        tcam->max_regions = max_regions;

        max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
        alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
        tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
        if (!tcam->used_groups) {
                err = -ENOMEM;
                goto err_alloc_used_groups;
        }
        tcam->max_groups = max_groups;
        tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                 ACL_MAX_GROUP_SIZE);

        err = ops->init(mlxsw_sp, tcam->priv, tcam);
        if (err)
                goto err_tcam_init;

        return 0;

err_tcam_init:
        kfree(tcam->used_groups);
err_alloc_used_groups:
        kfree(tcam->used_regions);
        return err;
}

void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_acl_tcam *tcam)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        mutex_destroy(&tcam->lock);
        ops->fini(mlxsw_sp, tcam->priv);
        kfree(tcam->used_groups);
        kfree(tcam->used_regions);
}

int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_acl_rule_info *rulei,
                                   u32 *priority, bool fillup_priority)
{
        u64 max_priority;

        if (!fillup_priority) {
                *priority = 0;
                return 0;
        }

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
                return -EIO;

        /* Priority range is 1..cap_kvd_size-1. */
        max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
        if (rulei->priority >= max_priority)
                return -EINVAL;

        /* Unlike in TC, in HW, higher number means higher priority. */
        *priority = max_priority - rulei->priority;
        return 0;
}

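/* A minimal sketch of the priority inversion above, assuming a
 * hypothetical cap_kvd_size of 1024 (the real value is read from the
 * KVD_SIZE resource):
 *
 *	max_priority = 1024 - 1;	// valid HW priorities are 1..1023
 *	// TC priority 0 (highest in TC) maps to the highest HW value:
 *	//	*priority = 1023 - 0 = 1023
 *	// TC priority 100 maps to:
 *	//	*priority = 1023 - 100 = 923
 *
 * Lower TC priorities therefore land on higher HW numbers, matching the
 * "higher number means higher priority" HW convention noted above.
 */
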
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
                                           u16 *p_id)
{
        u16 id;

        id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
        if (id < tcam->max_regions) {
                __set_bit(id, tcam->used_regions);
                *p_id = id;
                return 0;
        }
        return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
                                            u16 id)
{
        __clear_bit(id, tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
                                          u16 *p_id)
{
        u16 id;

        id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
        if (id < tcam->max_groups) {
                __set_bit(id, tcam->used_groups);
                *p_id = id;
                return 0;
        }
        return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
                                           u16 id)
{
        __clear_bit(id, tcam->used_groups);
}

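/* The four helpers above implement a simple first-fit ID allocator on
 * top of a bitmap. An illustrative (non-driver) usage sketch:
 *
 *	u16 id;
 *
 *	if (!mlxsw_sp_acl_tcam_region_id_get(tcam, &id)) {
 *		// ... program region 'id' in HW ...
 *		mlxsw_sp_acl_tcam_region_id_put(tcam, id);
 *	}
 *
 * find_first_zero_bit() returns tcam->max_regions (resp. max_groups)
 * when the bitmap is full, which is why the getters translate that
 * case to -ENOBUFS.
 */
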
struct mlxsw_sp_acl_tcam_pattern {
        const enum mlxsw_afk_element *elements;
        unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
        struct mlxsw_sp_acl_tcam *tcam;
        u16 id;
        struct mutex lock; /* guards region list updates */
        struct list_head region_list;
        unsigned int region_count;
};

struct mlxsw_sp_acl_tcam_vgroup {
        struct mlxsw_sp_acl_tcam_group group;
        struct list_head vregion_list;
        struct rhashtable vchunk_ht;
        const struct mlxsw_sp_acl_tcam_pattern *patterns;
        unsigned int patterns_count;
        bool tmplt_elusage_set;
        struct mlxsw_afk_element_usage tmplt_elusage;
        bool vregion_rehash_enabled;
};

struct mlxsw_sp_acl_tcam_rehash_ctx {
        void *hints_priv;
        bool this_is_rollback;
        struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk currently
                                                          * being migrated.
                                                          */
        struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry at which to
                                                        * start migration in
                                                        * the vchunk currently
                                                        * being migrated.
                                                        */
        struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry at which to
                                                       * stop migration in
                                                       * the vchunk currently
                                                       * being migrated.
                                                       */
};

struct mlxsw_sp_acl_tcam_vregion {
        struct mutex lock; /* Protects consistency of region, region2 pointers
                            * and vchunk_list.
                            */
        struct mlxsw_sp_acl_tcam_region *region;
        struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
        struct list_head list; /* Member of a TCAM group */
        struct list_head tlist; /* Member of a TCAM */
        struct list_head vchunk_list; /* List of vchunks under this vregion */
        struct mlxsw_afk_key_info *key_info;
        struct mlxsw_sp_acl_tcam *tcam;
        struct mlxsw_sp_acl_tcam_vgroup *vgroup;
        struct {
                struct delayed_work dw;
                struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
        } rehash;
        struct mlxsw_sp *mlxsw_sp;
        unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_vchunk;

struct mlxsw_sp_acl_tcam_chunk {
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;
        struct mlxsw_sp_acl_tcam_region *region;
        unsigned long priv[];
        /* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_vchunk {
        struct mlxsw_sp_acl_tcam_chunk *chunk;
        struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
        struct list_head list; /* Member of a TCAM vregion */
        struct rhash_head ht_node; /* Member of a chunk HT */
        struct list_head ventry_list;
        unsigned int priority; /* Priority within the vregion and group */
        struct mlxsw_sp_acl_tcam_vgroup *vgroup;
        struct mlxsw_sp_acl_tcam_vregion *vregion;
        unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_entry {
        struct mlxsw_sp_acl_tcam_ventry *ventry;
        struct mlxsw_sp_acl_tcam_chunk *chunk;
        unsigned long priv[];
        /* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_ventry {
        struct mlxsw_sp_acl_tcam_entry *entry;
        struct list_head list; /* Member of a TCAM vchunk */
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;
        struct mlxsw_sp_acl_rule_info *rulei;
};

static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
        .key_len = sizeof(unsigned int),
        .key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
        .head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
        .automatic_shrinking = true,
};

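/* The vchunk hashtable is keyed by the rule priority alone, so each
 * distinct priority within a vgroup maps to exactly one vchunk. An
 * illustrative lookup sketch (this mirrors how
 * mlxsw_sp_acl_tcam_vchunk_get() below uses the table):
 *
 *	unsigned int prio = 100;	// hypothetical priority
 *	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
 *
 *	vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &prio,
 *					mlxsw_sp_acl_tcam_vchunk_ht_params);
 */
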
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
                                          struct mlxsw_sp_acl_tcam_group *group)
{
        struct mlxsw_sp_acl_tcam_region *region;
        char pagt_pl[MLXSW_REG_PAGT_LEN];
        int acl_index = 0;

        mlxsw_reg_pagt_pack(pagt_pl, group->id);
        list_for_each_entry(region, &group->region_list, list) {
                bool multi = false;

                /* Check if the next entry in the list has the same vregion. */
                if (region->list.next != &group->region_list &&
                    list_next_entry(region, list)->vregion == region->vregion)
                        multi = true;
                mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
                                           region->id, multi);
        }
        mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}

static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
                            struct mlxsw_sp_acl_tcam_group *group)
{
        int err;

        group->tcam = tcam;
        mutex_init(&group->lock);
        INIT_LIST_HEAD(&group->region_list);

        err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
        if (err)
                return err;

        return 0;
}

static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
{
        struct mlxsw_sp_acl_tcam *tcam = group->tcam;

        mutex_destroy(&group->lock);
        mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
        WARN_ON(!list_empty(&group->region_list));
}

static int
mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_acl_tcam *tcam,
                             struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                             const struct mlxsw_sp_acl_tcam_pattern *patterns,
                             unsigned int patterns_count,
                             struct mlxsw_afk_element_usage *tmplt_elusage,
                             bool vregion_rehash_enabled)
{
        int err;

        vgroup->patterns = patterns;
        vgroup->patterns_count = patterns_count;
        vgroup->vregion_rehash_enabled = vregion_rehash_enabled;

        if (tmplt_elusage) {
                vgroup->tmplt_elusage_set = true;
                memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
                       sizeof(vgroup->tmplt_elusage));
        }
        INIT_LIST_HEAD(&vgroup->vregion_list);

        err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
        if (err)
                return err;

        err = rhashtable_init(&vgroup->vchunk_ht,
                              &mlxsw_sp_acl_tcam_vchunk_ht_params);
        if (err)
                goto err_rhashtable_init;

        return 0;

err_rhashtable_init:
        mlxsw_sp_acl_tcam_group_del(&vgroup->group);
        return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
        rhashtable_destroy(&vgroup->vchunk_ht);
        mlxsw_sp_acl_tcam_group_del(&vgroup->group);
        WARN_ON(!list_empty(&vgroup->vregion_list));
}

static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_acl_tcam_group *group,
                             struct mlxsw_sp_port *mlxsw_sp_port,
                             bool ingress)
{
        char ppbt_pl[MLXSW_REG_PPBT_LEN];

        mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
                                               MLXSW_REG_PXBT_E_EACL,
                            MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
                            group->id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_group *group,
                               struct mlxsw_sp_port *mlxsw_sp_port,
                               bool ingress)
{
        char ppbt_pl[MLXSW_REG_PPBT_LEN];

        mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
                                               MLXSW_REG_PXBT_E_EACL,
                            MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
                            group->id);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
        return group->id;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;

        if (list_empty(&vregion->vchunk_list))
                return 0;
        /* As the priority of a vregion, return the priority of its
         * first vchunk.
         */
        vchunk = list_first_entry(&vregion->vchunk_list,
                                  typeof(*vchunk), list);
        return vchunk->priority;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;

        if (list_empty(&vregion->vchunk_list))
                return 0;
        vchunk = list_last_entry(&vregion->vchunk_list,
                                 typeof(*vchunk), list);
        return vchunk->priority;
}

static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_group *group,
                                      struct mlxsw_sp_acl_tcam_region *region,
                                      unsigned int priority,
                                      struct mlxsw_sp_acl_tcam_region *next_region)
{
        struct mlxsw_sp_acl_tcam_region *region2;
        struct list_head *pos;
        int err;

        mutex_lock(&group->lock);
        if (group->region_count == group->tcam->max_group_size) {
                err = -ENOBUFS;
                goto err_region_count_check;
        }

        if (next_region) {
                /* If the next region is defined, place the new one
                 * before it. The next one is a sibling.
                 */
                pos = &next_region->list;
        } else {
                /* Position the region inside the list according to priority */
                list_for_each(pos, &group->region_list) {
                        region2 = list_entry(pos, typeof(*region2), list);
                        if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
                            priority)
                                break;
                }
        }
        list_add_tail(&region->list, pos);
        region->group = group;

        err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
        if (err)
                goto err_group_update;

        group->region_count++;
        mutex_unlock(&group->lock);
        return 0;

err_group_update:
        list_del(&region->list);
err_region_count_check:
        mutex_unlock(&group->lock);
        return err;
}

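/* Note on the attach above: list_add_tail(&region->list, pos) inserts
 * the new region immediately *before* 'pos', so the region list stays
 * sorted by ascending vregion priority. A sketch with hypothetical
 * priorities:
 *
 *	regions (by prio):  10 -> 20 -> 40
 *	attach prio 30:     the loop stops at the prio-40 node (40 > 30)
 *	result:             10 -> 20 -> 30 -> 40
 */
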
static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_region *region)
{
        struct mlxsw_sp_acl_tcam_group *group = region->group;

        mutex_lock(&group->lock);
        list_del(&region->list);
        group->region_count--;
        mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
        mutex_unlock(&group->lock);
}

static int
mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                        struct mlxsw_sp_acl_tcam_vregion *vregion,
                                        unsigned int priority)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion2;
        struct list_head *pos;
        int err;

        /* Position the vregion inside the list according to priority */
        list_for_each(pos, &vgroup->vregion_list) {
                vregion2 = list_entry(pos, typeof(*vregion2), list);
                if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
                        break;
        }
        list_add_tail(&vregion->list, pos);

        err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
                                                    vregion->region,
                                                    priority, NULL);
        if (err)
                goto err_region_attach;

        return 0;

err_region_attach:
        list_del(&vregion->list);
        return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        list_del(&vregion->list);
        if (vregion->region2)
                mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
                                                      vregion->region2);
        mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                      unsigned int priority,
                                      struct mlxsw_afk_element_usage *elusage,
                                      bool *p_need_split)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
        struct list_head *pos;
        bool issubset;

        list_for_each(pos, &vgroup->vregion_list) {
                vregion = list_entry(pos, typeof(*vregion), list);

                /* First, check whether the requested priority would not
                 * rather belong under one of the subsequent vregions.
                 */
                if (pos->next != &vgroup->vregion_list) { /* not last */
                        vregion2 = list_entry(pos->next, typeof(*vregion2),
                                              list);
                        if (priority >=
                            mlxsw_sp_acl_tcam_vregion_prio(vregion2))
                                continue;
                }

                issubset = mlxsw_afk_key_info_subset(vregion->key_info,
                                                     elusage);

                /* If the requested element usage would not fit and the
                 * priority is lower than that of the currently inspected
                 * vregion, we cannot use this vregion, so return NULL to
                 * indicate that a new vregion has to be created.
                 */
                if (!issubset &&
                    priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
                        return NULL;

                /* If the requested element usage would not fit and the
                 * priority is higher than that of the currently inspected
                 * vregion, we cannot use this vregion either. There is
                 * still some hope that the next vregion might fit, so let
                 * it be processed and eventually break at the check right
                 * above this one.
                 */
                if (!issubset &&
                    priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
                        continue;

                /* Indicate whether the vregion needs to be split in order
                 * to add the requested priority. A split is needed when the
                 * requested element usage won't fit into the found vregion.
                 */
                *p_need_split = !issubset;
                return vregion;
        }
        return NULL; /* A new vregion has to be created. */
}

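/* Worked example for the find above, assuming a vgroup with two
 * hypothetical vregions A (vchunk prios 10-20) and B (vchunk prios
 * 30-40):
 *
 *	prio 15, elusage subset of A:      return A, need_split = false
 *	prio 15, elusage not subset of A:  return A, need_split = true
 *	                                   (the caller then fails with
 *	                                   -EOPNOTSUPP, see below)
 *	prio 5,  elusage not subset of A:  return NULL -> new vregion
 *	prio 25, elusage not subset of A:  continue past A; 25 is below
 *	                                   B's prio, so return NULL ->
 *	                                   new vregion
 */
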
static void
mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                      struct mlxsw_afk_element_usage *elusage,
                                      struct mlxsw_afk_element_usage *out)
{
        const struct mlxsw_sp_acl_tcam_pattern *pattern;
        int i;

        /* If the template is set, we do not have to look up a pattern;
         * just use the template.
         */
        if (vgroup->tmplt_elusage_set) {
                memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
                WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
                return;
        }

        for (i = 0; i < vgroup->patterns_count; i++) {
                pattern = &vgroup->patterns[i];
                mlxsw_afk_element_usage_fill(out, pattern->elements,
                                             pattern->elements_count);
                if (mlxsw_afk_element_usage_subset(elusage, out))
                        return;
        }
        memcpy(out, elusage, sizeof(*out));
}

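/* Sketch of the selection order above, with two hypothetical patterns:
 *
 *	patterns[0] = {SRC_IP, DST_IP}
 *	patterns[1] = {SRC_IP, DST_IP, SRC_L4_PORT, DST_L4_PORT}
 *
 *	elusage {DST_IP}              -> out = patterns[0]
 *	elusage {SRC_L4_PORT}         -> out = patterns[1]
 *	elusage {VID} (in no pattern) -> out = elusage itself
 *
 * Patterns let rules with similar element usage share one vregion key
 * instead of creating a uniquely keyed vregion per rule.
 */
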
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_region *region)
{
        struct mlxsw_afk_key_info *key_info = region->key_info;
        char ptar_pl[MLXSW_REG_PTAR_LEN];
        unsigned int encodings_count;
        int i;
        int err;

        mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
                            region->key_type,
                            MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
                            region->id, region->tcam_region_info);
        encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
        for (i = 0; i < encodings_count; i++) {
                u16 encoding;

                encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
                mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
        }
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
        if (err)
                return err;
        mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
        return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_tcam_region *region)
{
        char ptar_pl[MLXSW_REG_PTAR_LEN];

        mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
                            region->key_type, 0, region->id,
                            region->tcam_region_info);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_region *region)
{
        char pacl_pl[MLXSW_REG_PACL_LEN];

        mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
                            region->tcam_region_info);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_region *region)
{
        char pacl_pl[MLXSW_REG_PACL_LEN];

        mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
                            region->tcam_region_info);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam *tcam,
                                struct mlxsw_sp_acl_tcam_vregion *vregion,
                                void *hints_priv)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_region *region;
        int err;

        region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
        if (!region)
                return ERR_PTR(-ENOMEM);
        region->mlxsw_sp = mlxsw_sp;
        region->vregion = vregion;
        region->key_info = vregion->key_info;

        err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
        if (err)
                goto err_region_id_get;

        err = ops->region_associate(mlxsw_sp, region);
        if (err)
                goto err_tcam_region_associate;

        region->key_type = ops->key_type;
        err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
        if (err)
                goto err_tcam_region_alloc;

        err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
        if (err)
                goto err_tcam_region_enable;

        err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
                               region, hints_priv);
        if (err)
                goto err_tcam_region_init;

        return region;

err_tcam_region_init:
        mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
        mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
        mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
        kfree(region);
        return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_region *region)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        ops->region_fini(mlxsw_sp, region->priv);
        mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
        mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
        mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
                                        region->id);
        kfree(region);
}

static void
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        unsigned long interval = vregion->tcam->vregion_rehash_intrvl;

        if (!interval)
                return;
        mlxsw_core_schedule_dw(&vregion->rehash.dw,
                               msecs_to_jiffies(interval));
}

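/* Timing sketch for the scheduling above, assuming the default
 * interval: the work is queued msecs_to_jiffies(5000) jiffies from
 * now, i.e. roughly every 5 s per vregion. An interval of 0 makes
 * this helper a no-op, which effectively disables periodic rehash
 * (see the rehash_intrvl setter below).
 */
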
static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_vregion *vregion,
                                 int *credits);

static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion =
                container_of(work, struct mlxsw_sp_acl_tcam_vregion,
                             rehash.dw.work);
        int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;

        mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
        if (credits < 0)
                /* Rehash ran out of credits, so it was interrupted.
                 * Schedule the work as soon as possible to continue.
                 */
                mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
        else
                mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

        /* If a rule was added to or deleted from a vchunk that is
         * currently under rehash migration, we have to reset the ventry
         * pointers to make sure all rules are properly migrated.
         */
        if (vregion->rehash.ctx.current_vchunk == vchunk) {
                vregion->rehash.ctx.start_ventry = NULL;
                vregion->rehash.ctx.stop_ventry = NULL;
        }
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        /* If a vchunk was added to or deleted from a vregion, we have to
         * reset the current vchunk pointer to make sure all vchunks
         * are properly migrated.
         */
        vregion->rehash.ctx.current_vchunk = NULL;
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                 unsigned int priority,
                                 struct mlxsw_afk_element_usage *elusage)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
        struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
        struct mlxsw_sp_acl_tcam_vregion *vregion;
        int err;

        vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
        if (!vregion)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&vregion->vchunk_list);
        mutex_init(&vregion->lock);
        vregion->tcam = tcam;
        vregion->mlxsw_sp = mlxsw_sp;
        vregion->vgroup = vgroup;
        vregion->ref_count = 1;

        vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
        if (IS_ERR(vregion->key_info)) {
                err = PTR_ERR(vregion->key_info);
                goto err_key_info_get;
        }

        vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
                                                          vregion, NULL);
        if (IS_ERR(vregion->region)) {
                err = PTR_ERR(vregion->region);
                goto err_region_create;
        }

        err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
                                                      priority);
        if (err)
                goto err_vgroup_vregion_attach;

        if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
                /* Create the delayed work for vregion periodic rehash */
                INIT_DELAYED_WORK(&vregion->rehash.dw,
                                  mlxsw_sp_acl_tcam_vregion_rehash_work);
                mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
                mutex_lock(&tcam->lock);
                list_add_tail(&vregion->tlist, &tcam->vregion_list);
                mutex_unlock(&tcam->lock);
        }

        return vregion;

err_vgroup_vregion_attach:
        mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
err_region_create:
        mlxsw_afk_key_info_put(vregion->key_info);
err_key_info_get:
        kfree(vregion);
        return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
        struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;

        if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
                mutex_lock(&tcam->lock);
                list_del(&vregion->tlist);
                mutex_unlock(&tcam->lock);
                cancel_delayed_work_sync(&vregion->rehash.dw);
        }
        mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
        if (vregion->region2)
                mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
        mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
        mlxsw_afk_key_info_put(vregion->key_info);
        mutex_destroy(&vregion->lock);
        kfree(vregion);
}

u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
                                                struct mlxsw_sp_acl_tcam *tcam)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        u32 vregion_rehash_intrvl;

        if (WARN_ON(!ops->region_rehash_hints_get))
                return 0;
        vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
        return vregion_rehash_intrvl;
}

int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
                                                struct mlxsw_sp_acl_tcam *tcam,
                                                u32 val)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_vregion *vregion;

        if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
                return -EINVAL;
        if (WARN_ON(!ops->region_rehash_hints_get))
                return -EOPNOTSUPP;
        tcam->vregion_rehash_intrvl = val;
        mutex_lock(&tcam->lock);
        list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
                if (val)
                        mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
                else
                        cancel_delayed_work_sync(&vregion->rehash.dw);
        }
        mutex_unlock(&tcam->lock);
        return 0;
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                              unsigned int priority,
                              struct mlxsw_afk_element_usage *elusage)
{
        struct mlxsw_afk_element_usage vregion_elusage;
        struct mlxsw_sp_acl_tcam_vregion *vregion;
        bool need_split;

        vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
                                                        elusage, &need_split);
        if (vregion) {
                if (need_split) {
                        /* According to the priority, the new vchunk should
                         * belong to an existing vregion. However, this
                         * vchunk needs elements that the vregion does not
                         * contain. We would need to split the existing
                         * vregion into two and create a new vregion for
                         * the new vchunk in between. This is not currently
                         * supported.
                         */
                        return ERR_PTR(-EOPNOTSUPP);
                }
                vregion->ref_count++;
                return vregion;
        }

        mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
                                              &vregion_elusage);

        return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
                                                &vregion_elusage);
}

static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        if (--vregion->ref_count)
                return;
        mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}

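/* vregion lifetime follows a plain reference count: _get() either
 * reuses a compatible vregion and bumps ref_count, or creates one with
 * ref_count = 1; _put() destroys the vregion once the count drops to
 * zero. The count is a plain unsigned int, so callers are expected to
 * serialize rule updates. The vchunk _get()/_put() pair below mirrors
 * the same pattern.
 */
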
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_vchunk *vchunk,
                               struct mlxsw_sp_acl_tcam_region *region)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_chunk *chunk;

        chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
        if (!chunk)
                return ERR_PTR(-ENOMEM);
        chunk->vchunk = vchunk;
        chunk->region = region;

        ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
        return chunk;
}

static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_chunk *chunk)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        ops->chunk_fini(chunk->priv);
        kfree(chunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                unsigned int priority,
                                struct mlxsw_afk_element_usage *elusage)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
        struct mlxsw_sp_acl_tcam_vregion *vregion;
        struct list_head *pos;
        int err;

        if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
                return ERR_PTR(-EINVAL);

        vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
        if (!vchunk)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&vchunk->ventry_list);
        vchunk->priority = priority;
        vchunk->vgroup = vgroup;
        vchunk->ref_count = 1;

        vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
                                                priority, elusage);
        if (IS_ERR(vregion)) {
                err = PTR_ERR(vregion);
                goto err_vregion_get;
        }

        vchunk->vregion = vregion;

        err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
                                     mlxsw_sp_acl_tcam_vchunk_ht_params);
        if (err)
                goto err_rhashtable_insert;

        mutex_lock(&vregion->lock);
        vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
                                                       vchunk->vregion->region);
        if (IS_ERR(vchunk->chunk)) {
                mutex_unlock(&vregion->lock);
                err = PTR_ERR(vchunk->chunk);
                goto err_chunk_create;
        }

        mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);

        /* Position the vchunk inside the list according to priority */
        list_for_each(pos, &vregion->vchunk_list) {
                vchunk2 = list_entry(pos, typeof(*vchunk2), list);
                if (vchunk2->priority > priority)
                        break;
        }
        list_add_tail(&vchunk->list, pos);
        mutex_unlock(&vregion->lock);

        return vchunk;

err_chunk_create:
        rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
                               mlxsw_sp_acl_tcam_vchunk_ht_params);
err_rhashtable_insert:
        mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
err_vregion_get:
        kfree(vchunk);
        return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
        struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;

        mutex_lock(&vregion->lock);
        mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
        list_del(&vchunk->list);
        if (vchunk->chunk2)
                mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
        mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
        mutex_unlock(&vregion->lock);
        rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
                               mlxsw_sp_acl_tcam_vchunk_ht_params);
        mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
        kfree(vchunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                             unsigned int priority,
                             struct mlxsw_afk_element_usage *elusage)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;

        vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
                                        mlxsw_sp_acl_tcam_vchunk_ht_params);
        if (vchunk) {
                if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
                                                       elusage)))
                        return ERR_PTR(-EINVAL);
                vchunk->ref_count++;
                return vchunk;
        }
        return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
                                               priority, elusage);
}

static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
        if (--vchunk->ref_count)
                return;
        mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}

static struct mlxsw_sp_acl_tcam_entry *
mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_ventry *ventry,
                               struct mlxsw_sp_acl_tcam_chunk *chunk)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_entry *entry;
        int err;

        entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
        if (!entry)
                return ERR_PTR(-ENOMEM);
        entry->ventry = ventry;
        entry->chunk = chunk;

        err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
                             entry->priv, ventry->rulei);
        if (err)
                goto err_entry_add;

        return entry;

err_entry_add:
        kfree(entry);
        return ERR_PTR(err);
}

static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
                                            struct mlxsw_sp_acl_tcam_entry *entry)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
                       entry->chunk->priv, entry->priv);
        kfree(entry);
}

static int
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_acl_tcam_region *region,
                                       struct mlxsw_sp_acl_tcam_entry *entry,
                                       struct mlxsw_sp_acl_rule_info *rulei)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        return ops->entry_action_replace(mlxsw_sp, region->priv,
                                         entry->priv, rulei);
}

static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_tcam_entry *entry,
                                     bool *activity)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
                                       entry->priv, activity);
}

static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                        struct mlxsw_sp_acl_tcam_ventry *ventry,
                                        struct mlxsw_sp_acl_rule_info *rulei)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion;
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;
        int err;

        vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
                                              &rulei->values.elusage);
        if (IS_ERR(vchunk))
                return PTR_ERR(vchunk);

        ventry->vchunk = vchunk;
        ventry->rulei = rulei;
        vregion = vchunk->vregion;

        mutex_lock(&vregion->lock);
        ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
                                                       vchunk->chunk);
        if (IS_ERR(ventry->entry)) {
                mutex_unlock(&vregion->lock);
                err = PTR_ERR(ventry->entry);
                goto err_entry_create;
        }

        list_add_tail(&ventry->list, &vchunk->ventry_list);
        mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
        mutex_unlock(&vregion->lock);

        return 0;

err_entry_create:
        mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
        return err;
}

static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_acl_tcam_ventry *ventry)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
        struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

        mutex_lock(&vregion->lock);
        mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
        list_del(&ventry->list);
        mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
        mutex_unlock(&vregion->lock);
        mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
}

static int
mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_ventry *ventry,
                                        struct mlxsw_sp_acl_rule_info *rulei)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;

        return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
                                                      vchunk->vregion->region,
                                                      ventry->entry, rulei);
}

static int
mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_ventry *ventry,
                                      bool *activity)
{
        return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
                                                    ventry->entry, activity);
}

static int
mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_ventry *ventry,
                                 struct mlxsw_sp_acl_tcam_chunk *chunk,
                                 int *credits)
{
        struct mlxsw_sp_acl_tcam_entry *new_entry;

        /* First, check whether the entry is already where we want it
         * to be.
         */
        if (ventry->entry->chunk == chunk)
                return 0;

        if (--(*credits) < 0)
                return 0;

        new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
        if (IS_ERR(new_entry))
                return PTR_ERR(new_entry);
        mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
        ventry->entry = new_entry;
        return 0;
}

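/* Credits sketch: each migrated ventry costs one credit. With
 * MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS = 100, one rehash work
 * iteration moves at most 100 entries. The decrement above happens
 * before the copy, so the ventry that drives *credits negative is not
 * migrated; the caller records it as ctx->start_ventry and the
 * rescheduled work continues from there.
 */
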
static int
mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
                                       struct mlxsw_sp_acl_tcam_region *region,
                                       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
        struct mlxsw_sp_acl_tcam_chunk *new_chunk;

        new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
        if (IS_ERR(new_chunk))
                return PTR_ERR(new_chunk);
        vchunk->chunk2 = vchunk->chunk;
        vchunk->chunk = new_chunk;
        ctx->current_vchunk = vchunk;
        ctx->start_ventry = NULL;
        ctx->stop_ventry = NULL;
        return 0;
}

static void
mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
                                     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
        mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
        vchunk->chunk2 = NULL;
        ctx->current_vchunk = NULL;
}

static int
mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
                                     struct mlxsw_sp_acl_tcam_region *region,
                                     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
                                     int *credits)
{
        struct mlxsw_sp_acl_tcam_ventry *ventry;
        int err;

        if (vchunk->chunk->region != region) {
                err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
                                                             region, ctx);
                if (err)
                        return err;
        } else if (!vchunk->chunk2) {
                /* The chunk is already as it should be, nothing to do. */
                return 0;
        }

        /* If the migration got interrupted, we have the ventry to start
         * from stored in the context.
         */
        if (ctx->start_ventry)
                ventry = ctx->start_ventry;
        else
                ventry = list_first_entry(&vchunk->ventry_list,
                                          typeof(*ventry), list);

        list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
                /* During rollback, once we reach the ventry that failed
                 * to migrate, we are done.
                 */
                if (ventry == ctx->stop_ventry)
                        break;

                err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
                                                       vchunk->chunk, credits);
                if (err) {
                        if (ctx->this_is_rollback) {
                                /* Save the ventry which we ended with and
                                 * try to continue later on.
                                 */
                                ctx->start_ventry = ventry;
                                return err;
                        }
                        /* Swap the chunk and chunk2 pointers so the
                         * follow-up rollback call will see the original
                         * chunk pointer in vchunk->chunk.
                         */
                        swap(vchunk->chunk, vchunk->chunk2);
                        /* The rollback has to be done from the beginning
                         * of the chunk, which is why we have to NULL the
                         * start_ventry. However, we do know where to stop
                         * the rollback: at the current ventry.
                         */
                        ctx->start_ventry = NULL;
                        ctx->stop_ventry = ventry;
                        return err;
                } else if (*credits < 0) {
                        /* We are out of credits; the rest of the ventries
                         * will be migrated later. Save the ventry which
                         * we ended with.
                         */
                        ctx->start_ventry = ventry;
                        return 0;
                }
        }

        mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
        return 0;
}

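/* Failure-handling sketch for the loop above, assuming a ventry copy
 * fails mid-vchunk during forward migration:
 *
 *	1. swap(chunk, chunk2): vchunk->chunk points at the original
 *	   chunk again, so the rollback pass re-targets it
 *	2. start_ventry = NULL: rollback re-walks from the list head
 *	3. stop_ventry = failed ventry: entries past it never left the
 *	   original chunk, so rollback stops there
 *
 * The caller then re-runs the migration with this_is_rollback set.
 */
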
static int
mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_tcam_vregion *vregion,
                                     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
                                     int *credits)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;
        int err;

        /* If the migration got interrupted, we have the vchunk we were
         * working on stored in the context.
         */
        if (ctx->current_vchunk)
                vchunk = ctx->current_vchunk;
        else
                vchunk = list_first_entry(&vregion->vchunk_list,
                                          typeof(*vchunk), list);

        list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
                err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
                                                           vregion->region,
                                                           ctx, credits);
                if (err || *credits < 0)
                        return err;
        }
        return 0;
}

1387 static int
1388 mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
1389                                   struct mlxsw_sp_acl_tcam_vregion *vregion,
1390                                   struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
1391                                   int *credits)
1392 {
1393         int err, err2;
1394
1395         trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
1396         mutex_lock(&vregion->lock);
1397         err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
1398                                                    ctx, credits);
1399         if (err) {
1400                 /* The migration was not successful, so swap back to
1401                  * assign the original region pointer to
1402                  * vregion->region again.
1403                  */
1404                 swap(vregion->region, vregion->region2);
1405                 ctx->current_vchunk = NULL;
1406                 ctx->this_is_rollback = true;
1407                 err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
1408                                                             ctx, credits);
1409                 if (err2) {
1410                         trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
1411                                                                                vregion);
1412                         dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback after vregion migration failure\n");
1413                         /* Let the rollback be continued later on. */
1414                 }
1415         }
1416         mutex_unlock(&vregion->lock);
1417         trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
1418         return err;
1419 }
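
/* Illustrative sketch (hypothetical demo_* names, not part of the
 * driver): the swap()-based rollback used above. Entries are always
 * migrated toward the resource held in the "cur" pointer, so undoing a
 * failed migration amounts to swapping the two pointers and replaying
 * the very same routine in the opposite direction.
 */
struct demo_res;

struct demo_rehash_ctx {
	bool this_is_rollback;
};

struct demo_obj {
	struct demo_res *cur;	/* entries migrate toward this resource */
	struct demo_res *old;	/* entries migrate away from this one */
};

/* Assumed helper: resumable migration of all entries from old to cur. */
static int demo_migrate_all(struct demo_obj *obj,
			    struct demo_rehash_ctx *ctx, int *credits);

static int demo_migrate(struct demo_obj *obj, struct demo_rehash_ctx *ctx,
			int *credits)
{
	int err, err2;

	err = demo_migrate_all(obj, ctx, credits);
	if (err) {
		/* Point cur back at the original resource... */
		swap(obj->cur, obj->old);
		ctx->this_is_rollback = true;
		/* ...and replay the migration to copy everything back. */
		err2 = demo_migrate_all(obj, ctx, credits);
		if (err2)
			pr_err("demo: rollback interrupted, to be continued\n");
	}
	return err;
}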
1420
1421 static bool
1422 mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1423 {
1424         return ctx->hints_priv;
1425 }
1426
1427 static int
1428 mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
1429                                        struct mlxsw_sp_acl_tcam_vregion *vregion,
1430                                        struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1431 {
1432         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1433         unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
1434         struct mlxsw_sp_acl_tcam_region *new_region;
1435         void *hints_priv;
1436         int err;
1437
1438         trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);
1439
1440         hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
1441         if (IS_ERR(hints_priv))
1442                 return PTR_ERR(hints_priv);
1443
1444         new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
1445                                                      vregion, hints_priv);
1446         if (IS_ERR(new_region)) {
1447                 err = PTR_ERR(new_region);
1448                 goto err_region_create;
1449         }
1450
1451         /* Make vregion->region point to the new region we are going
1452          * to migrate to; the original region is parked in region2.
1453          */
1454         vregion->region2 = vregion->region;
1455         vregion->region = new_region;
1456         err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
1457                                                     vregion->region2->group,
1458                                                     new_region, priority,
1459                                                     vregion->region2);
1460         if (err)
1461                 goto err_group_region_attach;
1462
1463         ctx->hints_priv = hints_priv;
1464         ctx->this_is_rollback = false;
1465
1466         return 0;
1467
1468 err_group_region_attach:
1469         vregion->region = vregion->region2;
1470         vregion->region2 = NULL;
1471         mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region);
1472 err_region_create:
1473         ops->region_rehash_hints_put(hints_priv);
1474         return err;
1475 }
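
/* Illustrative sketch (hypothetical names, reusing struct demo_obj from
 * the sketch above): the make-before-break setup performed by the start
 * routine. The new resource becomes the primary pointer before a single
 * entry moves; the old one is parked in the secondary pointer, so a
 * rollback is a plain pointer restore and the end step only has to
 * destroy the parked resource.
 */

/* Assumed helper: makes new_res visible to hardware lookups. */
static int demo_attach(struct demo_res *new_res);

static int demo_swap_in_new(struct demo_obj *obj, struct demo_res *new_res)
{
	int err;

	obj->old = obj->cur;	/* park the original resource */
	obj->cur = new_res;	/* new entries land here from now on */

	err = demo_attach(new_res);
	if (err) {
		/* Undo the pointer shuffle so the object is untouched. */
		obj->cur = obj->old;
		obj->old = NULL;
		return err;
	}
	return 0;
}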
1476
1477 static void
1478 mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
1479                                      struct mlxsw_sp_acl_tcam_vregion *vregion,
1480                                      struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1481 {
1482         struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
1483         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1484
1485         vregion->region2 = NULL;
1486         mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
1487         mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
1488         ops->region_rehash_hints_put(ctx->hints_priv);
1489         ctx->hints_priv = NULL;
1490 }
1491
1492 static void
1493 mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
1494                                  struct mlxsw_sp_acl_tcam_vregion *vregion,
1495                                  int *credits)
1496 {
1497         struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
1498         int err;
1499
1500         /* Check if the previous rehash work was interrupted, in
1501          * which case it has to be continued now.
1502          * If not, start a new rehash.
1503          */
1504         if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
1505                 err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
1506                                                              vregion, ctx);
1507                 if (err) {
1508                         if (err != -EAGAIN)
1509                                 dev_err(mlxsw_sp->bus_info->dev, "Failed to get rehash hints\n");
1510                         return;
1511                 }
1512         }
1513
1514         err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
1515                                                 ctx, credits);
1516         if (err)
1517                 dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
1519
1520         if (*credits >= 0)
1521                 mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
1522 }
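
/* Illustrative sketch (hypothetical demo_* names): how a credit-limited
 * pass like the one above is typically driven from delayed work. Each
 * invocation gets a fixed budget; when the pass exhausts it, the work
 * is simply rescheduled and resumes from the cursors saved in the
 * context.
 */
struct demo_rehash_obj {
	struct delayed_work rehash_dw;
	/* ... vregion state and the rehash context with saved cursors ... */
};

/* Assumed helper: one credit-limited rehash pass, as sketched above. */
static void demo_rehash(struct demo_rehash_obj *obj, int *credits);

static void demo_rehash_work(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct demo_rehash_obj *obj = container_of(dw, struct demo_rehash_obj,
						   rehash_dw);
	int credits = 100;	/* arbitrary per-pass budget */

	demo_rehash(obj, &credits);
	if (credits < 0)
		/* Out of budget mid-way; run another pass right away. */
		schedule_delayed_work(&obj->rehash_dw, 0);
}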
1523
1524 static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
1525         MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
1526         MLXSW_AFK_ELEMENT_DMAC_32_47,
1527         MLXSW_AFK_ELEMENT_DMAC_0_31,
1528         MLXSW_AFK_ELEMENT_SMAC_32_47,
1529         MLXSW_AFK_ELEMENT_SMAC_0_31,
1530         MLXSW_AFK_ELEMENT_ETHERTYPE,
1531         MLXSW_AFK_ELEMENT_IP_PROTO,
1532         MLXSW_AFK_ELEMENT_SRC_IP_0_31,
1533         MLXSW_AFK_ELEMENT_DST_IP_0_31,
1534         MLXSW_AFK_ELEMENT_DST_L4_PORT,
1535         MLXSW_AFK_ELEMENT_SRC_L4_PORT,
1536         MLXSW_AFK_ELEMENT_VID,
1537         MLXSW_AFK_ELEMENT_PCP,
1538         MLXSW_AFK_ELEMENT_TCP_FLAGS,
1539         MLXSW_AFK_ELEMENT_IP_TTL_,
1540         MLXSW_AFK_ELEMENT_IP_ECN,
1541         MLXSW_AFK_ELEMENT_IP_DSCP,
1542 };
1543
1544 static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
1545         MLXSW_AFK_ELEMENT_ETHERTYPE,
1546         MLXSW_AFK_ELEMENT_IP_PROTO,
1547         MLXSW_AFK_ELEMENT_SRC_IP_96_127,
1548         MLXSW_AFK_ELEMENT_SRC_IP_64_95,
1549         MLXSW_AFK_ELEMENT_SRC_IP_32_63,
1550         MLXSW_AFK_ELEMENT_SRC_IP_0_31,
1551         MLXSW_AFK_ELEMENT_DST_IP_96_127,
1552         MLXSW_AFK_ELEMENT_DST_IP_64_95,
1553         MLXSW_AFK_ELEMENT_DST_IP_32_63,
1554         MLXSW_AFK_ELEMENT_DST_IP_0_31,
1555         MLXSW_AFK_ELEMENT_DST_L4_PORT,
1556         MLXSW_AFK_ELEMENT_SRC_L4_PORT,
1557 };
1558
1559 static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
1560         {
1561                 .elements = mlxsw_sp_acl_tcam_pattern_ipv4,
1562                 .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
1563         },
1564         {
1565                 .elements = mlxsw_sp_acl_tcam_pattern_ipv6,
1566                 .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
1567         },
1568 };
1569
1570 #define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
1571         ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
1572
1573 struct mlxsw_sp_acl_tcam_flower_ruleset {
1574         struct mlxsw_sp_acl_tcam_vgroup vgroup;
1575 };
1576
1577 struct mlxsw_sp_acl_tcam_flower_rule {
1578         struct mlxsw_sp_acl_tcam_ventry ventry;
1579 };
1580
1581 static int
1582 mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
1583                                      struct mlxsw_sp_acl_tcam *tcam,
1584                                      void *ruleset_priv,
1585                                      struct mlxsw_afk_element_usage *tmplt_elusage)
1586 {
1587         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1588
1589         return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
1590                                             mlxsw_sp_acl_tcam_patterns,
1591                                             MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
1592                                             tmplt_elusage, true);
1593 }
1594
1595 static void
1596 mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
1597                                      void *ruleset_priv)
1598 {
1599         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1600
1601         mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
1602 }
1603
1604 static int
1605 mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
1606                                       void *ruleset_priv,
1607                                       struct mlxsw_sp_port *mlxsw_sp_port,
1608                                       bool ingress)
1609 {
1610         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1611
1612         return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
1613                                             mlxsw_sp_port, ingress);
1614 }
1615
1616 static void
1617 mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
1618                                         void *ruleset_priv,
1619                                         struct mlxsw_sp_port *mlxsw_sp_port,
1620                                         bool ingress)
1621 {
1622         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1623
1624         mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
1625                                        mlxsw_sp_port, ingress);
1626 }
1627
1628 static u16
1629 mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
1630 {
1631         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1632
1633         return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
1634 }
1635
1636 static int
1637 mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
1638                                   void *ruleset_priv, void *rule_priv,
1639                                   struct mlxsw_sp_acl_rule_info *rulei)
1640 {
1641         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1642         struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1643
1644         return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
1645                                             &rule->ventry, rulei);
1646 }
1647
1648 static void
1649 mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
1650 {
1651         struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1652
1653         mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
1654 }
1655
1656 static int
1657 mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
1658                                              void *rule_priv,
1659                                              struct mlxsw_sp_acl_rule_info *rulei)
1660 {
1661         return -EOPNOTSUPP;
1662 }
1663
1664 static int
1665 mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
1666                                            void *rule_priv, bool *activity)
1667 {
1668         struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1669
1670         return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
1671                                                      activity);
1672 }
1673
1674 static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
1675         .ruleset_priv_size      = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
1676         .ruleset_add            = mlxsw_sp_acl_tcam_flower_ruleset_add,
1677         .ruleset_del            = mlxsw_sp_acl_tcam_flower_ruleset_del,
1678         .ruleset_bind           = mlxsw_sp_acl_tcam_flower_ruleset_bind,
1679         .ruleset_unbind         = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
1680         .ruleset_group_id       = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
1681         .rule_priv_size         = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
1682         .rule_add               = mlxsw_sp_acl_tcam_flower_rule_add,
1683         .rule_del               = mlxsw_sp_acl_tcam_flower_rule_del,
1684         .rule_action_replace    = mlxsw_sp_acl_tcam_flower_rule_action_replace,
1685         .rule_activity_get      = mlxsw_sp_acl_tcam_flower_rule_activity_get,
1686 };
1687
1688 struct mlxsw_sp_acl_tcam_mr_ruleset {
1689         struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1690         struct mlxsw_sp_acl_tcam_vgroup vgroup;
1691 };
1692
1693 struct mlxsw_sp_acl_tcam_mr_rule {
1694         struct mlxsw_sp_acl_tcam_ventry ventry;
1695 };
1696
1697 static int
1698 mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
1699                                  struct mlxsw_sp_acl_tcam *tcam,
1700                                  void *ruleset_priv,
1701                                  struct mlxsw_afk_element_usage *tmplt_elusage)
1702 {
1703         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1704         int err;
1705
1706         err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
1707                                            mlxsw_sp_acl_tcam_patterns,
1708                                            MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
1709                                            tmplt_elusage, false);
1710         if (err)
1711                 return err;
1712
1713         /* For most TCAM clients it would make sense to take a TCAM chunk
1714          * only when the first rule is written. This is not the case for
1715          * the multicast router, which has to be bound to a specific ACL
1716          * group ID that must already exist in HW before the multicast
1717          * router is initialized.
1718          */
1719         ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
1720                                                        &ruleset->vgroup, 1,
1721                                                        tmplt_elusage);
1722         if (IS_ERR(ruleset->vchunk)) {
1723                 err = PTR_ERR(ruleset->vchunk);
1724                 goto err_chunk_get;
1725         }
1726
1727         return 0;
1728
1729 err_chunk_get:
1730         mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
1731         return err;
1732 }
1733
1734 static void
1735 mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
1736 {
1737         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1738
1739         mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
1740         mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
1741 }
1742
1743 static int
1744 mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
1745                                   struct mlxsw_sp_port *mlxsw_sp_port,
1746                                   bool ingress)
1747 {
1748         /* Binding is done when initializing the multicast router. */
1749         return 0;
1750 }
1751
1752 static void
1753 mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
1754                                     void *ruleset_priv,
1755                                     struct mlxsw_sp_port *mlxsw_sp_port,
1756                                     bool ingress)
1757 {
1758 }
1759
1760 static u16
1761 mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
1762 {
1763         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1764
1765         return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
1766 }
1767
1768 static int
1769 mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
1770                               void *rule_priv,
1771                               struct mlxsw_sp_acl_rule_info *rulei)
1772 {
1773         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1774         struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1775
1776         return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
1777                                             &rule->ventry, rulei);
1778 }
1779
1780 static void
1781 mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
1782 {
1783         struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1784
1785         mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
1786 }
1787
1788 static int
1789 mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
1790                                          void *rule_priv,
1791                                          struct mlxsw_sp_acl_rule_info *rulei)
1792 {
1793         struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1794
1795         return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
1796                                                        rulei);
1797 }
1798
1799 static int
1800 mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
1801                                        void *rule_priv, bool *activity)
1802 {
1803         struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1804
1805         return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
1806                                                      activity);
1807 }
1808
1809 static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
1810         .ruleset_priv_size      = sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
1811         .ruleset_add            = mlxsw_sp_acl_tcam_mr_ruleset_add,
1812         .ruleset_del            = mlxsw_sp_acl_tcam_mr_ruleset_del,
1813         .ruleset_bind           = mlxsw_sp_acl_tcam_mr_ruleset_bind,
1814         .ruleset_unbind         = mlxsw_sp_acl_tcam_mr_ruleset_unbind,
1815         .ruleset_group_id       = mlxsw_sp_acl_tcam_mr_ruleset_group_id,
1816         .rule_priv_size         = sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
1817         .rule_add               = mlxsw_sp_acl_tcam_mr_rule_add,
1818         .rule_del               = mlxsw_sp_acl_tcam_mr_rule_del,
1819         .rule_action_replace    = mlxsw_sp_acl_tcam_mr_rule_action_replace,
1820         .rule_activity_get      = mlxsw_sp_acl_tcam_mr_rule_activity_get,
1821 };
1822
1823 static const struct mlxsw_sp_acl_profile_ops *
1824 mlxsw_sp_acl_tcam_profile_ops_arr[] = {
1825         [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
1826         [MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
1827 };
1828
1829 const struct mlxsw_sp_acl_profile_ops *
1830 mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
1831                               enum mlxsw_sp_acl_profile profile)
1832 {
1833         const struct mlxsw_sp_acl_profile_ops *ops;
1834
1835         if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
1836                 return NULL;
1837         ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
1838         if (WARN_ON(!ops))
1839                 return NULL;
1840         return ops;
1841 }
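
/* Illustrative sketch (hypothetical caller, not part of the driver): how
 * a client would resolve the profile ops above and create a ruleset
 * through them. Only fields visible in this file are used; passing a
 * NULL template element usage is an assumption of this sketch.
 */
static int demo_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam *tcam,
			       enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	void *priv;
	int err;

	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp, profile);
	if (!ops)
		return -EINVAL;

	/* The ops advertise how much private memory the profile needs. */
	priv = kzalloc(ops->ruleset_priv_size, GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	err = ops->ruleset_add(mlxsw_sp, tcam, priv, NULL);
	if (err) {
		kfree(priv);
		return err;
	}
	/* On teardown the caller calls ops->ruleset_del() and frees priv. */
	return 0;
}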