platform/kernel/linux-starfive.git: drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c (commit d50786b0a6ce47924c55a9fbc53200f50bd96335)
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/slab.h>
6 #include <linux/errno.h>
7 #include <linux/bitops.h>
8 #include <linux/list.h>
9 #include <linux/rhashtable.h>
10 #include <linux/netdevice.h>
11 #include <linux/mutex.h>
12 #include <net/devlink.h>
13 #include <trace/events/mlxsw.h>
14
15 #include "reg.h"
16 #include "core.h"
17 #include "resources.h"
18 #include "spectrum.h"
19 #include "spectrum_acl_tcam.h"
20 #include "core_acl_flex_keys.h"
21
22 size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
23 {
24         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
25
26         return ops->priv_size;
27 }
28
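/* Parameters of the periodic vregion rehash: the rehash delayed work is
 * scheduled every vregion_rehash_intrvl milliseconds (default and minimum
 * values below), and each invocation may migrate at most
 * MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS entries before rescheduling.
 */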
29 #define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
30 #define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
31 #define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */
32
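/* Convert a TC rule priority to a hardware priority. In hardware, a higher
 * number means a higher priority, so the value is mirrored around the top of
 * the 1..cap_kvd_size-1 range. Illustrative example (values assumed for the
 * example only): with cap_kvd_size of 1000, max_priority is 999 and a TC
 * priority of 10 maps to a hardware priority of 989.
 */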
33 int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
34                                    struct mlxsw_sp_acl_rule_info *rulei,
35                                    u32 *priority, bool fillup_priority)
36 {
37         u64 max_priority;
38
39         if (!fillup_priority) {
40                 *priority = 0;
41                 return 0;
42         }
43
44         if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
45                 return -EIO;
46
47         /* Priority range is 1..cap_kvd_size-1. */
48         max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
49         if (rulei->priority >= max_priority)
50                 return -EINVAL;
51
52         /* Unlike in TC, in HW, higher number means higher priority. */
53         *priority = max_priority - rulei->priority;
54         return 0;
55 }
56
57 static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
58                                            u16 *p_id)
59 {
60         u16 id;
61
62         id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
63         if (id < tcam->max_regions) {
64                 __set_bit(id, tcam->used_regions);
65                 *p_id = id;
66                 return 0;
67         }
68         return -ENOBUFS;
69 }
70
71 static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
72                                             u16 id)
73 {
74         __clear_bit(id, tcam->used_regions);
75 }
76
77 static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
78                                           u16 *p_id)
79 {
80         u16 id;
81
82         id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
83         if (id < tcam->max_groups) {
84                 __set_bit(id, tcam->used_groups);
85                 *p_id = id;
86                 return 0;
87         }
88         return -ENOBUFS;
89 }
90
91 static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
92                                            u16 id)
93 {
94         __clear_bit(id, tcam->used_groups);
95 }
96
97 struct mlxsw_sp_acl_tcam_pattern {
98         const enum mlxsw_afk_element *elements;
99         unsigned int elements_count;
100 };
101
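/* Object hierarchy used below: a vgroup wraps a group and holds a list of
 * vregions ordered by priority; a vregion owns one region (plus a second one
 * temporarily during rehash migration) and a priority-ordered list of
 * vchunks; a vchunk owns a chunk (plus a second one during migration) and a
 * list of ventries; a ventry owns a single entry. The "v" (virtual) objects
 * are the stable software representation, while region, chunk and entry map
 * to the actual TCAM resources and may be replaced when rules are migrated
 * to a new region.
 */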
102 struct mlxsw_sp_acl_tcam_group {
103         struct mlxsw_sp_acl_tcam *tcam;
104         u16 id;
105         struct mutex lock; /* guards region list updates */
106         struct list_head region_list;
107         unsigned int region_count;
108 };
109
110 struct mlxsw_sp_acl_tcam_vgroup {
111         struct mlxsw_sp_acl_tcam_group group;
112         struct list_head vregion_list;
113         struct rhashtable vchunk_ht;
114         const struct mlxsw_sp_acl_tcam_pattern *patterns;
115         unsigned int patterns_count;
116         bool tmplt_elusage_set;
117         struct mlxsw_afk_element_usage tmplt_elusage;
118         bool vregion_rehash_enabled;
119         unsigned int *p_min_prio;
120         unsigned int *p_max_prio;
121 };
122
123 struct mlxsw_sp_acl_tcam_rehash_ctx {
124         void *hints_priv;
125         bool this_is_rollback;
126         struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk being
127                                                           * currently migrated.
128                                                           */
129         struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry to start
130                                                         * migration from in
131                                                         * a vchunk being
132                                                         * currently migrated.
133                                                         */
134         struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry at which to
135                                                        * stop migration in
136                                                        * the vchunk currently
137                                                        * being migrated.
138                                                        */
139 };
140
141 struct mlxsw_sp_acl_tcam_vregion {
142         struct mutex lock; /* Protects consistency of region, region2 pointers
143                             * and vchunk_list.
144                             */
145         struct mlxsw_sp_acl_tcam_region *region;
146         struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
147         struct list_head list; /* Member of a TCAM group */
148         struct list_head tlist; /* Member of a TCAM */
149         struct list_head vchunk_list; /* List of vchunks under this vregion */
150         struct mlxsw_afk_key_info *key_info;
151         struct mlxsw_sp_acl_tcam *tcam;
152         struct mlxsw_sp_acl_tcam_vgroup *vgroup;
153         struct {
154                 struct delayed_work dw;
155                 struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
156         } rehash;
157         struct mlxsw_sp *mlxsw_sp;
158         unsigned int ref_count;
159 };
160
161 struct mlxsw_sp_acl_tcam_vchunk;
162
163 struct mlxsw_sp_acl_tcam_chunk {
164         struct mlxsw_sp_acl_tcam_vchunk *vchunk;
165         struct mlxsw_sp_acl_tcam_region *region;
166         unsigned long priv[];
167         /* priv has to be always the last item */
168 };
169
170 struct mlxsw_sp_acl_tcam_vchunk {
171         struct mlxsw_sp_acl_tcam_chunk *chunk;
172         struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
173         struct list_head list; /* Member of a TCAM vregion */
174         struct rhash_head ht_node; /* Member of a chunk HT */
175         struct list_head ventry_list;
176         unsigned int priority; /* Priority within the vregion and group */
177         struct mlxsw_sp_acl_tcam_vgroup *vgroup;
178         struct mlxsw_sp_acl_tcam_vregion *vregion;
179         unsigned int ref_count;
180 };
181
182 struct mlxsw_sp_acl_tcam_entry {
183         struct mlxsw_sp_acl_tcam_ventry *ventry;
184         struct mlxsw_sp_acl_tcam_chunk *chunk;
185         unsigned long priv[];
186         /* priv has to be always the last item */
187 };
188
189 struct mlxsw_sp_acl_tcam_ventry {
190         struct mlxsw_sp_acl_tcam_entry *entry;
191         struct list_head list; /* Member of a TCAM vchunk */
192         struct mlxsw_sp_acl_tcam_vchunk *vchunk;
193         struct mlxsw_sp_acl_rule_info *rulei;
194 };
195
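/* vchunks are looked up by priority: the hash table below keys on the
 * vchunk->priority field, so at most one vchunk exists per priority within
 * a vgroup (see mlxsw_sp_acl_tcam_vchunk_get()).
 */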
196 static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
197         .key_len = sizeof(unsigned int),
198         .key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
199         .head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
200         .automatic_shrinking = true,
201 };
202
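/* Rewrite the ordered list of regions bound to the group (PAGT register).
 * A region is marked "multi" when the next region in the list belongs to the
 * same vregion, which is the case while a rehash migration is in progress
 * and both the old and the new region are attached.
 */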
203 static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
204                                           struct mlxsw_sp_acl_tcam_group *group)
205 {
206         struct mlxsw_sp_acl_tcam_region *region;
207         char pagt_pl[MLXSW_REG_PAGT_LEN];
208         int acl_index = 0;
209
210         mlxsw_reg_pagt_pack(pagt_pl, group->id);
211         list_for_each_entry(region, &group->region_list, list) {
212                 bool multi = false;
213
214                 /* Check if the next entry in the list has the same vregion. */
215                 if (region->list.next != &group->region_list &&
216                     list_next_entry(region, list)->vregion == region->vregion)
217                         multi = true;
218                 mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
219                                            region->id, multi);
220         }
221         mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
222         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
223 }
224
225 static int
226 mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
227                             struct mlxsw_sp_acl_tcam_group *group)
228 {
229         int err;
230
231         group->tcam = tcam;
232         INIT_LIST_HEAD(&group->region_list);
233
234         err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
235         if (err)
236                 return err;
237
238         mutex_init(&group->lock);
239
240         return 0;
241 }
242
243 static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
244 {
245         struct mlxsw_sp_acl_tcam *tcam = group->tcam;
246
247         mutex_destroy(&group->lock);
248         mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
249         WARN_ON(!list_empty(&group->region_list));
250 }
251
252 static int
253 mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
254                              struct mlxsw_sp_acl_tcam *tcam,
255                              struct mlxsw_sp_acl_tcam_vgroup *vgroup,
256                              const struct mlxsw_sp_acl_tcam_pattern *patterns,
257                              unsigned int patterns_count,
258                              struct mlxsw_afk_element_usage *tmplt_elusage,
259                              bool vregion_rehash_enabled,
260                              unsigned int *p_min_prio,
261                              unsigned int *p_max_prio)
262 {
263         int err;
264
265         vgroup->patterns = patterns;
266         vgroup->patterns_count = patterns_count;
267         vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
268         vgroup->p_min_prio = p_min_prio;
269         vgroup->p_max_prio = p_max_prio;
270
271         if (tmplt_elusage) {
272                 vgroup->tmplt_elusage_set = true;
273                 memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
274                        sizeof(vgroup->tmplt_elusage));
275         }
276         INIT_LIST_HEAD(&vgroup->vregion_list);
277
278         err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
279         if (err)
280                 return err;
281
282         err = rhashtable_init(&vgroup->vchunk_ht,
283                               &mlxsw_sp_acl_tcam_vchunk_ht_params);
284         if (err)
285                 goto err_rhashtable_init;
286
287         return 0;
288
289 err_rhashtable_init:
290         mlxsw_sp_acl_tcam_group_del(&vgroup->group);
291         return err;
292 }
293
294 static void
295 mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
296 {
297         rhashtable_destroy(&vgroup->vchunk_ht);
298         mlxsw_sp_acl_tcam_group_del(&vgroup->group);
299         WARN_ON(!list_empty(&vgroup->vregion_list));
300 }
301
302 static int
303 mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
304                              struct mlxsw_sp_acl_tcam_group *group,
305                              struct mlxsw_sp_port *mlxsw_sp_port,
306                              bool ingress)
307 {
308         char ppbt_pl[MLXSW_REG_PPBT_LEN];
309
310         mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
311                                                MLXSW_REG_PXBT_E_EACL,
312                             MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
313                             group->id);
314         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
315 }
316
317 static void
318 mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
319                                struct mlxsw_sp_acl_tcam_group *group,
320                                struct mlxsw_sp_port *mlxsw_sp_port,
321                                bool ingress)
322 {
323         char ppbt_pl[MLXSW_REG_PPBT_LEN];
324
325         mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
326                                                MLXSW_REG_PXBT_E_EACL,
327                             MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
328                             group->id);
329         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
330 }
331
332 static u16
333 mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
334 {
335         return group->id;
336 }
337
338 static unsigned int
339 mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
340 {
341         struct mlxsw_sp_acl_tcam_vchunk *vchunk;
342
343         if (list_empty(&vregion->vchunk_list))
344                 return 0;
345         /* As the priority of a vregion, return the priority of its first vchunk */
346         vchunk = list_first_entry(&vregion->vchunk_list,
347                                   typeof(*vchunk), list);
348         return vchunk->priority;
349 }
350
351 static unsigned int
352 mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
353 {
354         struct mlxsw_sp_acl_tcam_vchunk *vchunk;
355
356         if (list_empty(&vregion->vchunk_list))
357                 return 0;
358         vchunk = list_last_entry(&vregion->vchunk_list,
359                                  typeof(*vchunk), list);
360         return vchunk->priority;
361 }
362
363 static void
364 mlxsw_sp_acl_tcam_vgroup_prio_update(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
365 {
366         struct mlxsw_sp_acl_tcam_vregion *vregion;
367
368         if (list_empty(&vgroup->vregion_list))
369                 return;
370         vregion = list_first_entry(&vgroup->vregion_list,
371                                    typeof(*vregion), list);
372         *vgroup->p_min_prio = mlxsw_sp_acl_tcam_vregion_prio(vregion);
373         vregion = list_last_entry(&vgroup->vregion_list,
374                                   typeof(*vregion), list);
375         *vgroup->p_max_prio = mlxsw_sp_acl_tcam_vregion_max_prio(vregion);
376 }
377
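/* Attach a region to a group, keeping the group's region list ordered by
 * vregion priority. If next_region is given (used during rehash to place the
 * new region right before the one it replaces), the position is taken from
 * it instead. A group may hold at most tcam->max_group_size regions.
 */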
378 static int
379 mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
380                                       struct mlxsw_sp_acl_tcam_group *group,
381                                       struct mlxsw_sp_acl_tcam_region *region,
382                                       unsigned int priority,
383                                       struct mlxsw_sp_acl_tcam_region *next_region)
384 {
385         struct mlxsw_sp_acl_tcam_region *region2;
386         struct list_head *pos;
387         int err;
388
389         mutex_lock(&group->lock);
390         if (group->region_count == group->tcam->max_group_size) {
391                 err = -ENOBUFS;
392                 goto err_region_count_check;
393         }
394
395         if (next_region) {
396                 /* If the next region is defined, place the new one
397                  * before it. The next one is a sibling.
398                  */
399                 pos = &next_region->list;
400         } else {
401                 /* Position the region inside the list according to priority */
402                 list_for_each(pos, &group->region_list) {
403                         region2 = list_entry(pos, typeof(*region2), list);
404                         if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
405                             priority)
406                                 break;
407                 }
408         }
409         list_add_tail(&region->list, pos);
410         region->group = group;
411
412         err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
413         if (err)
414                 goto err_group_update;
415
416         group->region_count++;
417         mutex_unlock(&group->lock);
418         return 0;
419
420 err_group_update:
421         list_del(&region->list);
422 err_region_count_check:
423         mutex_unlock(&group->lock);
424         return err;
425 }
426
427 static void
428 mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
429                                       struct mlxsw_sp_acl_tcam_region *region)
430 {
431         struct mlxsw_sp_acl_tcam_group *group = region->group;
432
433         mutex_lock(&group->lock);
434         list_del(&region->list);
435         group->region_count--;
436         mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
437         mutex_unlock(&group->lock);
438 }
439
440 static int
441 mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
442                                         struct mlxsw_sp_acl_tcam_vgroup *vgroup,
443                                         struct mlxsw_sp_acl_tcam_vregion *vregion,
444                                         unsigned int priority)
445 {
446         struct mlxsw_sp_acl_tcam_vregion *vregion2;
447         struct list_head *pos;
448         int err;
449
450         /* Position the vregion inside the list according to priority */
451         list_for_each(pos, &vgroup->vregion_list) {
452                 vregion2 = list_entry(pos, typeof(*vregion2), list);
453                 if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
454                         break;
455         }
456         list_add_tail(&vregion->list, pos);
457
458         err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
459                                                     vregion->region,
460                                                     priority, NULL);
461         if (err)
462                 goto err_region_attach;
463
464         return 0;
465
466 err_region_attach:
467         list_del(&vregion->list);
468         return err;
469 }
470
471 static void
472 mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
473                                         struct mlxsw_sp_acl_tcam_vregion *vregion)
474 {
475         list_del(&vregion->list);
476         if (vregion->region2)
477                 mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
478                                                       vregion->region2);
479         mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
480 }
481
482 static struct mlxsw_sp_acl_tcam_vregion *
483 mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
484                                       unsigned int priority,
485                                       struct mlxsw_afk_element_usage *elusage,
486                                       bool *p_need_split)
487 {
488         struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
489         struct list_head *pos;
490         bool issubset;
491
492         list_for_each(pos, &vgroup->vregion_list) {
493                 vregion = list_entry(pos, typeof(*vregion), list);
494
495                 /* First, check whether the requested priority actually
496                  * belongs under one of the following vregions.
497                  */
498                 if (pos->next != &vgroup->vregion_list) { /* not last */
499                         vregion2 = list_entry(pos->next, typeof(*vregion2),
500                                               list);
501                         if (priority >=
502                             mlxsw_sp_acl_tcam_vregion_prio(vregion2))
503                                 continue;
504                 }
505
506                 issubset = mlxsw_afk_key_info_subset(vregion->key_info,
507                                                      elusage);
508
509                 /* If the requested element usage would not fit and the
510                  * priority is lower than that of the currently inspected
511                  * vregion, we cannot use this vregion, so return NULL to
512                  * indicate a new vregion has to be created.
513                  */
514                 if (!issubset &&
515                     priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
516                         return NULL;
517
518                 /* If the requested element usage would not fit and the
519                  * priority is higher than that of the currently inspected
520                  * vregion, we cannot use this vregion either. The next
521                  * vregion may still fit, so let it be processed and
522                  * eventually break at the check right above this one.
523                  */
524                 if (!issubset &&
525                     priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
526                         continue;
527
528                 /* Indicate if the vregion needs to be split in order to add
529                  * the requested priority. Split is needed when requested
530                  * element usage won't fit into the found vregion.
531                  */
532                 *p_need_split = !issubset;
533                 return vregion;
534         }
535         return NULL; /* New vregion has to be created. */
536 }
537
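/* Choose the element usage for a new vregion: if the vgroup has a template
 * set, use it; otherwise pick the first pattern that is a superset of the
 * requested elements, falling back to the requested element usage itself
 * when no pattern fits.
 */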
538 static void
539 mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
540                                       struct mlxsw_afk_element_usage *elusage,
541                                       struct mlxsw_afk_element_usage *out)
542 {
543         const struct mlxsw_sp_acl_tcam_pattern *pattern;
544         int i;
545
546         /* If the template is set, we don't have to look up a pattern;
547          * we just use the template.
548          */
549         if (vgroup->tmplt_elusage_set) {
550                 memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
551                 WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
552                 return;
553         }
554
555         for (i = 0; i < vgroup->patterns_count; i++) {
556                 pattern = &vgroup->patterns[i];
557                 mlxsw_afk_element_usage_fill(out, pattern->elements,
558                                              pattern->elements_count);
559                 if (mlxsw_afk_element_usage_subset(elusage, out))
560                         return;
561         }
562         memcpy(out, elusage, sizeof(*out));
563 }
564
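/* Allocate the region in hardware (PTAR register), passing the flexible key
 * block encodings derived from the region's key info. The response carries
 * tcam_region_info, which is used to reference the region in subsequent
 * register accesses.
 */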
565 static int
566 mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
567                                struct mlxsw_sp_acl_tcam_region *region)
568 {
569         struct mlxsw_afk_key_info *key_info = region->key_info;
570         char ptar_pl[MLXSW_REG_PTAR_LEN];
571         unsigned int encodings_count;
572         int i;
573         int err;
574
575         mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
576                             region->key_type,
577                             MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
578                             region->id, region->tcam_region_info);
579         encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
580         for (i = 0; i < encodings_count; i++) {
581                 u16 encoding;
582
583                 encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
584                 mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
585         }
586         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
587         if (err)
588                 return err;
589         mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
590         return 0;
591 }
592
593 static void
594 mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
595                               struct mlxsw_sp_acl_tcam_region *region)
596 {
597         char ptar_pl[MLXSW_REG_PTAR_LEN];
598
599         mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
600                             region->key_type, 0, region->id,
601                             region->tcam_region_info);
602         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
603 }
604
605 static int
606 mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
607                                 struct mlxsw_sp_acl_tcam_region *region)
608 {
609         char pacl_pl[MLXSW_REG_PACL_LEN];
610
611         mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
612                             region->tcam_region_info);
613         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
614 }
615
616 static void
617 mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
618                                  struct mlxsw_sp_acl_tcam_region *region)
619 {
620         char pacl_pl[MLXSW_REG_PACL_LEN];
621
622         mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
623                             region->tcam_region_info);
624         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
625 }
626
627 static struct mlxsw_sp_acl_tcam_region *
628 mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
629                                 struct mlxsw_sp_acl_tcam *tcam,
630                                 struct mlxsw_sp_acl_tcam_vregion *vregion,
631                                 void *hints_priv)
632 {
633         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
634         struct mlxsw_sp_acl_tcam_region *region;
635         int err;
636
637         region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
638         if (!region)
639                 return ERR_PTR(-ENOMEM);
640         region->mlxsw_sp = mlxsw_sp;
641         region->vregion = vregion;
642         region->key_info = vregion->key_info;
643
644         err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
645         if (err)
646                 goto err_region_id_get;
647
648         err = ops->region_associate(mlxsw_sp, region);
649         if (err)
650                 goto err_tcam_region_associate;
651
652         region->key_type = ops->key_type;
653         err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
654         if (err)
655                 goto err_tcam_region_alloc;
656
657         err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
658         if (err)
659                 goto err_tcam_region_enable;
660
661         err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
662                                region, hints_priv);
663         if (err)
664                 goto err_tcam_region_init;
665
666         return region;
667
668 err_tcam_region_init:
669         mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
670 err_tcam_region_enable:
671         mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
672 err_tcam_region_alloc:
673 err_tcam_region_associate:
674         mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
675 err_region_id_get:
676         kfree(region);
677         return ERR_PTR(err);
678 }
679
680 static void
681 mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
682                                  struct mlxsw_sp_acl_tcam_region *region)
683 {
684         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
685
686         ops->region_fini(mlxsw_sp, region->priv);
687         mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
688         mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
689         mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
690                                         region->id);
691         kfree(region);
692 }
693
694 static void
695 mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
696 {
697         unsigned long interval = vregion->tcam->vregion_rehash_intrvl;
698
699         if (!interval)
700                 return;
701         mlxsw_core_schedule_dw(&vregion->rehash.dw,
702                                msecs_to_jiffies(interval));
703 }
704
705 static void
706 mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
707                                  struct mlxsw_sp_acl_tcam_vregion *vregion,
708                                  int *credits);
709
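/* Delayed work performing an incremental vregion rehash. Each invocation
 * starts with MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS credits and migrating
 * one entry consumes one credit. If the credits are exhausted before the
 * migration finishes, the work is rescheduled immediately to continue,
 * otherwise it is rescheduled after the configured interval.
 */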
710 static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
711 {
712         struct mlxsw_sp_acl_tcam_vregion *vregion =
713                 container_of(work, struct mlxsw_sp_acl_tcam_vregion,
714                              rehash.dw.work);
715         int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
716
717         mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
718         if (credits < 0)
719                 /* Rehash ran out of credits, so it was interrupted.
720                  * Schedule the work as soon as possible to continue.
721                  */
722                 mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
723         else
724                 mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
725 }
726
727 static void
728 mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
729 {
730         struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
731
732         /* If a rule was added to or deleted from a vchunk which is
733          * currently under rehash migration, we have to reset the ventry
734          * pointers to make sure all rules are properly migrated.
735          */
736         if (vregion->rehash.ctx.current_vchunk == vchunk) {
737                 vregion->rehash.ctx.start_ventry = NULL;
738                 vregion->rehash.ctx.stop_ventry = NULL;
739         }
740 }
741
742 static void
743 mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
744 {
745         /* If a vchunk was added to or deleted from a vregion, we have to
746          * reset the current vchunk pointer to make sure all vchunks
747          * are properly migrated.
748          */
749         vregion->rehash.ctx.current_vchunk = NULL;
750 }
751
752 static struct mlxsw_sp_acl_tcam_vregion *
753 mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
754                                  struct mlxsw_sp_acl_tcam_vgroup *vgroup,
755                                  unsigned int priority,
756                                  struct mlxsw_afk_element_usage *elusage)
757 {
758         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
759         struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
760         struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
761         struct mlxsw_sp_acl_tcam_vregion *vregion;
762         int err;
763
764         vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
765         if (!vregion)
766                 return ERR_PTR(-ENOMEM);
767         INIT_LIST_HEAD(&vregion->vchunk_list);
768         mutex_init(&vregion->lock);
769         vregion->tcam = tcam;
770         vregion->mlxsw_sp = mlxsw_sp;
771         vregion->vgroup = vgroup;
772         vregion->ref_count = 1;
773
774         vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
775         if (IS_ERR(vregion->key_info)) {
776                 err = PTR_ERR(vregion->key_info);
777                 goto err_key_info_get;
778         }
779
780         vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
781                                                           vregion, NULL);
782         if (IS_ERR(vregion->region)) {
783                 err = PTR_ERR(vregion->region);
784                 goto err_region_create;
785         }
786
787         err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
788                                                       priority);
789         if (err)
790                 goto err_vgroup_vregion_attach;
791
792         if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
793                 /* Create the delayed work for vregion periodic rehash */
794                 INIT_DELAYED_WORK(&vregion->rehash.dw,
795                                   mlxsw_sp_acl_tcam_vregion_rehash_work);
796                 mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
797                 mutex_lock(&tcam->lock);
798                 list_add_tail(&vregion->tlist, &tcam->vregion_list);
799                 mutex_unlock(&tcam->lock);
800         }
801
802         return vregion;
803
804 err_vgroup_vregion_attach:
805         mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
806 err_region_create:
807         mlxsw_afk_key_info_put(vregion->key_info);
808 err_key_info_get:
809         kfree(vregion);
810         return ERR_PTR(err);
811 }
812
813 static void
814 mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
815                                   struct mlxsw_sp_acl_tcam_vregion *vregion)
816 {
817         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
818         struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
819         struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
820
821         if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
822                 mutex_lock(&tcam->lock);
823                 list_del(&vregion->tlist);
824                 mutex_unlock(&tcam->lock);
825                 cancel_delayed_work_sync(&vregion->rehash.dw);
826         }
827         mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
828         if (vregion->region2)
829                 mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
830         mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
831         mlxsw_afk_key_info_put(vregion->key_info);
832         mutex_destroy(&vregion->lock);
833         kfree(vregion);
834 }
835
836 static struct mlxsw_sp_acl_tcam_vregion *
837 mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
838                               struct mlxsw_sp_acl_tcam_vgroup *vgroup,
839                               unsigned int priority,
840                               struct mlxsw_afk_element_usage *elusage)
841 {
842         struct mlxsw_afk_element_usage vregion_elusage;
843         struct mlxsw_sp_acl_tcam_vregion *vregion;
844         bool need_split;
845
846         vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
847                                                         elusage, &need_split);
848         if (vregion) {
849                 if (need_split) {
850                         /* According to the priority, the new vchunk should
851                          * belong to an existing vregion. However, this vchunk
852                          * needs elements that the vregion does not contain.
853                          * We would need to split the existing vregion into two
854                          * and create a new vregion for the new vchunk in
855                          * between. This is not currently supported.
856                          */
857                         return ERR_PTR(-EOPNOTSUPP);
858                 }
859                 vregion->ref_count++;
860                 return vregion;
861         }
862
863         mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
864                                               &vregion_elusage);
865
866         return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
867                                                 &vregion_elusage);
868 }
869
870 static void
871 mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
872                               struct mlxsw_sp_acl_tcam_vregion *vregion)
873 {
874         if (--vregion->ref_count)
875                 return;
876         mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
877 }
878
879 static struct mlxsw_sp_acl_tcam_chunk *
880 mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
881                                struct mlxsw_sp_acl_tcam_vchunk *vchunk,
882                                struct mlxsw_sp_acl_tcam_region *region)
883 {
884         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
885         struct mlxsw_sp_acl_tcam_chunk *chunk;
886
887         chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
888         if (!chunk)
889                 return ERR_PTR(-ENOMEM);
890         chunk->vchunk = vchunk;
891         chunk->region = region;
892
893         ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
894         return chunk;
895 }
896
897 static void
898 mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
899                                 struct mlxsw_sp_acl_tcam_chunk *chunk)
900 {
901         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
902
903         ops->chunk_fini(chunk->priv);
904         kfree(chunk);
905 }
906
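/* Create a vchunk for the given priority: get (or create) a vregion able to
 * hold the requested element usage, insert the vchunk into the vgroup hash
 * table keyed by priority, create the backing chunk in the vregion's current
 * region and link the vchunk into the vregion's priority-ordered list.
 */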
907 static struct mlxsw_sp_acl_tcam_vchunk *
908 mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
909                                 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
910                                 unsigned int priority,
911                                 struct mlxsw_afk_element_usage *elusage)
912 {
913         struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
914         struct mlxsw_sp_acl_tcam_vregion *vregion;
915         struct list_head *pos;
916         int err;
917
918         if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
919                 return ERR_PTR(-EINVAL);
920
921         vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
922         if (!vchunk)
923                 return ERR_PTR(-ENOMEM);
924         INIT_LIST_HEAD(&vchunk->ventry_list);
925         vchunk->priority = priority;
926         vchunk->vgroup = vgroup;
927         vchunk->ref_count = 1;
928
929         vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
930                                                 priority, elusage);
931         if (IS_ERR(vregion)) {
932                 err = PTR_ERR(vregion);
933                 goto err_vregion_get;
934         }
935
936         vchunk->vregion = vregion;
937
938         err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
939                                      mlxsw_sp_acl_tcam_vchunk_ht_params);
940         if (err)
941                 goto err_rhashtable_insert;
942
943         mutex_lock(&vregion->lock);
944         vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
945                                                        vchunk->vregion->region);
946         if (IS_ERR(vchunk->chunk)) {
947                 mutex_unlock(&vregion->lock);
948                 err = PTR_ERR(vchunk->chunk);
949                 goto err_chunk_create;
950         }
951
952         mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
953
954         /* Position the vchunk inside the list according to priority */
955         list_for_each(pos, &vregion->vchunk_list) {
956                 vchunk2 = list_entry(pos, typeof(*vchunk2), list);
957                 if (vchunk2->priority > priority)
958                         break;
959         }
960         list_add_tail(&vchunk->list, pos);
961         mutex_unlock(&vregion->lock);
962         mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
963
964         return vchunk;
965
966 err_chunk_create:
967         rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
968                                mlxsw_sp_acl_tcam_vchunk_ht_params);
969 err_rhashtable_insert:
970         mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
971 err_vregion_get:
972         kfree(vchunk);
973         return ERR_PTR(err);
974 }
975
976 static void
977 mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
978                                  struct mlxsw_sp_acl_tcam_vchunk *vchunk)
979 {
980         struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
981         struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;
982
983         mutex_lock(&vregion->lock);
984         mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
985         list_del(&vchunk->list);
986         if (vchunk->chunk2)
987                 mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
988         mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
989         mutex_unlock(&vregion->lock);
990         rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
991                                mlxsw_sp_acl_tcam_vchunk_ht_params);
992         mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
993         kfree(vchunk);
994         mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
995 }
996
997 static struct mlxsw_sp_acl_tcam_vchunk *
998 mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
999                              struct mlxsw_sp_acl_tcam_vgroup *vgroup,
1000                              unsigned int priority,
1001                              struct mlxsw_afk_element_usage *elusage)
1002 {
1003         struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1004
1005         vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
1006                                         mlxsw_sp_acl_tcam_vchunk_ht_params);
1007         if (vchunk) {
1008                 if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
1009                                                        elusage)))
1010                         return ERR_PTR(-EINVAL);
1011                 vchunk->ref_count++;
1012                 return vchunk;
1013         }
1014         return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
1015                                                priority, elusage);
1016 }
1017
1018 static void
1019 mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
1020                              struct mlxsw_sp_acl_tcam_vchunk *vchunk)
1021 {
1022         if (--vchunk->ref_count)
1023                 return;
1024         mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
1025 }
1026
1027 static struct mlxsw_sp_acl_tcam_entry *
1028 mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
1029                                struct mlxsw_sp_acl_tcam_ventry *ventry,
1030                                struct mlxsw_sp_acl_tcam_chunk *chunk)
1031 {
1032         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1033         struct mlxsw_sp_acl_tcam_entry *entry;
1034         int err;
1035
1036         entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
1037         if (!entry)
1038                 return ERR_PTR(-ENOMEM);
1039         entry->ventry = ventry;
1040         entry->chunk = chunk;
1041
1042         err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
1043                              entry->priv, ventry->rulei);
1044         if (err)
1045                 goto err_entry_add;
1046
1047         return entry;
1048
1049 err_entry_add:
1050         kfree(entry);
1051         return ERR_PTR(err);
1052 }
1053
1054 static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1055                                             struct mlxsw_sp_acl_tcam_entry *entry)
1056 {
1057         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1058
1059         ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
1060                        entry->chunk->priv, entry->priv);
1061         kfree(entry);
1062 }
1063
1064 static int
1065 mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
1066                                        struct mlxsw_sp_acl_tcam_region *region,
1067                                        struct mlxsw_sp_acl_tcam_entry *entry,
1068                                        struct mlxsw_sp_acl_rule_info *rulei)
1069 {
1070         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1071
1072         return ops->entry_action_replace(mlxsw_sp, region->priv,
1073                                          entry->priv, rulei);
1074 }
1075
1076 static int
1077 mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
1078                                      struct mlxsw_sp_acl_tcam_entry *entry,
1079                                      bool *activity)
1080 {
1081         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1082
1083         return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
1084                                        entry->priv, activity);
1085 }
1086
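/* Add a rule: resolve (or create) the vchunk matching the rule's priority
 * and element usage, create the hardware entry in the vchunk's current chunk
 * under the vregion lock and let the rehash context know the vchunk changed.
 */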
1087 static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
1088                                         struct mlxsw_sp_acl_tcam_vgroup *vgroup,
1089                                         struct mlxsw_sp_acl_tcam_ventry *ventry,
1090                                         struct mlxsw_sp_acl_rule_info *rulei)
1091 {
1092         struct mlxsw_sp_acl_tcam_vregion *vregion;
1093         struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1094         int err;
1095
1096         vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
1097                                               &rulei->values.elusage);
1098         if (IS_ERR(vchunk))
1099                 return PTR_ERR(vchunk);
1100
1101         ventry->vchunk = vchunk;
1102         ventry->rulei = rulei;
1103         vregion = vchunk->vregion;
1104
1105         mutex_lock(&vregion->lock);
1106         ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
1107                                                        vchunk->chunk);
1108         if (IS_ERR(ventry->entry)) {
1109                 mutex_unlock(&vregion->lock);
1110                 err = PTR_ERR(ventry->entry);
1111                 goto err_entry_create;
1112         }
1113
1114         list_add_tail(&ventry->list, &vchunk->ventry_list);
1115         mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
1116         mutex_unlock(&vregion->lock);
1117
1118         return 0;
1119
1120 err_entry_create:
1121         mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
1122         return err;
1123 }
1124
1125 static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
1126                                          struct mlxsw_sp_acl_tcam_ventry *ventry)
1127 {
1128         struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
1129         struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
1130
1131         mutex_lock(&vregion->lock);
1132         mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
1133         list_del(&ventry->list);
1134         mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
1135         mutex_unlock(&vregion->lock);
1136         mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
1137 }
1138
1139 static int
1140 mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
1141                                         struct mlxsw_sp_acl_tcam_ventry *ventry,
1142                                         struct mlxsw_sp_acl_rule_info *rulei)
1143 {
1144         struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
1145
1146         return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
1147                                                       vchunk->vregion->region,
1148                                                       ventry->entry, rulei);
1149 }
1150
1151 static int
1152 mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
1153                                       struct mlxsw_sp_acl_tcam_ventry *ventry,
1154                                       bool *activity)
1155 {
1156         return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
1157                                                     ventry->entry, activity);
1158 }
1159
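/* Migrate a single ventry to the given chunk by creating a new hardware
 * entry there and destroying the old one. One credit is consumed per
 * migrated entry; once the credit counter drops below zero, the ventry is
 * left in place and the caller records it as the point to resume from later.
 */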
1160 static int
1161 mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
1162                                  struct mlxsw_sp_acl_tcam_ventry *ventry,
1163                                  struct mlxsw_sp_acl_tcam_chunk *chunk,
1164                                  int *credits)
1165 {
1166         struct mlxsw_sp_acl_tcam_entry *new_entry;
1167
1168         /* First, check whether the entry is already where we want it to be. */
1169         if (ventry->entry->chunk == chunk)
1170                 return 0;
1171
1172         if (--(*credits) < 0)
1173                 return 0;
1174
1175         new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
1176         if (IS_ERR(new_entry))
1177                 return PTR_ERR(new_entry);
1178         mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
1179         ventry->entry = new_entry;
1180         return 0;
1181 }
1182
1183 static int
1184 mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
1185                                        struct mlxsw_sp_acl_tcam_vchunk *vchunk,
1186                                        struct mlxsw_sp_acl_tcam_region *region,
1187                                        struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1188 {
1189         struct mlxsw_sp_acl_tcam_chunk *new_chunk;
1190
1191         new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
1192         if (IS_ERR(new_chunk))
1193                 return PTR_ERR(new_chunk);
1194         vchunk->chunk2 = vchunk->chunk;
1195         vchunk->chunk = new_chunk;
1196         ctx->current_vchunk = vchunk;
1197         ctx->start_ventry = NULL;
1198         ctx->stop_ventry = NULL;
1199         return 0;
1200 }
1201
1202 static void
1203 mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
1204                                      struct mlxsw_sp_acl_tcam_vchunk *vchunk,
1205                                      struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1206 {
1207         mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
1208         vchunk->chunk2 = NULL;
1209         ctx->current_vchunk = NULL;
1210 }
1211
1212 static int
1213 mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
1214                                      struct mlxsw_sp_acl_tcam_vchunk *vchunk,
1215                                      struct mlxsw_sp_acl_tcam_region *region,
1216                                      struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
1217                                      int *credits)
1218 {
1219         struct mlxsw_sp_acl_tcam_ventry *ventry;
1220         int err;
1221
1222         if (vchunk->chunk->region != region) {
1223                 err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
1224                                                              region, ctx);
1225                 if (err)
1226                         return err;
1227         } else if (!vchunk->chunk2) {
1228                 /* The chunk is already as it should be, nothing to do. */
1229                 return 0;
1230         }
1231
1232         /* If the migration got interrupted, we have the ventry to start from
1233          * stored in context.
1234          */
1235         if (ctx->start_ventry)
1236                 ventry = ctx->start_ventry;
1237         else
1238                 ventry = list_first_entry(&vchunk->ventry_list,
1239                                           typeof(*ventry), list);
1240
1241         list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
1242                 /* During rollback, once we reach the ventry that failed
1243                  * to migrate, we are done.
1244                  */
1245                 if (ventry == ctx->stop_ventry)
1246                         break;
1247
1248                 err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
1249                                                        vchunk->chunk, credits);
1250                 if (err) {
1251                         if (ctx->this_is_rollback) {
1252                                 /* Save the ventry which we ended with and try
1253                                  * to continue later on.
1254                                  */
1255                                 ctx->start_ventry = ventry;
1256                                 return err;
1257                         }
1258                         /* Swap the chunk and chunk2 pointers so the follow-up
1259                          * rollback call will see the original chunk pointer
1260                          * in vchunk->chunk.
1261                          */
1262                         swap(vchunk->chunk, vchunk->chunk2);
1263                         /* The rollback has to be done from beginning of the
1264                          * chunk, that is why we have to null the start_ventry.
1265                          * However, we know where to stop the rollback,
1266                          * at the current ventry.
1267                          */
1268                         ctx->start_ventry = NULL;
1269                         ctx->stop_ventry = ventry;
1270                         return err;
1271                 } else if (*credits < 0) {
1272                         /* We are out of credits, the rest of the ventries
1273                          * will be migrated later. Save the ventry
1274                          * which we ended with.
1275                          */
1276                         ctx->start_ventry = ventry;
1277                         return 0;
1278                 }
1279         }
1280
1281         mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
1282         return 0;
1283 }
1284
1285 static int
1286 mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
1287                                      struct mlxsw_sp_acl_tcam_vregion *vregion,
1288                                      struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
1289                                      int *credits)
1290 {
1291         struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1292         int err;
1293
1294         /* If the migration got interrupted, we have the vchunk
1295          * we are working on stored in context.
1296          */
1297         if (ctx->current_vchunk)
1298                 vchunk = ctx->current_vchunk;
1299         else
1300                 vchunk = list_first_entry(&vregion->vchunk_list,
1301                                           typeof(*vchunk), list);
1302
1303         list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
1304                 err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
1305                                                            vregion->region,
1306                                                            ctx, credits);
1307                 if (err || *credits < 0)
1308                         return err;
1309         }
1310         return 0;
1311 }
1312
1313 static int
1314 mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
1315                                   struct mlxsw_sp_acl_tcam_vregion *vregion,
1316                                   struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
1317                                   int *credits)
1318 {
1319         int err, err2;
1320
1321         trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
1322         mutex_lock(&vregion->lock);
1323         err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
1324                                                    ctx, credits);
1325         if (err) {
1326                 /* In case migration was not successful, we need to swap
1327                  * so the original region pointer is assigned again
1328                  * to vregion->region.
1329                  */
1330                 swap(vregion->region, vregion->region2);
1331                 ctx->current_vchunk = NULL;
1332                 ctx->this_is_rollback = true;
1333                 err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
1334                                                             ctx, credits);
1335                 if (err2) {
1336                         trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
1337                                                                                vregion);
1338                         dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration failure\n");
1339                         /* Let the rollback be continued later on. */
1340                 }
1341         }
1342         mutex_unlock(&vregion->lock);
1343         trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
1344         return err;
1345 }
1346
1347 static bool
1348 mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1349 {
1350         return ctx->hints_priv;
1351 }
1352
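/* Start a rehash cycle: ask the low-level ops for hints describing a
 * preferred key layout, create a new region based on those hints and attach
 * it to the group directly in front of the existing region. The old region
 * is kept in vregion->region2 until the migration completes (see
 * mlxsw_sp_acl_tcam_vregion_rehash_end()).
 */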
1353 static int
1354 mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
1355                                        struct mlxsw_sp_acl_tcam_vregion *vregion,
1356                                        struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1357 {
1358         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1359         unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
1360         struct mlxsw_sp_acl_tcam_region *new_region;
1361         void *hints_priv;
1362         int err;
1363
1364         trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);
1365
1366         hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
1367         if (IS_ERR(hints_priv))
1368                 return PTR_ERR(hints_priv);
1369
1370         new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
1371                                                      vregion, hints_priv);
1372         if (IS_ERR(new_region)) {
1373                 err = PTR_ERR(new_region);
1374                 goto err_region_create;
1375         }
1376
1377         /* Make vregion->region point to the new region we are going to
1378          * migrate to, and keep the original region in vregion->region2.
1379          */
1380         vregion->region2 = vregion->region;
1381         vregion->region = new_region;
1382         err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
1383                                                     vregion->region2->group,
1384                                                     new_region, priority,
1385                                                     vregion->region2);
1386         if (err)
1387                 goto err_group_region_attach;
1388
1389         ctx->hints_priv = hints_priv;
1390         ctx->this_is_rollback = false;
1391
1392         return 0;
1393
1394 err_group_region_attach:
1395         vregion->region = vregion->region2;
1396         vregion->region2 = NULL;
1397         mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region);
1398 err_region_create:
1399         ops->region_rehash_hints_put(hints_priv);
1400         return err;
1401 }
1402
1403 static void
1404 mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
1405                                      struct mlxsw_sp_acl_tcam_vregion *vregion,
1406                                      struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1407 {
1408         struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
1409         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1410
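             /* vregion->region2 now holds the region that is no longer
              * needed: the original region after a successful migration, or
              * the new region after a rollback. Detach and destroy it and
              * release the rehash hints.
              */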
1411         vregion->region2 = NULL;
1412         mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
1413         mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
1414         ops->region_rehash_hints_put(ctx->hints_priv);
1415         ctx->hints_priv = NULL;
1416 }
1417
1418 static void
1419 mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
1420                                  struct mlxsw_sp_acl_tcam_vregion *vregion,
1421                                  int *credits)
1422 {
1423         struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
1424         int err;
1425
1426         /* Check if the previous rehash work was interrupted, in which
1427          * case we have to continue it now.
1428          * If not, start a new rehash.
1429          */
1430         if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
1431                 err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
1432                                                              vregion, ctx);
1433                 if (err) {
1434                         if (err != -EAGAIN)
1435                                 dev_err(mlxsw_sp->bus_info->dev, "Failed to get rehash hints\n");
1436                         return;
1437                 }
1438         }
1439
1440         err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
1441                                                 ctx, credits);
1442         if (err)
1443                 dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
1445
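             /* Credits are left over only when the migration (or its
              * rollback) fully completed, so only then can the rehash be
              * finished.
              */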
1446         if (*credits >= 0)
1447                 mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
1448 }
1449
1450 static int
1451 mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
1452                                            struct devlink_param_gset_ctx *ctx)
1453 {
1454         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1455         struct mlxsw_sp_acl_tcam *tcam;
1456         struct mlxsw_sp *mlxsw_sp;
1457
1458         mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1459         tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
1460         ctx->val.vu32 = tcam->vregion_rehash_intrvl;
1461
1462         return 0;
1463 }
1464
1465 static int
1466 mlxsw_sp_acl_tcam_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
1467                                            struct devlink_param_gset_ctx *ctx)
1468 {
1469         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1470         struct mlxsw_sp_acl_tcam_vregion *vregion;
1471         struct mlxsw_sp_acl_tcam *tcam;
1472         struct mlxsw_sp *mlxsw_sp;
1473         u32 val = ctx->val.vu32;
1474
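             /* A value of zero disables the periodic rehash entirely;
              * any other value below the minimum interval is rejected.
              */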
1475         if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
1476                 return -EINVAL;
1477
1478         mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1479         tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
1480         tcam->vregion_rehash_intrvl = val;
1481         mutex_lock(&tcam->lock);
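             /* Schedule an immediate rehash on every vregion when enabling,
              * or cancel any pending rehash work when disabling.
              */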
1482         list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
1483                 if (val)
1484                         mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
1485                 else
1486                         cancel_delayed_work_sync(&vregion->rehash.dw);
1487         }
1488         mutex_unlock(&tcam->lock);
1489         return 0;
1490 }
1491
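     /* Runtime devlink parameter controlling the rehash interval. Once
      * registered, it can typically be tuned from user space, for example
      * (the PCI address is purely illustrative):
      *
      *   devlink dev param set pci/0000:03:00.0 \
      *           name acl_region_rehash_interval value 3000 cmode runtime
      *
      * Setting the value to 0 disables the periodic rehash.
      */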
1492 static const struct devlink_param mlxsw_sp_acl_tcam_rehash_params[] = {
1493         DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
1494                              "acl_region_rehash_interval",
1495                              DEVLINK_PARAM_TYPE_U32,
1496                              BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1497                              mlxsw_sp_acl_tcam_region_rehash_intrvl_get,
1498                              mlxsw_sp_acl_tcam_region_rehash_intrvl_set,
1499                              NULL),
1500 };
1501
1502 static int mlxsw_sp_acl_tcam_rehash_params_register(struct mlxsw_sp *mlxsw_sp)
1503 {
1504         struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
1505
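             /* Only expose the rehash interval parameter when the low-level
              * ops actually support rehashing.
              */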
1506         if (!mlxsw_sp->acl_tcam_ops->region_rehash_hints_get)
1507                 return 0;
1508
1509         return devl_params_register(devlink, mlxsw_sp_acl_tcam_rehash_params,
1510                                     ARRAY_SIZE(mlxsw_sp_acl_tcam_rehash_params));
1511 }
1512
1513 static void
1514 mlxsw_sp_acl_tcam_rehash_params_unregister(struct mlxsw_sp *mlxsw_sp)
1515 {
1516         struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
1517
1518         if (!mlxsw_sp->acl_tcam_ops->region_rehash_hints_get)
1519                 return;
1520
1521         devl_params_unregister(devlink, mlxsw_sp_acl_tcam_rehash_params,
1522                                ARRAY_SIZE(mlxsw_sp_acl_tcam_rehash_params));
1523 }
1524
1525 int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
1526                            struct mlxsw_sp_acl_tcam *tcam)
1527 {
1528         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1529         u64 max_tcam_regions;
1530         u64 max_regions;
1531         u64 max_groups;
1532         int err;
1533
1534         mutex_init(&tcam->lock);
1535         tcam->vregion_rehash_intrvl =
1536                         MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
1537         INIT_LIST_HEAD(&tcam->vregion_list);
1538
1539         err = mlxsw_sp_acl_tcam_rehash_params_register(mlxsw_sp);
1540         if (err)
1541                 goto err_rehash_params_register;
1542
1543         max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1544                                               ACL_MAX_TCAM_REGIONS);
1545         max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
1546
1547         /* Use 1:1 mapping between ACL region and TCAM region */
1548         if (max_tcam_regions < max_regions)
1549                 max_regions = max_tcam_regions;
1550
1551         tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
1552         if (!tcam->used_regions) {
1553                 err = -ENOMEM;
1554                 goto err_alloc_used_regions;
1555         }
1556         tcam->max_regions = max_regions;
1557
1558         max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
1559         tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
1560         if (!tcam->used_groups) {
1561                 err = -ENOMEM;
1562                 goto err_alloc_used_groups;
1563         }
1564         tcam->max_groups = max_groups;
1565         tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1566                                                   ACL_MAX_GROUP_SIZE);
1567
1568         err = ops->init(mlxsw_sp, tcam->priv, tcam);
1569         if (err)
1570                 goto err_tcam_init;
1571
1572         return 0;
1573
1574 err_tcam_init:
1575         bitmap_free(tcam->used_groups);
1576 err_alloc_used_groups:
1577         bitmap_free(tcam->used_regions);
1578 err_alloc_used_regions:
1579         mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
1580 err_rehash_params_register:
1581         mutex_destroy(&tcam->lock);
1582         return err;
1583 }
1584
1585 void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
1586                             struct mlxsw_sp_acl_tcam *tcam)
1587 {
1588         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1589
1590         ops->fini(mlxsw_sp, tcam->priv);
1591         bitmap_free(tcam->used_groups);
1592         bitmap_free(tcam->used_regions);
1593         mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
1594         mutex_destroy(&tcam->lock);
1595 }
1596
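     /* Default key element patterns used when creating vgroups for the
      * flower and multicast router profiles below.
      */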
1597 static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
1598         MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
1599         MLXSW_AFK_ELEMENT_DMAC_32_47,
1600         MLXSW_AFK_ELEMENT_DMAC_0_31,
1601         MLXSW_AFK_ELEMENT_SMAC_32_47,
1602         MLXSW_AFK_ELEMENT_SMAC_0_31,
1603         MLXSW_AFK_ELEMENT_ETHERTYPE,
1604         MLXSW_AFK_ELEMENT_IP_PROTO,
1605         MLXSW_AFK_ELEMENT_SRC_IP_0_31,
1606         MLXSW_AFK_ELEMENT_DST_IP_0_31,
1607         MLXSW_AFK_ELEMENT_DST_L4_PORT,
1608         MLXSW_AFK_ELEMENT_SRC_L4_PORT,
1609         MLXSW_AFK_ELEMENT_VID,
1610         MLXSW_AFK_ELEMENT_PCP,
1611         MLXSW_AFK_ELEMENT_TCP_FLAGS,
1612         MLXSW_AFK_ELEMENT_IP_TTL_,
1613         MLXSW_AFK_ELEMENT_IP_ECN,
1614         MLXSW_AFK_ELEMENT_IP_DSCP,
1615 };
1616
1617 static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
1618         MLXSW_AFK_ELEMENT_ETHERTYPE,
1619         MLXSW_AFK_ELEMENT_IP_PROTO,
1620         MLXSW_AFK_ELEMENT_SRC_IP_96_127,
1621         MLXSW_AFK_ELEMENT_SRC_IP_64_95,
1622         MLXSW_AFK_ELEMENT_SRC_IP_32_63,
1623         MLXSW_AFK_ELEMENT_SRC_IP_0_31,
1624         MLXSW_AFK_ELEMENT_DST_IP_96_127,
1625         MLXSW_AFK_ELEMENT_DST_IP_64_95,
1626         MLXSW_AFK_ELEMENT_DST_IP_32_63,
1627         MLXSW_AFK_ELEMENT_DST_IP_0_31,
1628         MLXSW_AFK_ELEMENT_DST_L4_PORT,
1629         MLXSW_AFK_ELEMENT_SRC_L4_PORT,
1630 };
1631
1632 static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
1633         {
1634                 .elements = mlxsw_sp_acl_tcam_pattern_ipv4,
1635                 .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
1636         },
1637         {
1638                 .elements = mlxsw_sp_acl_tcam_pattern_ipv6,
1639                 .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
1640         },
1641 };
1642
1643 #define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
1644         ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
1645
1646 struct mlxsw_sp_acl_tcam_flower_ruleset {
1647         struct mlxsw_sp_acl_tcam_vgroup vgroup;
1648 };
1649
1650 struct mlxsw_sp_acl_tcam_flower_rule {
1651         struct mlxsw_sp_acl_tcam_ventry ventry;
1652 };
1653
1654 static int
1655 mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
1656                                      struct mlxsw_sp_acl_tcam *tcam,
1657                                      void *ruleset_priv,
1658                                      struct mlxsw_afk_element_usage *tmplt_elusage,
1659                                      unsigned int *p_min_prio,
1660                                      unsigned int *p_max_prio)
1661 {
1662         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1663
1664         return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
1665                                             mlxsw_sp_acl_tcam_patterns,
1666                                             MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
1667                                             tmplt_elusage, true,
1668                                             p_min_prio, p_max_prio);
1669 }
1670
1671 static void
1672 mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
1673                                      void *ruleset_priv)
1674 {
1675         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1676
1677         mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
1678 }
1679
1680 static int
1681 mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
1682                                       void *ruleset_priv,
1683                                       struct mlxsw_sp_port *mlxsw_sp_port,
1684                                       bool ingress)
1685 {
1686         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1687
1688         return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
1689                                             mlxsw_sp_port, ingress);
1690 }
1691
1692 static void
1693 mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
1694                                         void *ruleset_priv,
1695                                         struct mlxsw_sp_port *mlxsw_sp_port,
1696                                         bool ingress)
1697 {
1698         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1699
1700         mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
1701                                        mlxsw_sp_port, ingress);
1702 }
1703
1704 static u16
1705 mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
1706 {
1707         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1708
1709         return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
1710 }
1711
1712 static int
1713 mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
1714                                   void *ruleset_priv, void *rule_priv,
1715                                   struct mlxsw_sp_acl_rule_info *rulei)
1716 {
1717         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1718         struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1719
1720         return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
1721                                             &rule->ventry, rulei);
1722 }
1723
1724 static void
1725 mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
1726 {
1727         struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1728
1729         mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
1730 }
1731
1732 static int
1733 mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
1734                                              void *rule_priv,
1735                                              struct mlxsw_sp_acl_rule_info *rulei)
1736 {
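             /* Action replace is not implemented for the flower profile. */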
1737         return -EOPNOTSUPP;
1738 }
1739
1740 static int
1741 mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
1742                                            void *rule_priv, bool *activity)
1743 {
1744         struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1745
1746         return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
1747                                                      activity);
1748 }
1749
1750 static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
1751         .ruleset_priv_size      = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
1752         .ruleset_add            = mlxsw_sp_acl_tcam_flower_ruleset_add,
1753         .ruleset_del            = mlxsw_sp_acl_tcam_flower_ruleset_del,
1754         .ruleset_bind           = mlxsw_sp_acl_tcam_flower_ruleset_bind,
1755         .ruleset_unbind         = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
1756         .ruleset_group_id       = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
1757         .rule_priv_size         = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
1758         .rule_add               = mlxsw_sp_acl_tcam_flower_rule_add,
1759         .rule_del               = mlxsw_sp_acl_tcam_flower_rule_del,
1760         .rule_action_replace    = mlxsw_sp_acl_tcam_flower_rule_action_replace,
1761         .rule_activity_get      = mlxsw_sp_acl_tcam_flower_rule_activity_get,
1762 };
1763
1764 struct mlxsw_sp_acl_tcam_mr_ruleset {
1765         struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1766         struct mlxsw_sp_acl_tcam_vgroup vgroup;
1767 };
1768
1769 struct mlxsw_sp_acl_tcam_mr_rule {
1770         struct mlxsw_sp_acl_tcam_ventry ventry;
1771 };
1772
1773 static int
1774 mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
1775                                  struct mlxsw_sp_acl_tcam *tcam,
1776                                  void *ruleset_priv,
1777                                  struct mlxsw_afk_element_usage *tmplt_elusage,
1778                                  unsigned int *p_min_prio,
1779                                  unsigned int *p_max_prio)
1780 {
1781         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1782         int err;
1783
1784         err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
1785                                            mlxsw_sp_acl_tcam_patterns,
1786                                            MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
1787                                            tmplt_elusage, false,
1788                                            p_min_prio, p_max_prio);
1789         if (err)
1790                 return err;
1791
1792         /* For most TCAM clients it would make sense to take a TCAM chunk
1793          * only when the first rule is written. This is not the case for
1794          * the multicast router, which must be bound to a specific ACL
1795          * group ID, and that group ID has to exist in HW before the
1796          * multicast router is initialized.
1797          */
1798         ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
1799                                                        &ruleset->vgroup, 1,
1800                                                        tmplt_elusage);
1801         if (IS_ERR(ruleset->vchunk)) {
1802                 err = PTR_ERR(ruleset->vchunk);
1803                 goto err_chunk_get;
1804         }
1805
1806         return 0;
1807
1808 err_chunk_get:
1809         mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
1810         return err;
1811 }
1812
1813 static void
1814 mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
1815 {
1816         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1817
1818         mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
1819         mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
1820 }
1821
1822 static int
1823 mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
1824                                   struct mlxsw_sp_port *mlxsw_sp_port,
1825                                   bool ingress)
1826 {
1827         /* Binding is done when initializing the multicast router. */
1828         return 0;
1829 }
1830
1831 static void
1832 mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
1833                                     void *ruleset_priv,
1834                                     struct mlxsw_sp_port *mlxsw_sp_port,
1835                                     bool ingress)
1836 {
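             /* Nothing to do; as with binding, this is handled by the
              * multicast router itself.
              */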
1837 }
1838
1839 static u16
1840 mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
1841 {
1842         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1843
1844         return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
1845 }
1846
1847 static int
1848 mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
1849                               void *rule_priv,
1850                               struct mlxsw_sp_acl_rule_info *rulei)
1851 {
1852         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1853         struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1854
1855         return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
1856                                            &rule->ventry, rulei);
1857 }
1858
1859 static void
1860 mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
1861 {
1862         struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1863
1864         mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
1865 }
1866
1867 static int
1868 mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
1869                                          void *rule_priv,
1870                                          struct mlxsw_sp_acl_rule_info *rulei)
1871 {
1872         struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1873
1874         return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
1875                                                        rulei);
1876 }
1877
1878 static int
1879 mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
1880                                        void *rule_priv, bool *activity)
1881 {
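             /* Activity reporting is not supported for MR rules, so always
              * report the rule as inactive.
              */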
1882         *activity = false;
1883
1884         return 0;
1885 }
1886
1887 static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
1888         .ruleset_priv_size      = sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
1889         .ruleset_add            = mlxsw_sp_acl_tcam_mr_ruleset_add,
1890         .ruleset_del            = mlxsw_sp_acl_tcam_mr_ruleset_del,
1891         .ruleset_bind           = mlxsw_sp_acl_tcam_mr_ruleset_bind,
1892         .ruleset_unbind         = mlxsw_sp_acl_tcam_mr_ruleset_unbind,
1893         .ruleset_group_id       = mlxsw_sp_acl_tcam_mr_ruleset_group_id,
1894         .rule_priv_size         = sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
1895         .rule_add               = mlxsw_sp_acl_tcam_mr_rule_add,
1896         .rule_del               = mlxsw_sp_acl_tcam_mr_rule_del,
1897         .rule_action_replace    = mlxsw_sp_acl_tcam_mr_rule_action_replace,
1898         .rule_activity_get      = mlxsw_sp_acl_tcam_mr_rule_activity_get,
1899 };
1900
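     /* Per-profile operations, indexed by enum mlxsw_sp_acl_profile. */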
1901 static const struct mlxsw_sp_acl_profile_ops *
1902 mlxsw_sp_acl_tcam_profile_ops_arr[] = {
1903         [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
1904         [MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
1905 };
1906
1907 const struct mlxsw_sp_acl_profile_ops *
1908 mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
1909                               enum mlxsw_sp_acl_profile profile)
1910 {
1911         const struct mlxsw_sp_acl_profile_ops *ops;
1912
1913         if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
1914                 return NULL;
1915         ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
1916         if (WARN_ON(!ops))
1917                 return NULL;
1918         return ops;
1919 }