// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <trace/events/mlxsw.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"
size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->priv_size;
}
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */
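/* Rehash is throttled in two ways: each vregion is rehashed at most once
 * per interval (and not at all when the interval is set to 0), and a single
 * invocation of the rehash work migrates at most
 * MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS entries before rescheduling
 * itself to continue.
 */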
int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	size_t alloc_size;
	int err;

	mutex_init(&tcam->lock);
	tcam->vregion_rehash_intrvl =
			MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
	INIT_LIST_HEAD(&tcam->vregion_list);

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
	tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_regions)
		return -ENOMEM;
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
	tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						  ACL_MAX_GROUP_SIZE);

	err = ops->init(mlxsw_sp, tcam->priv, tcam);
	if (err)
		goto err_tcam_init;

	return 0;

err_tcam_init:
	kfree(tcam->used_groups);
err_alloc_used_groups:
	kfree(tcam->used_regions);
	return err;
}
void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	mutex_destroy(&tcam->lock);
	ops->fini(mlxsw_sp, tcam->priv);
	kfree(tcam->used_groups);
	kfree(tcam->used_regions);
}
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u32 *priority, bool fillup_priority)
{
	u64 max_priority;

	if (!fillup_priority) {
		*priority = 0;
		return 0;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
		return -EIO;

	/* Priority range is 1..cap_kvd_size-1. */
	max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
	if (rulei->priority >= max_priority)
		return -EINVAL;

	/* Unlike in TC, in HW, higher number means higher priority. */
	*priority = max_priority - rulei->priority;
	return 0;
}
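/* Example: with a KVD size of 1024 the usable HW priority range is
 * 1..1023, so TC priority 0 (highest in TC) maps to HW priority 1023
 * (highest in HW) and TC priority 1022 maps to HW priority 1.
 */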
static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
					   u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
	if (id < tcam->max_regions) {
		__set_bit(id, tcam->used_regions);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
					  u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
	if (id < tcam->max_groups) {
		__set_bit(id, tcam->used_groups);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}
struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
	struct mutex lock; /* guards region list updates */
	struct list_head region_list;
	unsigned int region_count;
};

struct mlxsw_sp_acl_tcam_vgroup {
	struct mlxsw_sp_acl_tcam_group group;
	struct list_head vregion_list;
	struct rhashtable vchunk_ht;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
	bool tmplt_elusage_set;
	struct mlxsw_afk_element_usage tmplt_elusage;
	bool vregion_rehash_enabled;
};
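/* A vregion can periodically be "rehashed": its rules are migrated to a
 * freshly created region whose key layout is derived from hints provided
 * by the low-level ops. The context below records how far an interrupted
 * migration (or its rollback) got, so the delayed work can resume it:
 * the vchunk currently being migrated and the ventries to start from and
 * stop at.
 */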
struct mlxsw_sp_acl_tcam_rehash_ctx {
	void *hints_priv;
	bool this_is_rollback;
	struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk being
							  * currently migrated.
							  */
	struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry to start
							* migration from. Reset
							* for each new vchunk
							* currently migrated.
							*/
	struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry to stop
						       * migration at. Reset
						       * for each new vchunk
						       * currently migrated.
						       */
};

struct mlxsw_sp_acl_tcam_vregion {
	struct mutex lock; /* Protects consistency of region, region2 pointers
			    * and vchunk_list.
			    */
	struct mlxsw_sp_acl_tcam_region *region;
	struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
	struct list_head list; /* Member of a TCAM group */
	struct list_head tlist; /* Member of a TCAM */
	struct list_head vchunk_list; /* List of vchunks under this vregion */
	struct mlxsw_afk_key_info *key_info;
	struct mlxsw_sp_acl_tcam *tcam;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
	struct {
		struct delayed_work dw;
		struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
	} rehash;
	struct mlxsw_sp *mlxsw_sp;
	unsigned int ref_count;
};
struct mlxsw_sp_acl_tcam_vchunk;

struct mlxsw_sp_acl_tcam_chunk {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned long priv[];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_vchunk {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
	struct list_head list; /* Member of a TCAM vregion */
	struct rhash_head ht_node; /* Member of a chunk HT */
	struct list_head ventry_list;
	unsigned int priority; /* Priority within the vregion and group */
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_entry {
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	unsigned long priv[];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_ventry {
	struct mlxsw_sp_acl_tcam_entry *entry;
	struct list_head list; /* Member of a TCAM vchunk */
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_rule_info *rulei;
};

static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
	.automatic_shrinking = true,
};
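/* vchunks are hashed by their priority, which lets
 * mlxsw_sp_acl_tcam_vchunk_get() find an existing vchunk for a rule's
 * priority with a single rhashtable lookup.
 */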
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list) {
		bool multi = false;

		/* Check if the next entry in the list has the same vregion. */
		if (region->list.next != &group->region_list &&
		    list_next_entry(region, list)->vregion == region->vregion)
			multi = true;
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
					   region->id, multi);
	}
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}
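/* The ordered region list of a group is mirrored to the device through
 * the PAGT register above. A region whose successor in the list belongs
 * to the same vregion (which happens transiently during rehash migration)
 * is packed with the "multi" flag set.
 */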
static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group)
{
	int err;

	group->tcam = tcam;
	mutex_init(&group->lock);
	INIT_LIST_HEAD(&group->region_list);

	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	return 0;
}

static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	mutex_destroy(&group->lock);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
}
static int
mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam *tcam,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     const struct mlxsw_sp_acl_tcam_pattern *patterns,
			     unsigned int patterns_count,
			     struct mlxsw_afk_element_usage *tmplt_elusage,
			     bool vregion_rehash_enabled)
{
	int err;

	vgroup->patterns = patterns;
	vgroup->patterns_count = patterns_count;
	vgroup->vregion_rehash_enabled = vregion_rehash_enabled;

	if (tmplt_elusage) {
		vgroup->tmplt_elusage_set = true;
		memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
		       sizeof(vgroup->tmplt_elusage));
	}
	INIT_LIST_HEAD(&vgroup->vregion_list);

	err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
	if (err)
		return err;

	err = rhashtable_init(&vgroup->vchunk_ht,
			      &mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
	rhashtable_destroy(&vgroup->vchunk_ht);
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	WARN_ON(!list_empty(&vgroup->vregion_list));
}
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	/* As a priority of a vregion, return priority of the first vchunk */
	vchunk = list_first_entry(&vregion->vchunk_list,
				  typeof(*vchunk), list);
	return vchunk->priority;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	vchunk = list_last_entry(&vregion->vchunk_list,
				 typeof(*vchunk), list);
	return vchunk->priority;
}
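/* The vchunk list of a vregion is kept sorted by priority, so the
 * vregion's effective priority span is simply the priority of its first
 * vchunk through the priority of its last one.
 */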
static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int priority,
				      struct mlxsw_sp_acl_tcam_region *next_region)
{
	struct mlxsw_sp_acl_tcam_region *region2;
	struct list_head *pos;
	int err;

	mutex_lock(&group->lock);
	if (group->region_count == group->tcam->max_group_size) {
		err = -ENOBUFS;
		goto err_region_count_check;
	}

	if (next_region) {
		/* If the next region is defined, place the new one
		 * before it. The next one is a sibling.
		 */
		pos = &next_region->list;
	} else {
		/* Position the region inside the list according to priority */
		list_for_each(pos, &group->region_list) {
			region2 = list_entry(pos, typeof(*region2), list);
			if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
			    priority)
				break;
		}
	}
	list_add_tail(&region->list, pos);
	region->group = group;

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;

	group->region_count++;
	mutex_unlock(&group->lock);
	return 0;

err_group_update:
	list_del(&region->list);
err_region_count_check:
	mutex_unlock(&group->lock);
	return err;
}

static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->group;

	mutex_lock(&group->lock);
	list_del(&region->list);
	group->region_count--;
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	mutex_unlock(&group->lock);
}
static int
mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_vregion *vregion,
					unsigned int priority)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion2;
	struct list_head *pos;
	int err;

	/* Position the vregion inside the list according to priority */
	list_for_each(pos, &vgroup->vregion_list) {
		vregion2 = list_entry(pos, typeof(*vregion2), list);
		if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
			break;
	}
	list_add_tail(&vregion->list, pos);

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
						    vregion->region,
						    priority, NULL);
	if (err)
		goto err_region_attach;

	return 0;

err_region_attach:
	list_del(&vregion->list);
	return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	list_del(&vregion->list);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
						      vregion->region2);
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
}
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      unsigned int priority,
				      struct mlxsw_afk_element_usage *elusage,
				      bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &vgroup->vregion_list) {
		vregion = list_entry(pos, typeof(*vregion), list);

		/* First, check if the requested priority does not rather belong
		 * under some of the next vregions.
		 */
		if (pos->next != &vgroup->vregion_list) { /* not last */
			vregion2 = list_entry(pos->next, typeof(*vregion2),
					      list);
			if (priority >=
			    mlxsw_sp_acl_tcam_vregion_prio(vregion2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(vregion->key_info,
						     elusage);

		/* If requested element usage would not fit and the priority
		 * is lower than the currently inspected vregion we cannot
		 * use this region, so return NULL to indicate new vregion has
		 * to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
			return NULL;

		/* If requested element usage would not fit and the priority
		 * is higher than the currently inspected vregion we cannot
		 * use this vregion. There is still some hope that the next
		 * vregion would be the fit. So let it be processed and
		 * eventually break at the check right above this.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
			continue;

		/* Indicate if the vregion needs to be split in order to add
		 * the requested priority. Split is needed when requested
		 * element usage won't fit into the found vregion.
		 */
		*p_need_split = !issubset;
		return vregion;
	}
	return NULL; /* New vregion has to be created. */
}
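/* Three outcomes are possible above: an existing vregion is returned and
 * can be used as-is (*p_need_split is false), an existing vregion is
 * returned but would have to be split first (*p_need_split is true), or
 * NULL is returned and a new vregion has to be created.
 */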
static void
mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      struct mlxsw_afk_element_usage *elusage,
				      struct mlxsw_afk_element_usage *out)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

	/* In case the template is set, we don't have to look up the pattern
	 * and just use the template.
	 */
	if (vgroup->tmplt_elusage_set) {
		memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
		WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
		return;
	}

	for (i = 0; i < vgroup->patterns_count; i++) {
		pattern = &vgroup->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	memcpy(out, elusage, sizeof(*out));
}
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    region->key_type,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
			    region->key_type, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
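/* Creating a usable region below is a fixed sequence: reserve a region ID,
 * let the low-level ops associate it, allocate it in hardware via PTAR,
 * enable it via PACL and finally run the ops-specific initialization. The
 * error path unwinds the same steps in reverse.
 */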
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_sp_acl_tcam_vregion *vregion,
				void *hints_priv)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	region->mlxsw_sp = mlxsw_sp;
	region->vregion = vregion;
	region->key_info = vregion->key_info;

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = ops->region_associate(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_associate;

	region->key_type = ops->key_type;
	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
			       region, hints_priv);
	if (err)
		goto err_tcam_region_init;

	return region;

err_tcam_region_init:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	kfree(region);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->region_fini(mlxsw_sp, region->priv);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
					region->id);
	kfree(region);
}
static void
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	unsigned long interval = vregion->tcam->vregion_rehash_intrvl;

	if (!interval)
		return;
	mlxsw_core_schedule_dw(&vregion->rehash.dw,
			       msecs_to_jiffies(interval));
}

static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion,
				 int *credits);

static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion =
		container_of(work, struct mlxsw_sp_acl_tcam_vregion,
			     rehash.dw.work);
	int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;

	mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
	if (credits < 0)
		/* Rehash gone out of credits so it was interrupted.
		 * Schedule the work as soon as possible to continue.
		 */
		mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
	else
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}
static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	/* If a rule was added or deleted from vchunk which is currently
	 * under rehash migration, we have to reset the ventry pointers
	 * to make sure all rules are properly migrated.
	 */
	if (vregion->rehash.ctx.current_vchunk == vchunk) {
		vregion->rehash.ctx.start_ventry = NULL;
		vregion->rehash.ctx.stop_ventry = NULL;
	}
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	/* If a chunk was added or deleted from vregion we have to reset
	 * the current chunk pointer to make sure all chunks
	 * are properly migrated.
	 */
	vregion->rehash.ctx.current_vchunk = NULL;
}
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				 unsigned int priority,
				 struct mlxsw_afk_element_usage *elusage)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	int err;

	vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
	if (!vregion)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vregion->vchunk_list);
	mutex_init(&vregion->lock);
	vregion->tcam = tcam;
	vregion->mlxsw_sp = mlxsw_sp;
	vregion->vgroup = vgroup;
	vregion->ref_count = 1;

	vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(vregion->key_info)) {
		err = PTR_ERR(vregion->key_info);
		goto err_key_info_get;
	}

	vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
							  vregion, NULL);
	if (IS_ERR(vregion->region)) {
		err = PTR_ERR(vregion->region);
		goto err_region_create;
	}

	err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
						      priority);
	if (err)
		goto err_vgroup_vregion_attach;

	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
		/* Create the delayed work for vregion periodic rehash */
		INIT_DELAYED_WORK(&vregion->rehash.dw,
				  mlxsw_sp_acl_tcam_vregion_rehash_work);
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
		mutex_lock(&tcam->lock);
		list_add_tail(&vregion->tlist, &tcam->vregion_list);
		mutex_unlock(&tcam->lock);
	}

	return vregion;

err_vgroup_vregion_attach:
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
err_region_create:
	mlxsw_afk_key_info_put(vregion->key_info);
err_key_info_get:
	kfree(vregion);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
	struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;

	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
		mutex_lock(&tcam->lock);
		list_del(&vregion->tlist);
		mutex_unlock(&tcam->lock);
		cancel_delayed_work_sync(&vregion->rehash.dw);
	}
	mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
	mlxsw_afk_key_info_put(vregion->key_info);
	mutex_destroy(&vregion->lock);
	kfree(vregion);
}
u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u32 vregion_rehash_intrvl;

	if (WARN_ON(!ops->region_rehash_hints_get))
		return 0;
	vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
	return vregion_rehash_intrvl;
}

int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam,
						u32 val)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_vregion *vregion;

	if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
		return -EINVAL;
	if (WARN_ON(!ops->region_rehash_hints_get))
		return -EOPNOTSUPP;
	tcam->vregion_rehash_intrvl = val;
	mutex_lock(&tcam->lock);
	list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
		if (val)
			mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
		else
			cancel_delayed_work_sync(&vregion->rehash.dw);
	}
	mutex_unlock(&tcam->lock);
	return 0;
}
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_afk_element_usage vregion_elusage;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	bool need_split;

	vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
							elusage, &need_split);
	if (vregion) {
		if (need_split) {
			/* According to priority, new vchunk should belong to
			 * an existing vregion. However, this vchunk needs
			 * elements that vregion does not contain. We need
			 * to split the existing vregion into two and create
			 * a new vregion for the new vchunk in between.
			 * This is not supported now.
			 */
			return ERR_PTR(-EOPNOTSUPP);
		}
		vregion->ref_count++;
		return vregion;
	}

	mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
					      &vregion_elusage);

	return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
						&vregion_elusage);
}

static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	if (--vregion->ref_count)
		return;
	mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->vchunk = vchunk;
	chunk->region = region;

	ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
	return chunk;
}

static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->chunk_fini(chunk->priv);
	kfree(chunk);
}
static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				unsigned int priority,
				struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	struct list_head *pos;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
	if (!vchunk)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vchunk->ventry_list);
	vchunk->priority = priority;
	vchunk->vgroup = vgroup;
	vchunk->ref_count = 1;

	vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
						priority, elusage);
	if (IS_ERR(vregion)) {
		err = PTR_ERR(vregion);
		goto err_vregion_get;
	}

	vchunk->vregion = vregion;

	err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
				     mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	mutex_lock(&vregion->lock);
	vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
						       vchunk->vregion->region);
	if (IS_ERR(vchunk->chunk)) {
		mutex_unlock(&vregion->lock);
		err = PTR_ERR(vchunk->chunk);
		goto err_chunk_create;
	}

	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);

	/* Position the vchunk inside the list according to priority */
	list_for_each(pos, &vregion->vchunk_list) {
		vchunk2 = list_entry(pos, typeof(*vchunk2), list);
		if (vchunk2->priority > priority)
			break;
	}
	list_add_tail(&vchunk->list, pos);
	mutex_unlock(&vregion->lock);

	return vchunk;

err_chunk_create:
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
err_rhashtable_insert:
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
err_vregion_get:
	kfree(vchunk);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;

	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
	list_del(&vchunk->list);
	if (vchunk->chunk2)
		mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
	mutex_unlock(&vregion->lock);
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
	kfree(vchunk);
}
static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     unsigned int priority,
			     struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
					mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (vchunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
						       elusage)))
			return ERR_PTR(-EINVAL);
		vchunk->ref_count++;
		return vchunk;
	}
	return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
					       priority, elusage);
}

static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	if (--vchunk->ref_count)
		return;
	mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}
static struct mlxsw_sp_acl_tcam_entry *
mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_ventry *ventry,
			       struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_entry *entry;
	int err;

	entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);
	entry->ventry = ventry;
	entry->chunk = chunk;

	err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
			     entry->priv, ventry->rulei);
	if (err)
		goto err_entry_add;

	return entry;

err_entry_add:
	kfree(entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_acl_tcam_entry *entry)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
		       entry->chunk->priv, entry->priv);
	kfree(entry);
}

static int
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_action_replace(mlxsw_sp, region->priv,
					 entry->priv, rulei);
}

static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_entry *entry,
				     bool *activity)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
				       entry->priv, activity);
}
static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
					      &rulei->values.elusage);
	if (IS_ERR(vchunk))
		return PTR_ERR(vchunk);

	ventry->vchunk = vchunk;
	ventry->rulei = rulei;
	vregion = vchunk->vregion;

	mutex_lock(&vregion->lock);
	ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
						       vchunk->chunk);
	if (IS_ERR(ventry->entry)) {
		mutex_unlock(&vregion->lock);
		err = PTR_ERR(ventry->entry);
		goto err_entry_create;
	}

	list_add_tail(&ventry->list, &vchunk->ventry_list);
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
	mutex_unlock(&vregion->lock);

	return 0;

err_entry_create:
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
	return err;
}

static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_tcam_ventry *ventry)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
	list_del(&ventry->list);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	mutex_unlock(&vregion->lock);
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
}

static int
mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;

	return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
						      vchunk->vregion->region,
						      ventry->entry, rulei);
}

static int
mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_ventry *ventry,
				      bool *activity)
{
	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
						    ventry->entry, activity);
}
static int
mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_ventry *ventry,
				 struct mlxsw_sp_acl_tcam_chunk *chunk,
				 int *credits)
{
	struct mlxsw_sp_acl_tcam_entry *new_entry;

	/* First check if the entry is not already where we want it to be. */
	if (ventry->entry->chunk == chunk)
		return 0;

	if (--(*credits) < 0)
		return 0;

	new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
	if (IS_ERR(new_entry))
		return PTR_ERR(new_entry);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	ventry->entry = new_entry;
	return 0;
}
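/* Migrating a single ventry is make-before-break: the new entry is
 * created in the target chunk before the old one is destroyed, so the
 * rule stays present in hardware throughout the move.
 */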
static int
mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	struct mlxsw_sp_acl_tcam_chunk *new_chunk;

	new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
	if (IS_ERR(new_chunk))
		return PTR_ERR(new_chunk);
	vchunk->chunk2 = vchunk->chunk;
	vchunk->chunk = new_chunk;
	ctx->current_vchunk = vchunk;
	ctx->start_ventry = NULL;
	ctx->stop_ventry = NULL;
	return 0;
}

static void
mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	vchunk->chunk2 = NULL;
	ctx->current_vchunk = NULL;
}
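/* While a vchunk is being migrated, vchunk->chunk points to the chunk in
 * the new region and vchunk->chunk2 keeps the chunk in the old region;
 * migrate_end drops the old chunk once all ventries have moved.
 */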
static int
mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_region *region,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				     int *credits)
{
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	int err;

	if (vchunk->chunk->region != region) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
							     region, ctx);
		if (err)
			return err;
	} else if (!vchunk->chunk2) {
		/* The chunk is already as it should be, nothing to do. */
		return 0;
	}

	/* If the migration got interrupted, we have the ventry to start from
	 * stored in context.
	 */
	if (ctx->start_ventry)
		ventry = ctx->start_ventry;
	else
		ventry = list_first_entry(&vchunk->ventry_list,
					  typeof(*ventry), list);

	list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
		/* During rollback, once we reach the ventry that failed
		 * to migrate, we are done.
		 */
		if (ventry == ctx->stop_ventry)
			break;

		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
						       vchunk->chunk, credits);
		if (err) {
			if (ctx->this_is_rollback) {
				/* Save the ventry which we ended with and try
				 * to continue later on.
				 */
				ctx->start_ventry = ventry;
				return err;
			}
			/* Swap the chunk and chunk2 pointers so the follow-up
			 * rollback call will see the original chunk pointer
			 * in vchunk->chunk.
			 */
			swap(vchunk->chunk, vchunk->chunk2);
			/* The rollback has to be done from beginning of the
			 * chunk, that is why we have to null the start_ventry.
			 * However, we know where to stop the rollback,
			 * at the current ventry.
			 */
			ctx->start_ventry = NULL;
			ctx->stop_ventry = ventry;
			return err;
		} else if (*credits < 0) {
			/* We are out of credits, the rest of the ventries
			 * will be migrated later. Save the ventry
			 * which we ended with.
			 */
			ctx->start_ventry = ventry;
			return 0;
		}
	}

	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
	return 0;
}
static int
mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vregion *vregion,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				     int *credits)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	/* If the migration got interrupted, we have the vchunk
	 * we are working on stored in context.
	 */
	if (ctx->current_vchunk)
		vchunk = ctx->current_vchunk;
	else
		vchunk = list_first_entry(&vregion->vchunk_list,
					  typeof(*vchunk), list);

	list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
							   vregion->region,
							   ctx, credits);
		if (err || *credits < 0)
			return err;
	}
	return 0;
}

static int
mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion,
				  struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				  int *credits)
{
	int err, err2;

	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
	mutex_lock(&vregion->lock);
	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
						   ctx, credits);
	if (err) {
		/* In case migration was not successful, we need to swap
		 * so the original region pointer is assigned again
		 * to vregion->region.
		 */
		swap(vregion->region, vregion->region2);
		ctx->current_vchunk = NULL;
		ctx->this_is_rollback = true;
		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
							    ctx, credits);
		if (err2) {
			trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
									       vregion);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
			/* Let the rollback to be continued later on. */
		}
	}
	mutex_unlock(&vregion->lock);
	trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
	return err;
}
static bool
mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	return ctx->hints_priv;
}

static int
mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_vregion *vregion,
				       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
	struct mlxsw_sp_acl_tcam_region *new_region;
	void *hints_priv;
	int err;

	trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);

	hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
	if (IS_ERR(hints_priv))
		return PTR_ERR(hints_priv);

	new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
						     vregion, hints_priv);
	if (IS_ERR(new_region)) {
		err = PTR_ERR(new_region);
		goto err_region_create;
	}

	/* vregion->region contains the pointer to the new region
	 * we are going to migrate to.
	 */
	vregion->region2 = vregion->region;
	vregion->region = new_region;
	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
						    vregion->region2->group,
						    new_region, priority,
						    vregion->region2);
	if (err)
		goto err_group_region_attach;

	ctx->hints_priv = hints_priv;
	ctx->this_is_rollback = false;

	return 0;

err_group_region_attach:
	vregion->region = vregion->region2;
	vregion->region2 = NULL;
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region);
err_region_create:
	ops->region_rehash_hints_put(hints_priv);
	return err;
}
static void
mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vregion *vregion,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	vregion->region2 = NULL;
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
	ops->region_rehash_hints_put(ctx->hints_priv);
	ctx->hints_priv = NULL;
}

static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion,
				 int *credits)
{
	struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
	int err;

	/* Check if the previous rehash work was interrupted
	 * which means we have to continue it now.
	 * If not, start a new rehash.
	 */
	if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
		err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
							     vregion, ctx);
		if (err) {
			if (err != -EAGAIN)
				dev_err(mlxsw_sp->bus_info->dev, "Failed get rehash hints\n");
			return;
		}
	}

	err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
						ctx, credits);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");

	if (*credits >= 0)
		mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
}
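/* A full rehash cycle therefore has three phases: start (fetch hints and
 * attach a new region), migrate (move ventries, bounded by credits and
 * possibly spread over several work invocations) and end (detach and
 * destroy the now-unused old region).
 */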
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC_32_47,
	MLXSW_AFK_ELEMENT_DMAC_0_31,
	MLXSW_AFK_ELEMENT_SMAC_32_47,
	MLXSW_AFK_ELEMENT_SMAC_0_31,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
};

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_96_127,
	MLXSW_AFK_ELEMENT_SRC_IP_64_95,
	MLXSW_AFK_ELEMENT_SRC_IP_32_63,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_96_127,
	MLXSW_AFK_ELEMENT_DST_IP_64_95,
	MLXSW_AFK_ELEMENT_DST_IP_32_63,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};

static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};
#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
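/* Pattern order matters: mlxsw_sp_acl_tcam_vgroup_use_patterns() picks
 * the first pattern whose element set covers the requested usage, and
 * falls back to the exact requested usage when none does.
 */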
struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam *tcam,
				     void *ruleset_priv,
				     struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					    mlxsw_sp_acl_tcam_patterns,
					    MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					    tmplt_elusage, true);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
				     void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				      void *ruleset_priv,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
					    mlxsw_sp_port, ingress);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					void *ruleset_priv,
					struct mlxsw_sp_port *mlxsw_sp_port,
					bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
				       mlxsw_sp_port, ingress);
}

static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}
static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
				  void *ruleset_priv, void *rule_priv,
				  struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					     void *rule_priv,
					     struct mlxsw_sp_acl_rule_info *rulei)
{
	return -EOPNOTSUPP;
}

static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
					   void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add = mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
	.rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_action_replace = mlxsw_sp_acl_tcam_flower_rule_action_replace,
	.rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
struct mlxsw_sp_acl_tcam_mr_ruleset {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_mr_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam *tcam,
				 void *ruleset_priv,
				 struct mlxsw_afk_element_usage *tmplt_elusage)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	int err;

	err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					   mlxsw_sp_acl_tcam_patterns,
					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					   tmplt_elusage, false);
	if (err)
		return err;

	/* For most of the TCAM clients it would make sense to take a tcam chunk
	 * only when the first rule is written. This is not the case for
	 * multicast router as it is required to bind the multicast router to a
	 * specific ACL Group ID which must exist in HW before multicast router
	 * is initialized.
	 */
	ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
						       &ruleset->vgroup, 1,
						       tmplt_elusage);
	if (IS_ERR(ruleset->vchunk)) {
		err = PTR_ERR(ruleset->vchunk);
		goto err_chunk_get;
	}

	return 0;

err_chunk_get:
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
	return err;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  bool ingress)
{
	/* Binding is done when initializing multicast router */
	return 0;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				    void *ruleset_priv,
				    struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
}

static u16
mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}
static int
mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
			      void *rule_priv,
			      struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					 void *rule_priv,
					 struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
						       rulei);
}

static int
mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
				       void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
	.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
	.ruleset_add = mlxsw_sp_acl_tcam_mr_ruleset_add,
	.ruleset_del = mlxsw_sp_acl_tcam_mr_ruleset_del,
	.ruleset_bind = mlxsw_sp_acl_tcam_mr_ruleset_bind,
	.ruleset_unbind = mlxsw_sp_acl_tcam_mr_ruleset_unbind,
	.ruleset_group_id = mlxsw_sp_acl_tcam_mr_ruleset_group_id,
	.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
	.rule_add = mlxsw_sp_acl_tcam_mr_rule_add,
	.rule_del = mlxsw_sp_acl_tcam_mr_rule_del,
	.rule_action_replace = mlxsw_sp_acl_tcam_mr_rule_action_replace,
	.rule_activity_get = mlxsw_sp_acl_tcam_mr_rule_activity_get,
};
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
	[MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
};

const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
			      enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;

	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
		return NULL;
	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
	if (WARN_ON(!ops))
		return NULL;
	return ops;
}