1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5 #include "mlx5_ifc_dr_ste_v1.h"
/*
 * Hardware action-type IDs for modify-header actions, as read from /
 * written to the STEv1 action_id field via MLX5_GET/MLX5_SET on
 * ste_double_action_set_v1 (see mlx5_ifc_dr_ste_v1.h).
 *
 * NOTE(review): the closing "};" of this enum is not visible here —
 * the source text appears truncated; confirm against the full file.
 */
7 enum dr_ptrn_modify_hdr_action_id {
8 DR_PTRN_MODIFY_HDR_ACTION_ID_NOP = 0x00,
9 DR_PTRN_MODIFY_HDR_ACTION_ID_COPY = 0x05,
10 DR_PTRN_MODIFY_HDR_ACTION_ID_SET = 0x06,
11 DR_PTRN_MODIFY_HDR_ACTION_ID_ADD = 0x07,
12 DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE = 0x0a,
/*
 * Manager for the modify-header pattern cache of one SW-steering domain.
 * Owns the ICM pool the patterns are written into and the cache list.
 *
 * NOTE(review): the closing "};" is not visible here — source text
 * appears truncated; confirm against the full file.
 */
15 struct mlx5dr_ptrn_mgr {
/* Owning domain; used for ICM addresses, caps and post-send. */
16 struct mlx5dr_domain *dmn;
/* ICM pool backing the hardware pattern memory (MODIFY_HDR_PTRN type). */
17 struct mlx5dr_icm_pool *ptrn_icm_pool;
18 /* cache for modify_header ptrn */
19 struct list_head ptrn_list;
20 struct mutex modify_hdr_mutex; /* protect the pattern cache */
23 /* Cache structure and functions */
/*
 * dr_ptrn_compare_modify_hdr() - compare a cached action array against a
 * candidate one to decide whether the cached pattern can be reused.
 *
 * Arrays of different length never match. Per action, COPY actions are
 * compared on the full 64-bit value, while the other action types are
 * compared only on the (__be32-truncated) part that carries the action
 * header — their inline/immediate data is filled in later per rule, so
 * it is deliberately excluded from the comparison.
 *
 * NOTE(review): several lines (early returns, closing braces, the final
 * "return true") are not visible here — source appears truncated.
 */
24 static bool dr_ptrn_compare_modify_hdr(size_t cur_num_of_actions,
25 __be64 cur_hw_actions[],
26 size_t num_of_actions,
/* Length mismatch means the patterns cannot be identical. */
31 if (cur_num_of_actions != num_of_actions)
34 for (i = 0; i < num_of_actions; i++) {
/* Extract the action type from the STEv1 action_id field. */
36 MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id);
38 if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_COPY) {
/* COPY has no inline data — compare the whole 64-bit action. */
39 if (hw_actions[i] != cur_hw_actions[i])
/* Other types: compare only the 32-bit header portion. */
42 if ((__force __be32)hw_actions[i] !=
43 (__force __be32)cur_hw_actions[i])
/*
 * dr_ptrn_find_cached_pattern() - look up a pattern in the cache list.
 *
 * Walks mgr->ptrn_list comparing each entry via
 * dr_ptrn_compare_modify_hdr(); on a hit the entry is moved to the head
 * of the list (simple MRU ordering) and returned.
 *
 * Caller must hold mgr->modify_hdr_mutex (the list is documented as
 * protected by it in struct mlx5dr_ptrn_mgr).
 *
 * NOTE(review): the tail of the compare-call argument list, closing
 * braces and the "return NULL" miss path are not visible here — source
 * appears truncated.
 */
51 static struct mlx5dr_ptrn_obj *
52 dr_ptrn_find_cached_pattern(struct mlx5dr_ptrn_mgr *mgr,
53 size_t num_of_actions,
56 struct mlx5dr_ptrn_obj *cached_pattern;
57 struct mlx5dr_ptrn_obj *tmp;
/* _safe variant because a matched entry is relinked mid-walk. */
59 list_for_each_entry_safe(cached_pattern, tmp, &mgr->ptrn_list, list) {
60 if (dr_ptrn_compare_modify_hdr(cached_pattern->num_of_actions,
61 (__be64 *)cached_pattern->data,
64 /* Put this pattern in the head of the list,
65 * as we will probably use it more.
/* list_del_init + list_add == move-to-front of the cache list. */
67 list_del_init(&cached_pattern->list);
68 list_add(&cached_pattern->list, &mgr->ptrn_list);
69 return cached_pattern;
/*
 * dr_ptrn_alloc_pattern() - allocate a new pattern object and its ICM
 * backing, copy the raw action data into it, and insert it into the
 * cache list with an initial refcount of 1.
 *
 * The pattern's HW index is derived from the chunk's ICM address
 * relative to the domain's hdr_modify_pattern_icm_addr base, in
 * DR_ACTION_CACHE_LINE_SIZE units.
 *
 * NOTE(review): the NULL checks after the allocations, the goto error
 * labels and the return statements are not visible here — source
 * appears truncated; the visible mlx5dr_icm_free_chunk() at the end is
 * presumably part of an error-unwind path.
 */
76 static struct mlx5dr_ptrn_obj *
77 dr_ptrn_alloc_pattern(struct mlx5dr_ptrn_mgr *mgr,
78 u16 num_of_actions, u8 *data)
80 struct mlx5dr_ptrn_obj *pattern;
81 struct mlx5dr_icm_chunk *chunk;
/* Chunk size is log2 of the action count... */
85 chunk_size = ilog2(num_of_actions);
86 /* HW modify action index granularity is at least 64B */
87 chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8);
89 chunk = mlx5dr_icm_alloc_chunk(mgr->ptrn_icm_pool, chunk_size);
/* HW pattern index = offset from the pattern ICM base, in cache lines. */
93 index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) -
94 mgr->dmn->info.caps.hdr_modify_pattern_icm_addr) /
95 DR_ACTION_CACHE_LINE_SIZE;
97 pattern = kzalloc(sizeof(*pattern), GFP_KERNEL);
/* Host-side copy of the action data, kept for cache comparisons. */
101 pattern->data = kzalloc(num_of_actions * DR_MODIFY_ACTION_SIZE *
102 sizeof(*pattern->data), GFP_KERNEL);
106 memcpy(pattern->data, data, num_of_actions * DR_MODIFY_ACTION_SIZE);
107 pattern->chunk = chunk;
108 pattern->index = index;
109 pattern->num_of_actions = num_of_actions;
/* Publish into the cache; caller holds the cache mutex. */
111 list_add(&pattern->list, &mgr->ptrn_list);
112 refcount_set(&pattern->refcount, 1);
/* Error unwind: release the ICM chunk. */
119 mlx5dr_icm_free_chunk(chunk);
/*
 * dr_ptrn_free_pattern() - unlink a pattern from the cache list and
 * release its ICM chunk and host-side data copy.
 *
 * Caller must hold the cache mutex (the list is protected by it).
 * NOTE(review): the return type line and a trailing kfree(pattern) are
 * not visible here — source appears truncated.
 */
124 dr_ptrn_free_pattern(struct mlx5dr_ptrn_obj *pattern)
126 list_del(&pattern->list);
127 mlx5dr_icm_free_chunk(pattern->chunk);
128 kfree(pattern->data);
/*
 * mlx5dr_ptrn_cache_get_pattern() - get a (possibly shared) pattern
 * object for the given modify-header action array.
 *
 * Under the cache mutex: look the pattern up in the cache; on a miss,
 * allocate a new one, zero out the inline_data field of every
 * SET/ADD/INSERT_INLINE action (the per-rule argument is OR'ed into the
 * pattern at execution time, so the pattern itself must carry zeros
 * there), and write the masked pattern to HW ICM via
 * mlx5dr_send_postsend_pattern(). On a cache hit the refcount is
 * incremented instead.
 *
 * NOTE(review): the hit/miss branch structure, the remaining lookup
 * arguments, the error labels and the return statements are not
 * visible here — source appears truncated. The refcount_dec() on the
 * post-send failure path presumably pairs with the initial refcount of
 * 1 set by dr_ptrn_alloc_pattern() before dr_ptrn_free_pattern() —
 * verify against the full file.
 */
132 struct mlx5dr_ptrn_obj *
133 mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mgr,
137 struct mlx5dr_ptrn_obj *pattern;
142 mutex_lock(&mgr->modify_hdr_mutex);
143 pattern = dr_ptrn_find_cached_pattern(mgr,
147 /* Alloc and add new pattern to cache */
148 pattern = dr_ptrn_alloc_pattern(mgr, num_of_actions, data);
152 hw_actions = (u64 *)pattern->data;
153 /* Here we mask the pattern data to create a valid pattern
154 * since we do an OR operation between the arg and pattern
156 for (i = 0; i < num_of_actions; i++) {
157 action_id = MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id);
159 if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_SET ||
160 action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_ADD ||
161 action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE)
/* Clear inline_data so the per-rule argument can be OR'ed in. */
162 MLX5_SET(ste_double_action_set_v1, &hw_actions[i], inline_data, 0);
/* Write the masked pattern into the HW pattern ICM. */
165 if (mlx5dr_send_postsend_pattern(mgr->dmn, pattern->chunk,
166 num_of_actions, pattern->data)) {
167 refcount_dec(&pattern->refcount);
/* Cache hit: another user of the same pattern. */
171 refcount_inc(&pattern->refcount);
174 mutex_unlock(&mgr->modify_hdr_mutex);
/* Error path: drop the freshly created pattern. */
179 dr_ptrn_free_pattern(pattern);
181 mutex_unlock(&mgr->modify_hdr_mutex);
/*
 * mlx5dr_ptrn_cache_put_pattern() - release one reference to a cached
 * pattern; the last put frees the pattern and removes it from the cache.
 *
 * The dec-and-test and the free both run under the cache mutex, so a
 * concurrent get cannot revive a pattern while it is being freed.
 * NOTE(review): the return-type line is not visible here — source
 * appears truncated.
 */
186 mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr,
187 struct mlx5dr_ptrn_obj *pattern)
189 mutex_lock(&mgr->modify_hdr_mutex);
191 if (refcount_dec_and_test(&pattern->refcount))
192 dr_ptrn_free_pattern(pattern);
194 mutex_unlock(&mgr->modify_hdr_mutex);
/*
 * mlx5dr_ptrn_mgr_create() - create the pattern manager for a domain.
 *
 * Returns NULL (no manager) when the device does not support
 * pattern/argument based modify-header, otherwise allocates the manager,
 * creates its MODIFY_HDR_PTRN ICM pool, and initializes the cache list
 * and mutex.
 *
 * NOTE(review): the early-return, the kzalloc NULL check, the error
 * unwind after the mlx5dr_err() and the final "return mgr" are not
 * visible here — source appears truncated.
 */
197 struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn)
199 struct mlx5dr_ptrn_mgr *mgr;
/* Pattern/arg modify-header is optional HW capability. */
201 if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
204 mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
209 mgr->ptrn_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_HDR_PTRN);
210 if (!mgr->ptrn_icm_pool) {
211 mlx5dr_err(dmn, "Couldn't get modify-header-pattern memory\n");
215 INIT_LIST_HEAD(&mgr->ptrn_list);
216 mutex_init(&mgr->modify_hdr_mutex);
/*
 * mlx5dr_ptrn_mgr_destroy() - tear down the pattern manager.
 *
 * All patterns should already have been put by their users — a
 * non-empty cache list at this point is a refcount leak, hence the
 * WARN_ON. Leftover entries are then reclaimed anyway before the ICM
 * pool and mutex are destroyed.
 *
 * NOTE(review): a NULL-mgr guard, a kfree(pattern) inside the loop and
 * a trailing kfree(mgr) are not visible here — source appears
 * truncated; confirm against the full file.
 */
225 void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr)
227 struct mlx5dr_ptrn_obj *pattern;
228 struct mlx5dr_ptrn_obj *tmp;
/* Leftover patterns here mean callers leaked references. */
233 WARN_ON(!list_empty(&mgr->ptrn_list));
/* Reclaim whatever is left so the pool can be destroyed cleanly. */
235 list_for_each_entry_safe(pattern, tmp, &mgr->ptrn_list, list) {
236 list_del(&pattern->list);
237 kfree(pattern->data);
241 mlx5dr_icm_pool_destroy(mgr->ptrn_icm_pool);
242 mutex_destroy(&mgr->modify_hdr_mutex);