/* struct for callback API management */
struct mlx5e_async_ctx {
struct mlx5_async_work context;
- struct mlx5_async_ctx async_ctx;
+ struct mlx5_async_ctx *async_ctx;
struct mlx5e_ktls_offload_context_tx *priv_tx;
int err;
union {
u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
u32 out_destroy[MLX5_ST_SZ_DW(destroy_tis_out)];
};
};
-static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
+struct mlx5e_bulk_async_ctx {
+ struct mlx5_async_ctx async_ctx;
+ DECLARE_FLEX_ARRAY(struct mlx5e_async_ctx, arr);
+};
+
+static struct mlx5e_bulk_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
{
- struct mlx5e_async_ctx *bulk_async;
+ struct mlx5e_bulk_async_ctx *bulk_async;
+ int sz;
int i;
- bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
+ sz = struct_size(bulk_async, arr, n);
+ bulk_async = kvzalloc(sz, GFP_KERNEL);
if (!bulk_async)
return NULL;
- for (i = 0; i < n; i++) {
- struct mlx5e_async_ctx *async = &bulk_async[i];
+ mlx5_cmd_init_async_ctx(mdev, &bulk_async->async_ctx);

- mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
- }
+ for (i = 0; i < n; i++)
+ bulk_async->arr[i].async_ctx = &bulk_async->async_ctx;
return bulk_async;
}
-static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
+static void mlx5e_bulk_async_cleanup(struct mlx5e_bulk_async_ctx *bulk_async)
{
- int i;
-
- for (i = 0; i < n; i++) {
- struct mlx5e_async_ctx *async = &bulk_async[i];
-
- mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
- }
+ mlx5_cmd_cleanup_async_ctx(&bulk_async->async_ctx);
kvfree(bulk_async);
}
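/* in mlx5e_tls_priv_tx_init() */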
goto err_out;
} else {
async->priv_tx = priv_tx;
- err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
+ err = mlx5e_ktls_create_tis_cb(mdev, async->async_ctx,
async->out_create, sizeof(async->out_create),
create_tis_callback, &async->context);
if (err)
goto err_out;
}
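/* in mlx5e_tls_priv_tx_cleanup() */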
}
async->priv_tx = priv_tx;
mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
- &async->async_ctx,
+ async->async_ctx,
async->out_destroy, sizeof(async->out_destroy),
destroy_tis_callback, &async->context);
}
static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
struct list_head *list, int size)
{
struct mlx5e_ktls_offload_context_tx *obj, *n;
- struct mlx5e_async_ctx *bulk_async;
+ struct mlx5e_bulk_async_ctx *bulk_async;
int i;
bulk_async = mlx5e_bulk_async_init(mdev, size);
if (!bulk_async)
return;
i = 0;
list_for_each_entry_safe(obj, n, list, list_node) {
- mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
+ mlx5e_tls_priv_tx_cleanup(obj, &bulk_async->arr[i]);
i++;
}
- mlx5e_bulk_async_cleanup(bulk_async, size);
+ mlx5e_bulk_async_cleanup(bulk_async);
}
/* Recycling pool API */
static void create_work(struct work_struct *work)
{
struct mlx5e_tls_tx_pool *pool =
container_of(work, struct mlx5e_tls_tx_pool, create_work);
struct mlx5e_ktls_offload_context_tx *obj;
- struct mlx5e_async_ctx *bulk_async;
+ struct mlx5e_bulk_async_ctx *bulk_async;
LIST_HEAD(local_list);
int i, j, err = 0;
bulk_async = mlx5e_bulk_async_init(pool->mdev, MLX5E_TLS_TX_POOL_BULK);
if (!bulk_async)
return;
for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
- obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
+ obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async->arr[i]);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
break;
}
list_add(&obj->list_node, &local_list);
}
for (j = 0; j < i; j++) {
- struct mlx5e_async_ctx *async = &bulk_async[j];
+ struct mlx5e_async_ctx *async = &bulk_async->arr[j];
if (!err && async->err)
err = async->err;
}
atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
- mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
+ mlx5e_bulk_async_cleanup(bulk_async);
if (err)
goto err_out;