net/mlx5: DR, Sync chunks only during free
author     Yevgeny Kliteynik <kliteyn@nvidia.com>
           Mon, 14 Sep 2020 12:06:49 +0000 (15:06 +0300)
committer  Saeed Mahameed <saeedm@nvidia.com>
           Thu, 5 Nov 2020 20:09:29 +0000 (12:09 -0800)
When freeing chunks, we want to sync the steering
so that all the "hot" memory will be written to ICM
and all the chunks that are in the hot_list will
actually be destroyed.

When allocating from the pool, there is no need to
sync the steering, as we're not freeing anything,
and syncing would only hurt performance in terms of
flows-per-second offloaded.
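
As a rough, self-contained illustration of this approach (not the
driver's actual code), the sketch below parks freed chunks on a hot
list and triggers a sync only from the free path, once a threshold of
hot memory is crossed. All names (toy_pool, toy_chunk,
TOY_SYNC_THRESHOLD) and the pthread-based locking are hypothetical
stand-ins for the mlx5 structures shown in the diff below.

/*
 * Minimal sketch of the "sync only on free" pattern.
 * Everything here is illustrative; the real change is in the diff.
 */
#include <pthread.h>
#include <stddef.h>

#define TOY_SYNC_THRESHOLD	(64 * 1024)	/* bytes of "hot" memory */

struct toy_chunk {
	struct toy_chunk *next;
	size_t byte_size;
};

struct toy_pool {
	pthread_mutex_t mutex;
	struct toy_chunk *hot_list;	/* freed, waiting for a sync */
	size_t hot_memory_size;
};

/* Stand-in for flushing pending steering writes to ICM and then
 * really destroying everything parked on the hot list. */
static void toy_sync_pool(struct toy_pool *pool)
{
	pool->hot_list = NULL;
	pool->hot_memory_size = 0;
}

/* Allocation path: take memory from the underlying allocator, but do
 * not sync here -- nothing was freed, so a sync would only slow down
 * flow insertion. (The actual buddy allocation is omitted here.) */
struct toy_chunk *toy_alloc_chunk(struct toy_pool *pool)
{
	(void)pool;
	return NULL;
}

/* Free path: park the chunk on the hot list and sync only once enough
 * hot memory has accumulated, all under the pool lock. */
void toy_free_chunk(struct toy_pool *pool, struct toy_chunk *chunk)
{
	pthread_mutex_lock(&pool->mutex);
	chunk->next = pool->hot_list;
	pool->hot_list = chunk;
	pool->hot_memory_size += chunk->byte_size;

	if (pool->hot_memory_size > TOY_SYNC_THRESHOLD)
		toy_sync_pool(pool);

	pthread_mutex_unlock(&pool->mutex);
}

Keeping the threshold check under the same lock as the hot-list
update, as the patch below does, ensures the hot-memory accounting and
the decision to sync cannot race with concurrent frees.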

Signed-off-by: Erez Shitrit <erezsh@nvidia.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c

index 2c5886b..4d8330a 100644
@@ -332,10 +332,6 @@ static int dr_icm_handle_buddies_get_mem(struct mlx5dr_icm_pool *pool,
        bool new_mem = false;
        int err;
 
-       /* Check if we have chunks that are waiting for sync-ste */
-       if (dr_icm_pool_is_sync_required(pool))
-               dr_icm_pool_sync_all_buddy_pools(pool);
-
 alloc_buddy_mem:
        /* find the next free place from the buddy list */
        list_for_each_entry(buddy_mem_pool, &pool->buddy_mem_list, list_node) {
@@ -409,12 +405,18 @@ out:
 void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
 {
        struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+       struct mlx5dr_icm_pool *pool = buddy->pool;
 
        /* move the memory to the waiting list AKA "hot" */
-       mutex_lock(&buddy->pool->mutex);
+       mutex_lock(&pool->mutex);
        list_move_tail(&chunk->chunk_list, &buddy->hot_list);
        buddy->hot_memory_size += chunk->byte_size;
-       mutex_unlock(&buddy->pool->mutex);
+
+       /* Check if we have chunks that are waiting for sync-ste */
+       if (dr_icm_pool_is_sync_required(pool))
+               dr_icm_pool_sync_all_buddy_pools(pool);
+
+       mutex_unlock(&pool->mutex);
 }
 
 struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,