 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
         /* Wait until all page fault handlers using the mr complete. */
-        if (mr->umem && mr->umem->is_odp)
-                synchronize_srcu(&dev->mr_srcu);
+        synchronize_srcu(&dev->mr_srcu);
 #endif
         return err;
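Applied in context, this first hunk makes the helper wait for SRCU readers after every mkey destruction, not only for ODP-backed MRs. A minimal sketch of the resulting function, assuming the hunk sits in mlx5_ib's destroy_mkey() in drivers/infiniband/hw/mlx5/mr.c; the function name and the mlx5_core_destroy_mkey() line above the quoted context are assumptions, not part of the diff:

/* Sketch only: destroy_mkey() and the mlx5_core_destroy_mkey() call are
 * assumed from mlx5_ib conventions; the diff shows only the tail. */
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
        int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        /* Wait until all page fault handlers using the mr complete. */
        synchronize_srcu(&dev->mr_srcu);
#endif
        return err;
}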
 {
         struct mlx5_mr_cache *cache = &dev->cache;
         struct mlx5_cache_ent *ent = &cache->ent[c];
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-        bool odp_mkey_exist = false;
-#endif
         struct mlx5_ib_mr *tmp_mr;
         struct mlx5_ib_mr *mr;
         LIST_HEAD(del_list);
                         break;
                 }
                 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-                if (mr->umem && mr->umem->is_odp)
-                        odp_mkey_exist = true;
-#endif
                 list_move(&mr->list, &del_list);
                 ent->cur--;
                 ent->size--;
         }
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-        if (odp_mkey_exist)
-                synchronize_srcu(&dev->mr_srcu);
+        synchronize_srcu(&dev->mr_srcu);
 #endif
         list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
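This second hunk makes the MR-cache reaper take the same approach: instead of tracking whether any mkey being removed was ODP-backed (odp_mkey_exist), it always waits out an SRCU grace period before the mkeys on del_list are destroyed. A minimal sketch of the SRCU pairing the wait relies on; everything except the srcu_* primitives is hypothetical:

#include <linux/srcu.h>

DEFINE_SRCU(demo_mr_srcu);      /* hypothetical stand-in for dev->mr_srcu */

/* Reader side: page-fault handlers hold the SRCU read lock while they
 * dereference an mkey, so the mkey cannot be freed under them. */
static void demo_fault_side(void)
{
        int idx = srcu_read_lock(&demo_mr_srcu);
        /* ... look up and use the mkey ... */
        srcu_read_unlock(&demo_mr_srcu, idx);
}

/* Updater side: unlink the mkey first so new readers cannot find it,
 * then wait for readers already inside their critical sections. */
static void demo_destroy_side(void)
{
        synchronize_srcu(&demo_mr_srcu);
        /* ... now destroying and freeing the mkey is safe ... */
}

The trade-off is an unconditional grace-period wait even when no ODP mkey was queued for deletion; in exchange, correctness no longer depends on the per-MR is_odp test, which is presumably the point of the patch.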
 {
         struct mlx5_mr_cache *cache = &dev->cache;
         struct mlx5_cache_ent *ent = &cache->ent[c];
-        bool odp_mkey_exist = false;
         struct mlx5_ib_mr *tmp_mr;
         struct mlx5_ib_mr *mr;
         LIST_HEAD(del_list);
                         break;
                 }
                 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
-                if (mr->umem && mr->umem->is_odp)
-                        odp_mkey_exist = true;
                 list_move(&mr->list, &del_list);
                 ent->cur--;
                 ent->size--;
         }
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-        if (odp_mkey_exist)
-                synchronize_srcu(&dev->mr_srcu);
+        synchronize_srcu(&dev->mr_srcu);
 #endif
         list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
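The final hunk applies the same simplification to the second list-draining function. Both hunks keep the #ifdef around the now-unconditional wait; for comparison, an #ifdef-free spelling would look like the sketch below. This is not what the patch does, and it only compiles if dev->mr_srcu is declared in !ODP builds as well, which may not hold in this tree:

        /* Sketch of an alternative spelling, not part of the patch. */
        if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
                synchronize_srcu(&dev->mr_srcu);

IS_ENABLED() lets the compiler discard the call entirely when on-demand paging is configured out, while keeping the code visible to the compiler and static analysis in all configurations.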