Revert "IB/mlx5: Fix long EEH recover time with NVMe offloads"
author     Leon Romanovsky <leonro@mellanox.com>
           Wed, 26 Dec 2018 13:22:12 +0000 (15:22 +0200)
committer  Jason Gunthorpe <jgg@mellanox.com>
           Wed, 2 Jan 2019 16:40:34 +0000 (09:40 -0700)
Longer term testing shows this patch didn't play well with the MR cache
and caused call traces during remove_mkeys().

This reverts commit bb7e22a8ab00ff9ba911a45ba8784cef9e6d6f7a.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
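
The change being reverted had made the synchronize_srcu() calls in
destroy_mkey(), remove_keys() and clean_keys() conditional on an ODP MR
being present; the revert makes the grace-period wait unconditional again.
A minimal sketch of the underlying SRCU pattern, with hypothetical helper
names (the real lookup and teardown paths in the driver are more involved):

	/*
	 * Readers -- the ODP page-fault handlers -- dereference mkeys under
	 * srcu_read_lock(); the teardown path therefore has to wait out a
	 * full grace period before an mkey a reader might still hold is
	 * freed.
	 */
	static void lookup_side(struct mlx5_ib_dev *dev)	/* hypothetical */
	{
		int idx = srcu_read_lock(&dev->mr_srcu);
		/* ... look up an mkey and service the page fault ... */
		srcu_read_unlock(&dev->mr_srcu, idx);
	}

	static void teardown_side(struct mlx5_ib_dev *dev)	/* hypothetical */
	{
		/* first unpublish the mkey from the lookup structures ... */
	#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
		synchronize_srcu(&dev->mr_srcu);	/* wait for all readers */
	#endif
		/* ... only then destroy/free the mkey */
	}

The reverted optimization skipped that wait for mkeys that did not look
like ODP MRs at teardown time; per the message above, that did not play
well with the MR cache.
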
drivers/infiniband/hw/mlx5/mr.c

index 1bd8c1b..fd6ea1f 100644
@@ -73,8 +73,7 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        /* Wait until all page fault handlers using the mr complete. */
-       if (mr->umem && mr->umem->is_odp)
-               synchronize_srcu(&dev->mr_srcu);
+       synchronize_srcu(&dev->mr_srcu);
 #endif
 
        return err;
@@ -238,9 +237,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-       bool odp_mkey_exist = false;
-#endif
        struct mlx5_ib_mr *tmp_mr;
        struct mlx5_ib_mr *mr;
        LIST_HEAD(del_list);
@@ -253,10 +249,6 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
                        break;
                }
                mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-               if (mr->umem && mr->umem->is_odp)
-                       odp_mkey_exist = true;
-#endif
                list_move(&mr->list, &del_list);
                ent->cur--;
                ent->size--;
@@ -265,8 +257,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
        }
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-       if (odp_mkey_exist)
-               synchronize_srcu(&dev->mr_srcu);
+       synchronize_srcu(&dev->mr_srcu);
 #endif
 
        list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
@@ -581,7 +572,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
        struct mlx5_mr_cache *cache = &dev->cache;
        struct mlx5_cache_ent *ent = &cache->ent[c];
-       bool odp_mkey_exist = false;
        struct mlx5_ib_mr *tmp_mr;
        struct mlx5_ib_mr *mr;
        LIST_HEAD(del_list);
@@ -594,8 +584,6 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
                        break;
                }
                mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
-               if (mr->umem && mr->umem->is_odp)
-                       odp_mkey_exist = true;
                list_move(&mr->list, &del_list);
                ent->cur--;
                ent->size--;
@@ -604,8 +592,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
        }
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-       if (odp_mkey_exist)
-               synchronize_srcu(&dev->mr_srcu);
+       synchronize_srcu(&dev->mr_srcu);
 #endif
 
        list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {