 	list_add_tail(&mr->list, &ent->head);
 	ent->available_mrs++;
 	ent->total_mrs++;
+	/*
+	 * Creating is always done in response to some demand, so do not call
+	 * queue_adjust_cache_locked().
+	 */
 	spin_unlock_irqrestore(&ent->lock, flags);
 
 	if (!completion_done(&ent->compl))
 		complete(&ent->compl);
 }
@@ ... @@
+/*
+ * Check if the bucket is outside the high/low water mark and schedule an async
+ * update. The cache refill has hysteresis, once the low water mark is hit it is
+ * refilled up to the high mark.
+ */
+static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
+{
+	lockdep_assert_held(&ent->lock);
+
+	if (ent->available_mrs < ent->limit ||
+	    ent->available_mrs > 2 * ent->limit)
+		queue_work(ent->dev->cache.wq, &ent->work);
+}
+
 static void __cache_work_func(struct mlx5_cache_ent *ent)
 {
 	struct mlx5_ib_dev *dev = ent->dev;
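
To make the hysteresis concrete, here is a minimal userspace sketch of the behaviour the new comment describes; the names (bucket, needs_adjust, worker_adjust) and the refill-up-to-2*limit worker are illustrative assumptions, not the kernel code:

#include <stdio.h>

struct bucket {
	unsigned int available;	/* models ent->available_mrs */
	unsigned int limit;	/* low water mark, models ent->limit */
};

/* Mirrors the check in queue_adjust_cache_locked() above. */
static int needs_adjust(const struct bucket *b)
{
	return b->available < b->limit || b->available > 2 * b->limit;
}

/* Toy stand-in for the async worker: move back to the high water mark. */
static void worker_adjust(struct bucket *b)
{
	if (needs_adjust(b))
		b->available = 2 * b->limit;
}

int main(void)
{
	struct bucket b = { .available = 16, .limit = 8 };
	unsigned int i;

	for (i = 0; i < 12; i++) {
		b.available--;	/* one allocation from the bucket */
		if (needs_adjust(&b)) {
			printf("available=%u: schedule worker\n", b.available);
			worker_adjust(&b);
		}
	}
	return 0;
}

In this model the bucket drains from 16 all the way down to 7 before the worker fires once and tops it back up to 16, so a single worker run absorbs many allocations instead of being scheduled on every dip below the limit.
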
@@ ... @@
 			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
 					      list);
 			list_del(&mr->list);
 			ent->available_mrs--;
+			queue_adjust_cache_locked(ent);
 			spin_unlock_irq(&ent->lock);
-			if (ent->available_mrs < ent->limit)
-				queue_work(cache->wq, &ent->work);
 			return mr;
 		}
 	}
@@ ... @@
 			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
 					      list);
 			list_del(&mr->list);
 			ent->available_mrs--;
+			queue_adjust_cache_locked(ent);
 			spin_unlock_irq(&ent->lock);
-			if (ent->available_mrs < ent->limit)
-				queue_work(dev->cache.wq, &ent->work);
 			break;
 		}
+		queue_adjust_cache_locked(ent);
 		spin_unlock_irq(&ent->lock);
-
-		queue_work(dev->cache.wq, &ent->work);
 	}
 
 	if (!mr)
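
Both allocation hunks above make the same change: the old code sampled ent->available_mrs after dropping ent->lock, so the decision to queue work could be based on a counter another CPU had already changed. A hedged pthread sketch of the two shapes (all names invented, not the kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int available = 8;
static const unsigned int limit = 4;

/* Before: the counter is re-read after the unlock, racing with any
 * thread that runs between the unlock and the test. */
static bool take_one_racy(void)
{
	pthread_mutex_lock(&lock);
	available--;
	pthread_mutex_unlock(&lock);
	/* <-- another thread can change 'available' here */
	return available < limit;	/* stale read, may mis-decide */
}

/* After: the decision uses exactly the value this thread just produced,
 * inside the same critical section that changed it. */
static bool take_one_locked(void)
{
	bool need_work;

	pthread_mutex_lock(&lock);
	available--;
	need_work = available < limit || available > 2 * limit;
	pthread_mutex_unlock(&lock);
	return need_work;
}

int main(void)
{
	printf("racy: need_work=%d\n", take_one_racy());
	printf("locked: need_work=%d\n", take_one_locked());
	return 0;
}

Folding the test into queue_adjust_cache_locked() also means every caller gets the identical high/low water logic rather than hand-copied variants of it.
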
@@ ... @@
 void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	struct mlx5_cache_ent *ent = mr->cache_ent;
-	int shrink = 0;
 
 	if (!ent)
 		return;
 
 	if (mlx5_mr_cache_invalidate(mr)) {
 		detach_mr_from_cache(mr);
 		destroy_mkey(dev, mr);
-		if (ent->available_mrs < ent->limit)
-			queue_work(dev->cache.wq, &ent->work);
 		return;
 	}
 
 	spin_lock_irq(&ent->lock);
 	list_add_tail(&mr->list, &ent->head);
 	ent->available_mrs++;
-	if (ent->available_mrs > 2 * ent->limit)
-		shrink = 1;
+	queue_adjust_cache_locked(ent);
 	spin_unlock_irq(&ent->lock);
-
-	if (shrink)
-		queue_work(dev->cache.wq, &ent->work);
 }
 
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
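
The free path now shares the same helper instead of carrying a private shrink flag across the unlock, and it inherits the hysteresis band: as long as the bucket stays between limit and 2*limit, alloc/free churn schedules no work at all. A toy model of that property (invented names, not the kernel code):

#include <stdio.h>

static unsigned int available = 12;	/* inside the [limit, 2*limit] band */
static const unsigned int limit = 8;

/* Same shape as the check in queue_adjust_cache_locked(). */
static int adjust_needed(void)
{
	return available < limit || available > 2 * limit;
}

int main(void)
{
	unsigned int i, fired = 0;

	/* Alternate one alloc and one free, as a busy workload would. */
	for (i = 0; i < 1000; i++) {
		available--;		/* alloc path */
		fired += adjust_needed();
		available++;		/* free path */
		fired += adjust_needed();
	}
	printf("worker scheduled %u times\n", fired);	/* prints 0 */
	return 0;
}
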
@@ ... @@
 			ent->limit = dev->mdev->profile->mr_cache[i].limit;
 		else
 			ent->limit = 0;
-		queue_work(cache->wq, &ent->work);
+		spin_lock_irq(&ent->lock);
+		queue_adjust_cache_locked(ent);
+		spin_unlock_irq(&ent->lock);
 	}
 
 	mlx5_mr_cache_debugfs_init(dev);
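
At init every entry starts with available_mrs == 0, so calling queue_adjust_cache_locked() still schedules the initial fill for any entry with a non-zero limit, while limit == 0 entries no longer get a pointless work item; taking the lock is also what satisfies the lockdep_assert_held() in the helper. A userspace analogue of that behaviour (invented names, assert() standing in for lockdep):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int lock_held;	/* poor man's lockdep_assert_held() */

static void queue_adjust_locked(unsigned int available, unsigned int limit)
{
	assert(lock_held);	/* models lockdep_assert_held(&ent->lock) */
	if (available < limit || available > 2 * limit)
		printf("queue_work(limit=%u)\n", limit);
}

int main(void)
{
	unsigned int limits[] = { 16, 8, 0 };	/* last entry disabled */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		pthread_mutex_lock(&lock);
		lock_held = 1;
		/* available_mrs == 0 at init: fires iff limit > 0 */
		queue_adjust_locked(0, limits[i]);
		lock_held = 0;
		pthread_mutex_unlock(&lock);
	}
	return 0;
}
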