char *page, size_t len)
{
struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
- struct rtrs_srv_sess *sess = stats->sess;
- return scnprintf(page, len, "%lld %lld %lld %lld %u\n",
- (s64)atomic64_read(&r->dir[READ].cnt),
- (s64)atomic64_read(&r->dir[READ].size_total),
- (s64)atomic64_read(&r->dir[WRITE].cnt),
- (s64)atomic64_read(&r->dir[WRITE].size_total),
- atomic_read(&sess->ids_inflight));
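+ /*
+  * ids_inflight is now a percpu_ref, which has no readable counter;
+  * print 0 in its place to keep the output format unchanged.
+  */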
+ return scnprintf(page, len, "%lld %lld %lld %lld %u\n",
+ (s64)atomic64_read(&r->dir[READ].cnt),
+ (s64)atomic64_read(&r->dir[READ].size_total),
+ (s64)atomic64_read(&r->dir[WRITE].cnt),
+ (s64)atomic64_read(&r->dir[WRITE].size_total), 0);
}
struct rtrs_srv *srv = sess->srv;
int i;
- WARN_ON(atomic_read(&sess->ids_inflight));
if (sess->ops_ids) {
for (i = 0; i < srv->queue_depth; i++)
free_id(sess->ops_ids[i]);
.done = rtrs_srv_rdma_done
};
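+/*
+ * Release callback of the inflight percpu_ref: called once the last
+ * reference is dropped after percpu_ref_kill(), so it is safe to tear
+ * the ref down and wake the waiter in the close path.
+ */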
+static inline void rtrs_srv_inflight_ref_release(struct percpu_ref *ref)
+{
+ struct rtrs_srv_sess *sess = container_of(ref, struct rtrs_srv_sess,
+ ids_inflight_ref);
+
+ percpu_ref_exit(&sess->ids_inflight_ref);
+ complete(&sess->complete_done);
+}
+
static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess)
{
struct rtrs_srv *srv = sess->srv;
struct rtrs_srv_op *id;
- int i;
+ int i, ret;
sess->ops_ids = kcalloc(srv->queue_depth, sizeof(*sess->ops_ids),
GFP_KERNEL);
sess->ops_ids[i] = id;
}
- init_waitqueue_head(&sess->ids_waitq);
- atomic_set(&sess->ids_inflight, 0);
+
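+ /*
+  * The ref starts in percpu mode; the close path kills it to switch
+  * to atomic mode and drain the remaining references.
+  */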
+ ret = percpu_ref_init(&sess->ids_inflight_ref,
+ rtrs_srv_inflight_ref_release, 0, GFP_KERNEL);
+ if (ret) {
+ pr_err("Percpu reference init failed\n");
+ goto err;
+ }
+ init_completion(&sess->complete_done);
return 0;
static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_sess *sess)
{
- atomic_inc(&sess->ids_inflight);
+ percpu_ref_get(&sess->ids_inflight_ref);
}
static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_sess *sess)
{
- if (atomic_dec_and_test(&sess->ids_inflight))
- wake_up(&sess->ids_waitq);
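+ /*
+  * After the ref is killed in the close path, the last put triggers
+  * the release callback, which signals complete_done.
+  */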
+ percpu_ref_put(&sess->ids_inflight_ref);
}
-static void rtrs_srv_wait_ops_ids(struct rtrs_srv_sess *sess)
-{
- wait_event(sess->ids_waitq, !atomic_read(&sess->ids_inflight));
-}
-
-
static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
rdma_disconnect(con->c.cm_id);
ib_drain_qp(con->c.qp);
}
- /* Wait for all inflights */
- rtrs_srv_wait_ops_ids(sess);
+
+ /*
+ * Switch the ref to the usual model with a single shared atomic_t
+ * counter and drop the initial reference so the count can reach zero
+ */
+ percpu_ref_kill(&sess->ids_inflight_ref);
+
+ /* Wait for all inflight operations to complete */
+ wait_for_completion(&sess->complete_done);
/* Notify upper layer if we are the last path */
rtrs_srv_sess_down(sess);