kfree(mr);
}
-/* MRs are dynamically allocated, so simply clean up and release the MR.
- * A replacement MR will subsequently be allocated on demand.
- */
-static void
-frwr_mr_recycle_worker(struct work_struct *work)
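+/* Clean up and release a dynamically allocated MR. A replacement
+ * MR will subsequently be allocated on demand.
+ */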
+static void frwr_mr_recycle(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
- struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
- struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
-
trace_xprtrdma_mr_recycle(mr);
if (mr->mr_dir != DMA_NONE) {
@@ ... @@
frwr_release_mr(mr);
}
+/* MRs are dynamically allocated, so simply clean up and release the MR.
+ * A replacement MR will subsequently be allocated on demand.
+ */
+static void
+frwr_mr_recycle_worker(struct work_struct *work)
+{
+ struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr,
+ mr_recycle);
+
+ frwr_mr_recycle(mr->mr_xprt, mr);
+}
+
+/* frwr_recycle - Discard MRs
+ * @req: request whose registered MRs should be discarded
+ *
+ * Used after a reconnect. These MRs could still be in flight, and
+ * there is no way to tell. The safe thing to do is to release them;
+ * replacement MRs will be allocated on demand.
+ */
+void frwr_recycle(struct rpcrdma_req *req)
+{
+ struct rpcrdma_mr *mr;
+
+ while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
+ frwr_mr_recycle(mr->mr_xprt, mr);
+}
+
/* frwr_reset - Place MRs back on the free list
* @req: request to reset
 *
@@ ... @@
 * chunks. Very likely the connection has been replaced,
* so these registrations are invalid and unusable.
*/
- frwr_reset(req);
+ frwr_recycle(req);
/* This implementation supports the following combinations
* of chunk lists in one RPC-over-RDMA Call message:
@@ ... @@
/* Memory registration calls xprtrdma/frwr_ops.c
*/
bool frwr_is_supported(struct ib_device *device);
+void frwr_recycle(struct rpcrdma_req *req);
void frwr_reset(struct rpcrdma_req *req);
int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep);
int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr);
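
For illustration only (not part of the patch): a minimal sketch of how a transport-level caller could use the new frwr_recycle() after a reconnect, alongside the marshal-time call site converted above. The helper name is hypothetical, and the sketch assumes the rb_allreqs list and rl_all linkage already declared in xprt_rdma.h; excluding concurrent senders is left to the caller.

/* Hypothetical helper, shown only to illustrate the new entry point:
 * discard every registered MR owned by the transport's requests so
 * that fresh MRs are allocated on demand for retransmissions.
 * Assumes rb_allreqs/rl_all from xprt_rdma.h; the caller is assumed
 * to exclude concurrent senders.
 */
static void example_recycle_all_reqs(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	list_for_each_entry(req, &buf->rb_allreqs, rl_all)
		frwr_recycle(req);
}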