/*
 * Copyright (c) 2018 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/rdma_cm.h>

#include "iw_cxgb4.h"
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
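/* Dump the SQ half of the work queue as driver-specific netlink attributes. */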
static int fill_sq(struct sk_buff *msg, struct t4_wq *wq)
{
	/* WQ+SQ */
	if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "size", wq->sq.size))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "flags", wq->sq.flags))
		goto err;
	return 0;
err:
	return -EMSGSIZE;
}
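/* Dump the RQ half of the work queue, including the RQT address and size. */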
static int fill_rq(struct sk_buff *msg, struct t4_wq *wq)
{
	/* RQ */
	if (rdma_nl_put_driver_u32(msg, "rqid", wq->rq.qid))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "memsize", wq->rq.memsize))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx", wq->rq.cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "pidx", wq->rq.pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->rq.wq_pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "msn", wq->rq.msn))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "rqt_hwaddr", wq->rq.rqt_hwaddr))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "rqt_size", wq->rq.rqt_size))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "in_use", wq->rq.in_use))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "size", wq->rq.size))
		goto err;
	return 0;
err:
	return -EMSGSIZE;
}
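/* Dump one software SQ entry. The CQE status is only emitted for completed sqes. */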
static int fill_swsqe(struct sk_buff *msg, struct t4_sq *sq, u16 idx,
		      struct t4_swsqe *sqe)
{
	if (rdma_nl_put_driver_u32(msg, "idx", idx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete))
		goto err;
	if (sqe->complete &&
	    rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))
		goto err;
	return 0;
err:
	return -EMSGSIZE;
}
/*
 * Dump the first and last pending sqes.
 */
static int fill_swsqes(struct sk_buff *msg, struct t4_sq *sq,
		       u16 first_idx, struct t4_swsqe *first_sqe,
		       u16 last_idx, struct t4_swsqe *last_sqe)
{
	if (!first_sqe)
		goto out;
	if (fill_swsqe(msg, sq, first_idx, first_sqe))
		goto err;
	if (!last_sqe)
		goto out;
	if (fill_swsqe(msg, sq, last_idx, last_sqe))
		goto err;
out:
	return 0;
err:
	return -EMSGSIZE;
}
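/*
 * Snapshot the queue state under the QP lock, then fill the netlink
 * attributes after dropping it so the skb is never touched with the
 * lock held.
 */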
static int fill_res_qp_entry(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	struct ib_qp *ibqp = container_of(res, struct ib_qp, res);
	struct t4_swsqe *fsp = NULL, *lsp = NULL;
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	u16 first_sq_idx = 0, last_sq_idx = 0;
	struct t4_swsqe first_sqe, last_sqe;
	struct nlattr *table_attr;
	struct t4_wq wq;

	/* User qp state is not available, so don't dump user qps */
	if (qhp->ucontext)
		return 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	/* Get a consistent snapshot */
	spin_lock_irq(&qhp->lock);
	wq = qhp->wq;

	/* If there are any pending sqes, copy the first and last */
	if (wq.sq.cidx != wq.sq.pidx) {
		first_sq_idx = wq.sq.cidx;
		first_sqe = qhp->wq.sq.sw_sq[first_sq_idx];
		fsp = &first_sqe;
		last_sq_idx = wq.sq.pidx;
		if (last_sq_idx-- == 0)
			last_sq_idx = wq.sq.size - 1;
		if (last_sq_idx != first_sq_idx) {
			last_sqe = qhp->wq.sq.sw_sq[last_sq_idx];
			lsp = &last_sqe;
		}
	}
	spin_unlock_irq(&qhp->lock);

	if (fill_sq(msg, &wq))
		goto err_cancel_table;

	if (fill_swsqes(msg, &wq.sq, first_sq_idx, fsp, last_sq_idx, lsp))
		goto err_cancel_table;

	if (fill_rq(msg, &wq))
		goto err_cancel_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}
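/*
 * Scratch buffer big enough to snapshot either endpoint type with a
 * single allocation.
 */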
union union_ep {
	struct c4iw_listen_ep lep;
	struct c4iw_ep ep;
};
static int fill_res_ep_entry(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	struct rdma_cm_id *cm_id = rdma_res_to_id(res);
	struct nlattr *table_attr;
	struct c4iw_ep_common *epcp;
	struct c4iw_listen_ep *listen_ep = NULL;
	struct c4iw_ep *ep = NULL;
	struct iw_cm_id *iw_cm_id;
	union union_ep *uep;

	iw_cm_id = rdma_iw_cm_id(cm_id);
	if (!iw_cm_id)
		return 0;
	epcp = (struct c4iw_ep_common *)iw_cm_id->provider_data;
	if (!epcp)
		return 0;
	uep = kcalloc(1, sizeof(*uep), GFP_KERNEL);
	if (!uep)
		return -ENOMEM;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err_free_uep;

	/* Get a consistent snapshot */
	mutex_lock(&epcp->mutex);
	if (epcp->state == LISTEN) {
		uep->lep = *(struct c4iw_listen_ep *)epcp;
		mutex_unlock(&epcp->mutex);
		listen_ep = &uep->lep;
		epcp = &listen_ep->com;
	} else {
		uep->ep = *(struct c4iw_ep *)epcp;
		mutex_unlock(&epcp->mutex);
		ep = &uep->ep;
		epcp = &ep->com;
	}

	if (rdma_nl_put_driver_u32(msg, "state", epcp->state))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u64_hex(msg, "flags", epcp->flags))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u64_hex(msg, "history", epcp->history))
		goto err_cancel_table;

	if (epcp->state == LISTEN) {
		if (rdma_nl_put_driver_u32(msg, "stid", listen_ep->stid))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "backlog", listen_ep->backlog))
			goto err_cancel_table;
	} else {
		if (rdma_nl_put_driver_u32(msg, "hwtid", ep->hwtid))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "ord", ep->ord))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "ird", ep->ird))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "emss", ep->emss))
			goto err_cancel_table;

		if (!ep->parent_ep && rdma_nl_put_driver_u32(msg, "atid",
							     ep->atid))
			goto err_cancel_table;
	}
	nla_nest_end(msg, table_attr);
	kfree(uep);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err_free_uep:
	kfree(uep);
	return -EMSGSIZE;
}
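/* Restrack dump callbacks, indexed by resource type. */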
c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP]	= fill_res_qp_entry,
	[RDMA_RESTRACK_CM_ID]	= fill_res_ep_entry,
};