/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/pid.h>
#include <linux/pid_namespace.h>
#include <net/netlink.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"
#include "cma_priv.h"	/* struct rdma_id_private, used by the CM_ID dump */
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_DEV_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING,
				       .len = IB_DEVICE_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_PORT_INDEX] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_FW_VERSION] = { .type = NLA_NUL_STRING,
					 .len = IB_FW_VERSION_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_NODE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_LID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SM_LID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LMC] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_PORT_PHYS_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_DEV_NODE_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME] = { .type = NLA_NUL_STRING,
						     .len = 16 },
	[RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_QP] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_QP_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LQPN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQPN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_RQ_PSN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SQ_PSN] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_TYPE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_STATE] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_PID] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_KERN_NAME] = { .type = NLA_NUL_STRING,
					    .len = TASK_COMM_LEN },
	[RDMA_NLDEV_ATTR_RES_CM_ID] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PS] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_SRC_ADDR] = {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_DST_ADDR] = {
			.len = sizeof(struct __kernel_sockaddr_storage) },
	[RDMA_NLDEV_ATTR_RES_CQ] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQ_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_CQE] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_USECNT] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_POLL_CTX] = { .type = NLA_U8 },
	[RDMA_NLDEV_ATTR_RES_MR] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_MR_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_RKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_LKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_IOVA] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_MRLEN] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_RES_PD] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_PD_ENTRY] = { .type = NLA_NESTED },
	[RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY] = { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY] = { .type = NLA_U32 },
};
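/*
 * A minimal userspace sketch (not part of this file) of how a client might
 * issue RDMA_NLDEV_CMD_GET against the policy above, over NETLINK_RDMA.
 * It assumes libnl-3; device index 0 and the missing error handling are
 * illustrative only:
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	struct nl_msg *msg = nlmsg_alloc();
 *
 *	nl_connect(sk, NETLINK_RDMA);
 *	nlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ,
 *		  RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
 *		  0, NLM_F_REQUEST);
 *	nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, 0);
 *	nl_send_auto(sk, msg);
 *	nl_recvmsgs_default(sk);	- reply carries the attributes above
 */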
static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
		return -EMSGSIZE;
	return 0;
}
static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags, 0))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* A device without FW has strlen(fw) == 0 */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid), 0))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid), 0))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type))
		return -EMSGSIZE;
	return 0;
}
static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port)
{
	struct ib_port_attr attr;
	int ret;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      (u64)attr.port_cap_flags, 0))
		return -EMSGSIZE;
	/* LID, SM LID, LMC and the subnet prefix only exist on IB ports */
	if (rdma_protocol_ib(device, port)) {
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
				      attr.subnet_prefix, 0))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state))
		return -EMSGSIZE;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state))
		return -EMSGSIZE;
	return 0;
}
static int fill_res_info_entry(struct sk_buff *msg,
			       const char *name, u64 curr)
{
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
	if (!entry_attr)
		return -EMSGSIZE;

	if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
		goto err;
	if (nla_put_u64_64bit(msg,
			      RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, 0))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;
err:
	nla_nest_cancel(msg, entry_attr);
	return -EMSGSIZE;
}
static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
{
	static const char * const names[RDMA_RESTRACK_MAX] = {
		[RDMA_RESTRACK_PD] = "pd",
		[RDMA_RESTRACK_CQ] = "cq",
		[RDMA_RESTRACK_QP] = "qp",
		[RDMA_RESTRACK_CM_ID] = "cm_id",
		[RDMA_RESTRACK_MR] = "mr",
	};
	struct rdma_restrack_root *res = &device->res;
	struct nlattr *table_attr;
	int ret, i;
	u64 curr;

	if (fill_nldev_handle(msg, device))
		return -EMSGSIZE;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
	if (!table_attr)
		return -EMSGSIZE;

	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
		if (!names[i])
			continue;
		curr = rdma_restrack_count(res, i, task_active_pid_ns(current));
		ret = fill_res_info_entry(msg, names[i], curr);
		if (ret)
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;
err:
	nla_nest_cancel(msg, table_attr);
	return ret;
}
static int fill_res_name_pid(struct sk_buff *msg,
			     struct rdma_restrack_entry *res)
{
	/*
	 * For user resources, the user is expected to read
	 * /proc/PID/comm to get the name of the task.
	 */
	if (rdma_is_kernel_res(res)) {
		if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME,
				   res->kern_name))
			return -EMSGSIZE;
	} else {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID,
				task_pid_vnr(res->task)))
			return -EMSGSIZE;
	}
	return 0;
}
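/*
 * A hypothetical userspace sketch of turning the PID reported above into a
 * task name; "pid" is assumed to come from RDMA_NLDEV_ATTR_RES_PID:
 *
 *	char path[64], comm[16] = "";
 *	int fd;
 *
 *	snprintf(path, sizeof(path), "/proc/%u/comm", pid);
 *	fd = open(path, O_RDONLY);
 *	if (fd >= 0) {
 *		read(fd, comm, sizeof(comm) - 1);
 *		close(fd);
 *	}
 */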
static int fill_res_qp_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_qp *qp = container_of(res, struct ib_qp, res);
	struct ib_qp_init_attr qp_init_attr;
	struct nlattr *entry_attr;
	struct ib_qp_attr qp_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, 0, &qp_init_attr);
	if (ret)
		return ret;

	if (port && port != qp_attr.port_num)
		return 0;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY);
	if (!entry_attr)
		goto out;

	/* In create_qp() port is not set yet */
	if (qp_attr.port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp_attr.port_num))
		goto err;
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num))
		goto err;
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN,
				qp_attr.dest_qp_num))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN,
				qp_attr.rq_psn))
			goto err;
	}
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn))
		goto err;
	if (qp->qp_type == IB_QPT_RC || qp->qp_type == IB_QPT_UC ||
	    qp->qp_type == IB_QPT_XRC_INI || qp->qp_type == IB_QPT_XRC_TGT) {
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE,
			       qp_attr.path_mig_state))
			goto err;
	}
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;
err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}
static int fill_res_cm_id_entry(struct sk_buff *msg,
				struct netlink_callback *cb,
				struct rdma_restrack_entry *res, uint32_t port)
{
	struct rdma_id_private *id_priv =
				container_of(res, struct rdma_id_private, res);
	struct rdma_cm_id *cm_id = &id_priv->id;
	struct nlattr *entry_attr;

	if (port && port != cm_id->port_num)
		return 0;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CM_ID_ENTRY);
	if (!entry_attr)
		goto out;

	if (cm_id->port_num &&
	    nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
		goto err;
	if (id_priv->qp_num) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num))
			goto err;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
			goto err;
	}
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
		goto err;
	if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state))
		goto err;
	if (cm_id->route.addr.src_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR,
		    sizeof(cm_id->route.addr.src_addr),
		    &cm_id->route.addr.src_addr))
		goto err;
	if (cm_id->route.addr.dst_addr.ss_family &&
	    nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR,
		    sizeof(cm_id->route.addr.dst_addr),
		    &cm_id->route.addr.dst_addr))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;
err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}
static int fill_res_cq_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_cq *cq = container_of(res, struct ib_cq, res);
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_CQ_ENTRY);
	if (!entry_attr)
		goto out;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
		goto err;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&cq->usecnt), 0))
		goto err;
	/* Poll context is only valid for kernel CQs */
	if (rdma_is_kernel_res(res) &&
	    nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;
err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}
static int fill_res_mr_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_mr *mr = container_of(res, struct ib_mr, res);
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_MR_ENTRY);
	if (!entry_attr)
		goto out;

	/* The keys and the IOVA are security sensitive: CAP_NET_ADMIN only */
	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey))
			goto err;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey))
			goto err;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_IOVA,
				      mr->iova, 0))
			goto err;
	}
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, 0))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;
err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}
static int fill_res_pd_entry(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, uint32_t port)
{
	struct ib_pd *pd = container_of(res, struct ib_pd, res);
	struct nlattr *entry_attr;

	entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_PD_ENTRY);
	if (!entry_attr)
		goto out;

	/* The keys are security sensitive: expose them to CAP_NET_ADMIN only */
	if (netlink_capable(cb->skb, CAP_NET_ADMIN)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY,
				pd->local_dma_lkey))
			goto err;
		if ((pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) &&
		    nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY,
				pd->unsafe_global_rkey))
			goto err;
	}
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT,
			      atomic_read(&pd->usecnt), 0))
		goto err;

	if (fill_res_name_pid(msg, res))
		goto err;

	nla_nest_end(msg, entry_attr);
	return 0;
err:
	nla_nest_cancel(msg, entry_attr);
out:
	return -EMSGSIZE;
}
static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_dev_info(msg, device);
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);
	put_device(&device->dev);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	put_device(&device->dev);
	return err;
}
static int _nldev_get_dumpit(struct ib_device *device,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, NLM_F_MULTI);

	if (fill_dev_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}
	nlmsg_end(skb, nlh);

	idx++;
out:
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take a lock here,
	 * because we rely on ib_core's lists_rwsem.
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}
static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	u32 port;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err ||
	    !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
	    !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port)) {
		err = -EINVAL;
		goto err;
	}

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
					 RDMA_NLDEV_CMD_PORT_GET),
			0, 0);

	err = fill_port_info(msg, device, port);
	if (err)
		goto err_free;

	nlmsg_end(msg, nlh);
	put_device(&device->dev);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	put_device(&device->dev);
	return err;
}
static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	u32 p;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(ifindex);
	if (!device)
		return -EINVAL;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		/*
		 * The dumpit function returns all information from the
		 * specific index onwards. The index is taken from the
		 * netlink request sent by the user and is available in
		 * cb->args[0].
		 *
		 * Usually the user doesn't fill this field, which causes
		 * everything to be returned.
		 */
		if (idx < start) {
			idx++;
			continue;
		}

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		if (fill_port_info(skb, device, p)) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:
	put_device(&device->dev);
	cb->args[0] = idx;
	return skb->len;
}
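/*
 * Dump pagination sketch: for a request carrying NLM_F_DUMP, the netlink
 * core keeps invoking the dumpit callback until it stops adding data, and
 * cb->args[0] preserves the next index to emit between invocations. A dump
 * request differs from the doit case only in its flags (illustrative,
 * libnl-3):
 *
 *	nlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ,
 *		  RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_PORT_GET),
 *		  0, NLM_F_REQUEST | NLM_F_DUMP);
 *	nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, 0);
 */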
static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int ret;

	ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto err;
	}

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, 0);

	ret = fill_res_info(msg, device);
	if (ret)
		goto err_free;

	nlmsg_end(msg, nlh);
	put_device(&device->dev);
	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);

err_free:
	nlmsg_free(msg);
err:
	put_device(&device->dev);
	return ret;
}
static int _nldev_res_get_dumpit(struct ib_device *device,
				 struct sk_buff *skb,
				 struct netlink_callback *cb,
				 unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_RES_GET),
			0, NLM_F_MULTI);

	if (fill_res_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}
	nlmsg_end(skb, nlh);

	idx++;
out:
	cb->args[0] = idx;
	return skb->len;
}

static int nldev_res_get_dumpit(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	return ib_enum_all_devs(_nldev_res_get_dumpit, skb, cb);
}
struct nldev_fill_res_entry {
	int (*fill_res_func)(struct sk_buff *msg, struct netlink_callback *cb,
			     struct rdma_restrack_entry *res, u32 port);
	enum rdma_nldev_attr nldev_attr;
	enum rdma_nldev_command nldev_cmd;
};

static const struct nldev_fill_res_entry fill_entries[RDMA_RESTRACK_MAX] = {
	[RDMA_RESTRACK_QP] = {
		.fill_res_func = fill_res_qp_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_QP_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_QP,
	},
	[RDMA_RESTRACK_CM_ID] = {
		.fill_res_func = fill_res_cm_id_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CM_ID_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CM_ID,
	},
	[RDMA_RESTRACK_CQ] = {
		.fill_res_func = fill_res_cq_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_CQ_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_CQ,
	},
	[RDMA_RESTRACK_MR] = {
		.fill_res_func = fill_res_mr_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_MR_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_MR,
	},
	[RDMA_RESTRACK_PD] = {
		.fill_res_func = fill_res_pd_entry,
		.nldev_cmd = RDMA_NLDEV_CMD_RES_PD_GET,
		.nldev_attr = RDMA_NLDEV_ATTR_RES_PD,
	},
};
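/*
 * Wiring a new restrack type into the common dump path below only takes a
 * fill function plus an entry in the table above; a purely hypothetical
 * example (no such type, command or attribute exists here):
 *
 *	[RDMA_RESTRACK_FOO] = {
 *		.fill_res_func = fill_res_foo_entry,
 *		.nldev_cmd = RDMA_NLDEV_CMD_RES_FOO_GET,
 *		.nldev_attr = RDMA_NLDEV_ATTR_RES_FOO,
 *	},
 */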
static int res_get_common_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb,
				 enum rdma_restrack_type res_type)
{
	const struct nldev_fill_res_entry *fe = &fill_entries[res_type];
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct rdma_restrack_entry *res;
	int err, ret = 0, idx = 0;
	struct nlattr *table_attr;
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 index, port = 0;
	bool filled = false;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	/*
	 * Right now we require the device index to return res information,
	 * but it is possible to extend this code to return all devices in
	 * one shot by checking the existence of RDMA_NLDEV_ATTR_DEV_INDEX:
	 * if it doesn't exist, iterate over all devices.
	 *
	 * But that is not needed for now.
	 */
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	/*
	 * If no PORT_INDEX is supplied, return the resources of all ports
	 * of that device.
	 */
	if (tb[RDMA_NLDEV_ATTR_PORT_INDEX]) {
		port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
		if (!rdma_is_port_valid(device, port)) {
			ret = -EINVAL;
			goto err_index;
		}
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, fe->nldev_cmd),
			0, NLM_F_MULTI);

	if (fill_nldev_handle(skb, device)) {
		ret = -EMSGSIZE;
		goto err;
	}

	table_attr = nla_nest_start(skb, fe->nldev_attr);
	if (!table_attr) {
		ret = -EMSGSIZE;
		goto err;
	}

	down_read(&device->res.rwsem);
	hash_for_each_possible(device->res.hash, res, node, res_type) {
		if (idx < start)
			goto next;

		if ((rdma_is_kernel_res(res) &&
		     task_active_pid_ns(current) != &init_pid_ns) ||
		    (!rdma_is_kernel_res(res) && task_active_pid_ns(current) !=
		     task_active_pid_ns(res->task)))
			/*
			 * 1. Kernel resources should be visible in the init
			 *    namespace only.
			 * 2. Present only resources visible in the current
			 *    namespace.
			 */
			goto next;

		if (!rdma_restrack_get(res))
			/*
			 * Resource is under release now, but we are not
			 * releasing the lock, so it will be freed in one of
			 * our next passes, once we get the ->next pointer.
			 */
			goto next;

		filled = true;

		up_read(&device->res.rwsem);
		ret = fe->fill_res_func(skb, cb, res, port);
		down_read(&device->res.rwsem);
		/*
		 * Return the resource; it won't actually be released until
		 * &device->res.rwsem is taken for write.
		 */
		rdma_restrack_put(res);

		if (ret == -EMSGSIZE)
			/*
			 * There is a chance to optimize here.
			 * It can be done by using list_prepare_entry
			 * and list_for_each_entry_continue afterwards.
			 */
			break;
		if (ret)
			goto res_err;
next:		idx++;
	}
	up_read(&device->res.rwsem);

	nla_nest_end(skb, table_attr);
	nlmsg_end(skb, nlh);
	cb->args[0] = idx;

	/*
	 * No more entries to fill: cancel the message and
	 * return 0 to mark the end of the dumpit.
	 */
	if (!filled)
		goto err;

	put_device(&device->dev);
	return skb->len;

res_err:
	nla_nest_cancel(skb, table_attr);
	up_read(&device->res.rwsem);
err:
	nlmsg_cancel(skb, nlh);
err_index:
	put_device(&device->dev);
	return ret;
}
static int nldev_res_get_qp_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_QP);
}

static int nldev_res_get_cm_id_dumpit(struct sk_buff *skb,
				      struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CM_ID);
}

static int nldev_res_get_cq_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_CQ);
}

static int nldev_res_get_mr_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_MR);
}

static int nldev_res_get_pd_dumpit(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	return res_get_common_dumpit(skb, cb, RDMA_RESTRACK_PD);
}
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_GET] = {
		.doit = nldev_res_get_doit,
		.dump = nldev_res_get_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_QP_GET] = {
		.dump = nldev_res_get_qp_dumpit,
		/*
		 * .doit is not implemented yet for two reasons:
		 * 1. It is not needed yet.
		 * 2. There is a need to provide an identifier; while that is
		 *    easy for QPs (device index + port index + LQPN), it is
		 *    not the case for the rest of the resources (PD and CQ).
		 *    Because it is better to provide a similar interface for
		 *    all resources, let's wait until the other resources are
		 *    implemented too.
		 */
	},
	[RDMA_NLDEV_CMD_RES_CM_ID_GET] = {
		.dump = nldev_res_get_cm_id_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_CQ_GET] = {
		.dump = nldev_res_get_cq_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_MR_GET] = {
		.dump = nldev_res_get_mr_dumpit,
	},
	[RDMA_NLDEV_CMD_RES_PD_GET] = {
		.dump = nldev_res_get_pd_dumpit,
	},
};
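/*
 * In practice this operation table is consumed by the iproute2 "rdma" tool;
 * for example, "rdma dev show" maps to RDMA_NLDEV_CMD_GET and
 * "rdma res show qp" to RDMA_NLDEV_CMD_RES_QP_GET.
 */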
void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_NLDEV, 5);