/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");
static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
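
/*
 * With the table registered under "net/rdma_ucm" in ucma_init(), the listen
 * backlog cap becomes tunable at runtime.  A minimal sketch from userspace
 * (assuming procfs is mounted in the usual place):
 *
 *	echo 2048 > /proc/sys/net/rdma_ucm/max_backlog
 */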
struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
};
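
/*
 * Ownership model: a ucma_file (one per open of the rdma_cm device) owns a
 * list of ucma_contexts, each wrapping one rdma_cm_id; a context in turn
 * owns the multicast joins made on it.  Events for all of a file's contexts
 * are queued on that file's event_list and drained in FIFO order.
 */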
static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
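
/*
 * Locking: the global 'mut' protects ctx_idr and multicast_idr; each
 * ucma_file's 'mut' protects that file's ctx_list and event_list.  When two
 * file mutexes must be held together (see ucma_migrate_id()), they are
 * taken in pointer order to avoid an ABBA deadlock.
 */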
static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}
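
/*
 * Lifetime pattern: lookups take a reference under 'mut' via ucma_get_ctx();
 * teardown first removes the id from the IDR so no new lookups can succeed,
 * drops its own reference, then blocks in wait_for_completion() until the
 * final ucma_put_ctx() fires ctx->comp (see ucma_destroy_id()).
 */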
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	mutex_lock(&mut);
	ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (ctx->id < 0)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	mutex_lock(&mut);
	mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
	mutex_unlock(&mut);
	if (mc->id < 0)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}
static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}
static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later.
		 */
		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}
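
/*
 * Event flow: the rdma_cm core invokes ucma_event_handler() from its own
 * callback context; the handler snapshots the event into a ucma_event,
 * appends it to the owning file's event_list and wakes any pollers.
 * Userspace later collects it with RDMA_USER_CM_CMD_GET_EVENT, so no event
 * data is copied to userspace from callback context.
 */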
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}
static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	kfree(ctx);
	return ret;
}
static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * We cannot hold file->mut when calling rdma_destroy_id() or we can
 * deadlock.  We also acquire file->mut in ucma_event_handler(), and
 * rdma_destroy_id() will wait until all callbacks have completed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	resp.events_reported = ucma_free_ctx(ctx);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
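
/*
 * resp.events_reported tells userspace how many events were delivered on
 * this id before teardown, so a userspace library can tell whether it has
 * seen every event for the id before freeing its own per-id state.
 */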
static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_bind_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}
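
/*
 * Active-side sequence sketch (hypothetical caller, typically via librdmacm
 * rather than raw writes): CREATE_ID -> BIND_ADDR (optional) ->
 * RESOLVE_ADDR -> RESOLVE_ROUTE -> CONNECT.  Each resolve step completes
 * asynchronously and reports ADDR_RESOLVED / ROUTE_RESOLVED (or the
 * matching error event) through ucma_get_event().
 */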
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;
	struct net_device *dev;
	u16 vid = 0;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		if (dev) {
			vid = rdma_vlan_dev_vlan_id(dev);
			dev_put(dev);
		}

		iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
				    dev_addr->dst_dev_addr, vid);
		iboe_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}
static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query_route cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(ctx->cm_id->device,
						 ctx->cm_id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
			break;
		default:
			break;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}
static void ucma_copy_conn_param(struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}
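
/*
 * Passive-side sequence sketch: ucma_listen() arms the id with a bounded
 * backlog; each incoming request surfaces as RDMA_CM_EVENT_CONNECT_REQUEST
 * with a freshly allocated context (see ucma_get_event()), which userspace
 * answers with ucma_accept() or ucma_reject().  The uid passed to accept
 * ties later events on the new id back to the caller's own object.
 */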
static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}
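
/*
 * Note that this command does not modify any QP itself: it only reports the
 * attributes and mask that rdma_cm would apply for the requested state.
 * Userspace (typically librdmacm) converts the result and makes its own
 * ibv_modify_qp() call against the QP it created.
 */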
static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd.uid;
	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else {
		idr_remove(&multicast_idr, mc->id);
		atomic_inc(&mc->ctx->ref);
	}
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}
static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutexes based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock(&file2->mut);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock(&file1->mut);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	= ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	= ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_ADDR]	= ucma_bind_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	= ucma_resolve_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	= ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	= ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	= ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	= ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	= ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	= ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	= ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	= ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	= ucma_migrate_id
};

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}
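
/*
 * Wire format sketch: every request is one write() of a struct
 * rdma_ucm_cmd_hdr followed immediately by hdr.in bytes of command payload;
 * any response is copied back through the user pointer carried inside the
 * payload, not through the write() return value.  A hypothetical raw
 * invocation, bypassing librdmacm:
 *
 *	struct {
 *		struct rdma_ucm_cmd_hdr hdr;
 *		struct rdma_ucm_create_id cmd;
 *	} msg = {
 *		.hdr = { .cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *			 .in  = sizeof(msg.cmd),
 *			 .out = sizeof(struct rdma_ucm_create_id_resp) },
 *		.cmd = { .response = (uintptr_t) &resp, .uid = my_uid,
 *			 .ps = RDMA_PS_TCP, .qp_type = IB_QPT_RC },
 *	};
 *	write(fd, &msg, sizeof(msg));
 */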
static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}
static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
	.llseek	 = no_llseek,
};
static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);