/*
 * Copyright (c) 2017, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/uverbs_std_types.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <linux/bug.h>
#include <linux/file.h>
#include "rdma_core.h"
#include "uverbs.h"
41 static int uverbs_free_ah(struct ib_uobject *uobject,
42 enum rdma_remove_reason why)
44 return rdma_destroy_ah((struct ib_ah *)uobject->object);
47 static int uverbs_free_flow(struct ib_uobject *uobject,
48 enum rdma_remove_reason why)
50 return ib_destroy_flow((struct ib_flow *)uobject->object);
53 static int uverbs_free_mw(struct ib_uobject *uobject,
54 enum rdma_remove_reason why)
56 return uverbs_dealloc_mw((struct ib_mw *)uobject->object);
59 static int uverbs_free_qp(struct ib_uobject *uobject,
60 enum rdma_remove_reason why)
62 struct ib_qp *qp = uobject->object;
63 struct ib_uqp_object *uqp =
64 container_of(uobject, struct ib_uqp_object, uevent.uobject);
67 if (why == RDMA_REMOVE_DESTROY) {
68 if (!list_empty(&uqp->mcast_list))
70 } else if (qp == qp->real_qp) {
71 ib_uverbs_detach_umcast(qp, uqp);
74 ret = ib_destroy_qp(qp);
75 if (ret && why == RDMA_REMOVE_DESTROY)
79 atomic_dec(&uqp->uxrcd->refcnt);
81 ib_uverbs_release_uevent(uobject->context->ufile, &uqp->uevent);
85 static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
86 enum rdma_remove_reason why)
88 struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object;
89 struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
92 ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
93 if (!ret || why != RDMA_REMOVE_DESTROY)
98 static int uverbs_free_wq(struct ib_uobject *uobject,
99 enum rdma_remove_reason why)
101 struct ib_wq *wq = uobject->object;
102 struct ib_uwq_object *uwq =
103 container_of(uobject, struct ib_uwq_object, uevent.uobject);
106 ret = ib_destroy_wq(wq);
107 if (!ret || why != RDMA_REMOVE_DESTROY)
108 ib_uverbs_release_uevent(uobject->context->ufile, &uwq->uevent);
112 static int uverbs_free_srq(struct ib_uobject *uobject,
113 enum rdma_remove_reason why)
115 struct ib_srq *srq = uobject->object;
116 struct ib_uevent_object *uevent =
117 container_of(uobject, struct ib_uevent_object, uobject);
118 enum ib_srq_type srq_type = srq->srq_type;
121 ret = ib_destroy_srq(srq);
123 if (ret && why == RDMA_REMOVE_DESTROY)
126 if (srq_type == IB_SRQT_XRC) {
127 struct ib_usrq_object *us =
128 container_of(uevent, struct ib_usrq_object, uevent);
130 atomic_dec(&us->uxrcd->refcnt);
133 ib_uverbs_release_uevent(uobject->context->ufile, uevent);
137 static int uverbs_free_cq(struct ib_uobject *uobject,
138 enum rdma_remove_reason why)
140 struct ib_cq *cq = uobject->object;
141 struct ib_uverbs_event_queue *ev_queue = cq->cq_context;
142 struct ib_ucq_object *ucq =
143 container_of(uobject, struct ib_ucq_object, uobject);
146 ret = ib_destroy_cq(cq);
147 if (!ret || why != RDMA_REMOVE_DESTROY)
148 ib_uverbs_release_ucq(uobject->context->ufile, ev_queue ?
149 container_of(ev_queue,
150 struct ib_uverbs_completion_event_file,
156 static int uverbs_free_mr(struct ib_uobject *uobject,
157 enum rdma_remove_reason why)
159 return ib_dereg_mr((struct ib_mr *)uobject->object);
162 static int uverbs_free_xrcd(struct ib_uobject *uobject,
163 enum rdma_remove_reason why)
165 struct ib_xrcd *xrcd = uobject->object;
166 struct ib_uxrcd_object *uxrcd =
167 container_of(uobject, struct ib_uxrcd_object, uobject);
170 mutex_lock(&uobject->context->ufile->device->xrcd_tree_mutex);
171 if (why == RDMA_REMOVE_DESTROY && atomic_read(&uxrcd->refcnt))
174 ret = ib_uverbs_dealloc_xrcd(uobject->context->ufile->device,
176 mutex_unlock(&uobject->context->ufile->device->xrcd_tree_mutex);
181 static int uverbs_free_pd(struct ib_uobject *uobject,
182 enum rdma_remove_reason why)
184 struct ib_pd *pd = uobject->object;
186 if (why == RDMA_REMOVE_DESTROY && atomic_read(&pd->usecnt))
189 ib_dealloc_pd((struct ib_pd *)uobject->object);
193 static int uverbs_hot_unplug_completion_event_file(struct ib_uobject_file *uobj_file,
194 enum rdma_remove_reason why)
196 struct ib_uverbs_completion_event_file *comp_event_file =
197 container_of(uobj_file, struct ib_uverbs_completion_event_file,
199 struct ib_uverbs_event_queue *event_queue = &comp_event_file->ev_queue;
201 spin_lock_irq(&event_queue->lock);
202 event_queue->is_closed = 1;
203 spin_unlock_irq(&event_queue->lock);
205 if (why == RDMA_REMOVE_DRIVER_REMOVE) {
206 wake_up_interruptible(&event_queue->poll_wait);
207 kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);
/*
 * This spec is used in order to pass information to the hardware driver in a
 * legacy way. Every verb that could get driver specific data should get this
 * spec.
 */
/* Driver-specific (UHW) input blob; size 0 combined with
 * UVERBS_ATTR_SPEC_F_MIN_SZ presumably allows any length — confirm against
 * uverbs_ioctl.h.
 */
static const struct uverbs_attr_def uverbs_uhw_compat_in =
	UVERBS_ATTR_PTR_IN_SZ(UVERBS_UHW_IN, 0, UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ));
/* Driver-specific (UHW) output blob, same sizing rule as the input spec. */
static const struct uverbs_attr_def uverbs_uhw_compat_out =
	UVERBS_ATTR_PTR_OUT_SZ(UVERBS_UHW_OUT, 0, UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ));
222 static void create_udata(struct uverbs_attr_bundle *ctx,
223 struct ib_udata *udata)
226 * This is for ease of conversion. The purpose is to convert all drivers
227 * to use uverbs_attr_bundle instead of ib_udata.
228 * Assume attr == 0 is input and attr == 1 is output.
231 size_t inbuf_len = 0;
233 size_t outbuf_len = 0;
234 const struct uverbs_attr *uhw_in =
235 uverbs_attr_get(ctx, UVERBS_UHW_IN);
236 const struct uverbs_attr *uhw_out =
237 uverbs_attr_get(ctx, UVERBS_UHW_OUT);
239 if (!IS_ERR(uhw_in)) {
240 inbuf = uhw_in->ptr_attr.ptr;
241 inbuf_len = uhw_in->ptr_attr.len;
244 if (!IS_ERR(uhw_out)) {
245 outbuf = uhw_out->ptr_attr.ptr;
246 outbuf_len = uhw_out->ptr_attr.len;
249 ib_uverbs_init_udata_buf_or_null(udata, inbuf, outbuf, inbuf_len,
253 static int uverbs_create_cq_handler(struct ib_device *ib_dev,
254 struct ib_uverbs_file *file,
255 struct uverbs_attr_bundle *attrs)
257 struct ib_ucontext *ucontext = file->ucontext;
258 struct ib_ucq_object *obj;
262 struct ib_cq_init_attr attr = {};
264 struct ib_uverbs_completion_event_file *ev_file = NULL;
265 const struct uverbs_attr *ev_file_attr;
266 struct ib_uobject *ev_file_uobj;
268 if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_CREATE_CQ))
271 ret = uverbs_copy_from(&attr.comp_vector, attrs, CREATE_CQ_COMP_VECTOR);
273 ret = uverbs_copy_from(&attr.cqe, attrs, CREATE_CQ_CQE);
275 ret = uverbs_copy_from(&user_handle, attrs, CREATE_CQ_USER_HANDLE);
279 /* Optional param, if it doesn't exist, we get -ENOENT and skip it */
280 if (uverbs_copy_from(&attr.flags, attrs, CREATE_CQ_FLAGS) == -EFAULT)
283 ev_file_attr = uverbs_attr_get(attrs, CREATE_CQ_COMP_CHANNEL);
284 if (!IS_ERR(ev_file_attr)) {
285 ev_file_uobj = ev_file_attr->obj_attr.uobject;
287 ev_file = container_of(ev_file_uobj,
288 struct ib_uverbs_completion_event_file,
290 uverbs_uobject_get(ev_file_uobj);
293 if (attr.comp_vector >= ucontext->ufile->device->num_comp_vectors) {
298 obj = container_of(uverbs_attr_get(attrs, CREATE_CQ_HANDLE)->obj_attr.uobject,
299 typeof(*obj), uobject);
300 obj->uverbs_file = ucontext->ufile;
301 obj->comp_events_reported = 0;
302 obj->async_events_reported = 0;
303 INIT_LIST_HEAD(&obj->comp_list);
304 INIT_LIST_HEAD(&obj->async_list);
306 /* Temporary, only until drivers get the new uverbs_attr_bundle */
307 create_udata(attrs, &uhw);
309 cq = ib_dev->create_cq(ib_dev, &attr, ucontext, &uhw);
316 cq->uobject = &obj->uobject;
317 cq->comp_handler = ib_uverbs_comp_handler;
318 cq->event_handler = ib_uverbs_cq_event_handler;
319 cq->cq_context = &ev_file->ev_queue;
320 obj->uobject.object = cq;
321 obj->uobject.user_handle = user_handle;
322 atomic_set(&cq->usecnt, 0);
324 ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe);
334 uverbs_uobject_put(ev_file_uobj);
338 static DECLARE_UVERBS_METHOD(
339 uverbs_method_cq_create, UVERBS_CQ_CREATE, uverbs_create_cq_handler,
340 &UVERBS_ATTR_IDR(CREATE_CQ_HANDLE, UVERBS_OBJECT_CQ, UVERBS_ACCESS_NEW,
341 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
342 &UVERBS_ATTR_PTR_IN(CREATE_CQ_CQE, u32,
343 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
344 &UVERBS_ATTR_PTR_IN(CREATE_CQ_USER_HANDLE, u64,
345 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
346 &UVERBS_ATTR_FD(CREATE_CQ_COMP_CHANNEL, UVERBS_OBJECT_COMP_CHANNEL,
348 &UVERBS_ATTR_PTR_IN(CREATE_CQ_COMP_VECTOR, u32,
349 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
350 &UVERBS_ATTR_PTR_IN(CREATE_CQ_FLAGS, u32),
351 &UVERBS_ATTR_PTR_OUT(CREATE_CQ_RESP_CQE, u32,
352 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
353 &uverbs_uhw_compat_in, &uverbs_uhw_compat_out);
355 static int uverbs_destroy_cq_handler(struct ib_device *ib_dev,
356 struct ib_uverbs_file *file,
357 struct uverbs_attr_bundle *attrs)
359 struct ib_uverbs_destroy_cq_resp resp;
360 struct ib_uobject *uobj =
361 uverbs_attr_get(attrs, DESTROY_CQ_HANDLE)->obj_attr.uobject;
362 struct ib_ucq_object *obj = container_of(uobj, struct ib_ucq_object,
366 if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_DESTROY_CQ))
369 ret = rdma_explicit_destroy(uobj);
373 resp.comp_events_reported = obj->comp_events_reported;
374 resp.async_events_reported = obj->async_events_reported;
376 return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp);
/*
 * Ioctl method spec for CQ destroy: the CQ handle (consumed on success) plus
 * a mandatory response buffer for the reported event counts.
 */
static DECLARE_UVERBS_METHOD(
	uverbs_method_cq_destroy, UVERBS_CQ_DESTROY, uverbs_destroy_cq_handler,
	&UVERBS_ATTR_IDR(DESTROY_CQ_HANDLE, UVERBS_OBJECT_CQ,
			 UVERBS_ACCESS_DESTROY,
			 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
	&UVERBS_ATTR_PTR_OUT(DESTROY_CQ_RESP, struct ib_uverbs_destroy_cq_resp,
			     UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
387 DECLARE_UVERBS_OBJECT(uverbs_object_comp_channel,
388 UVERBS_OBJECT_COMP_CHANNEL,
389 &UVERBS_TYPE_ALLOC_FD(0,
390 sizeof(struct ib_uverbs_completion_event_file),
391 uverbs_hot_unplug_completion_event_file,
393 "[infinibandevent]", O_RDONLY));
395 DECLARE_UVERBS_OBJECT(uverbs_object_cq, UVERBS_OBJECT_CQ,
396 &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_ucq_object), 0,
398 &uverbs_method_cq_create,
399 &uverbs_method_cq_destroy);
401 DECLARE_UVERBS_OBJECT(uverbs_object_qp, UVERBS_OBJECT_QP,
402 &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), 0,
405 DECLARE_UVERBS_OBJECT(uverbs_object_mw, UVERBS_OBJECT_MW,
406 &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_mw));
408 DECLARE_UVERBS_OBJECT(uverbs_object_mr, UVERBS_OBJECT_MR,
409 /* 1 is used in order to free the MR after all the MWs */
410 &UVERBS_TYPE_ALLOC_IDR(1, uverbs_free_mr));
412 DECLARE_UVERBS_OBJECT(uverbs_object_srq, UVERBS_OBJECT_SRQ,
413 &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object), 0,
416 DECLARE_UVERBS_OBJECT(uverbs_object_ah, UVERBS_OBJECT_AH,
417 &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_ah));
419 DECLARE_UVERBS_OBJECT(uverbs_object_flow, UVERBS_OBJECT_FLOW,
420 &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_flow));
422 DECLARE_UVERBS_OBJECT(uverbs_object_wq, UVERBS_OBJECT_WQ,
423 &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), 0,
426 DECLARE_UVERBS_OBJECT(uverbs_object_rwq_ind_table,
427 UVERBS_OBJECT_RWQ_IND_TBL,
428 &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_rwq_ind_tbl));
430 DECLARE_UVERBS_OBJECT(uverbs_object_xrcd, UVERBS_OBJECT_XRCD,
431 &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object), 0,
434 DECLARE_UVERBS_OBJECT(uverbs_object_pd, UVERBS_OBJECT_PD,
435 /* 2 is used in order to free the PD after MRs */
436 &UVERBS_TYPE_ALLOC_IDR(2, uverbs_free_pd));
438 DECLARE_UVERBS_OBJECT(uverbs_object_device, UVERBS_OBJECT_DEVICE, NULL);
440 DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects,
441 &uverbs_object_device,
444 &uverbs_object_comp_channel,
452 &uverbs_object_rwq_ind_table,
453 &uverbs_object_xrcd);