drivers/infiniband/core/uverbs_cmd.c
1 /*
2  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
4  * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
5  * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/file.h>
37 #include <linux/fs.h>
38 #include <linux/slab.h>
39
40 #include <asm/uaccess.h>
41
42 #include "uverbs.h"
43 #include "core_priv.h"
44
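/*
 * Per-object-type lockdep classes: init_uobj() tags each uobject's rwsem
 * with the class and name for its type, so lockdep can tell (for example)
 * a PD's rwsem held across MR registration apart from the MR's own rwsem
 * instead of reporting it as recursive locking.
 */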
45 struct uverbs_lock_class {
46         struct lock_class_key   key;
47         char                    name[16];
48 };
49
50 static struct uverbs_lock_class pd_lock_class   = { .name = "PD-uobj" };
51 static struct uverbs_lock_class mr_lock_class   = { .name = "MR-uobj" };
52 static struct uverbs_lock_class mw_lock_class   = { .name = "MW-uobj" };
53 static struct uverbs_lock_class cq_lock_class   = { .name = "CQ-uobj" };
54 static struct uverbs_lock_class qp_lock_class   = { .name = "QP-uobj" };
55 static struct uverbs_lock_class ah_lock_class   = { .name = "AH-uobj" };
56 static struct uverbs_lock_class srq_lock_class  = { .name = "SRQ-uobj" };
57 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
58 static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
59
60 /*
61  * The ib_uobject locking scheme is as follows:
62  *
63  * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
64  *   needs to be held during all idr operations.  When an object is
65  *   looked up, a reference must be taken on the object's kref before
66  *   dropping this lock.
67  *
68  * - Each object also has an rwsem.  This rwsem must be held for
69  *   reading while an operation that uses the object is performed.
70  *   For example, while registering an MR, the associated PD's
71  *   uobject.mutex must be held for reading.  The rwsem must be held
72  *   for writing while initializing or destroying an object.
73  *
74  * - In addition, each object has a "live" flag.  If this flag is not
75  *   set, then lookups of the object will fail even if it is found in
76  *   the idr.  This handles a reader that blocks and does not acquire
77  *   the rwsem until after the object is destroyed.  The destroy
78  *   operation will set the live flag to 0 and then drop the rwsem;
79  *   this will allow the reader to acquire the rwsem, see that the
80  *   live flag is 0, and then drop the rwsem and its reference to
81  *   object.  The underlying storage will not be freed until the last
82  *   reference to the object is dropped.
83  */
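/*
 * A read-side sketch of that scheme, as implemented by the helpers below:
 *
 *	uobj = idr_read_uobj(idr, handle, context, 0);
 *	if (!uobj)
 *		return ...;        (not found, wrong context, or not live)
 *	... use uobj->object under the read-locked rwsem ...
 *	put_uobj_read(uobj);       (up_read + drop the kref)
 */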
84
85 static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
86                       struct ib_ucontext *context, struct uverbs_lock_class *c)
87 {
88         uobj->user_handle = user_handle;
89         uobj->context     = context;
90         kref_init(&uobj->ref);
91         init_rwsem(&uobj->mutex);
92         lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
93         uobj->live        = 0;
94 }
95
96 static void release_uobj(struct kref *kref)
97 {
98         kfree(container_of(kref, struct ib_uobject, ref));
99 }
100
101 static void put_uobj(struct ib_uobject *uobj)
102 {
103         kref_put(&uobj->ref, release_uobj);
104 }
105
106 static void put_uobj_read(struct ib_uobject *uobj)
107 {
108         up_read(&uobj->mutex);
109         put_uobj(uobj);
110 }
111
112 static void put_uobj_write(struct ib_uobject *uobj)
113 {
114         up_write(&uobj->mutex);
115         put_uobj(uobj);
116 }
117
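/*
 * Allocate an idr entry for a new uobject.  idr_preload()/GFP_NOWAIT is
 * used so the allocation itself never sleeps while ib_uverbs_idr_lock is
 * held; on success the chosen id is stored in uobj->id.
 */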
118 static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
119 {
120         int ret;
121
122         idr_preload(GFP_KERNEL);
123         spin_lock(&ib_uverbs_idr_lock);
124
125         ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
126         if (ret >= 0)
127                 uobj->id = ret;
128
129         spin_unlock(&ib_uverbs_idr_lock);
130         idr_preload_end();
131
132         return ret < 0 ? ret : 0;
133 }
134
135 void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
136 {
137         spin_lock(&ib_uverbs_idr_lock);
138         idr_remove(idr, uobj->id);
139         spin_unlock(&ib_uverbs_idr_lock);
140 }
141
142 static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
143                                          struct ib_ucontext *context)
144 {
145         struct ib_uobject *uobj;
146
147         spin_lock(&ib_uverbs_idr_lock);
148         uobj = idr_find(idr, id);
149         if (uobj) {
150                 if (uobj->context == context)
151                         kref_get(&uobj->ref);
152                 else
153                         uobj = NULL;
154         }
155         spin_unlock(&ib_uverbs_idr_lock);
156
157         return uobj;
158 }
159
160 static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
161                                         struct ib_ucontext *context, int nested)
162 {
163         struct ib_uobject *uobj;
164
165         uobj = __idr_get_uobj(idr, id, context);
166         if (!uobj)
167                 return NULL;
168
169         if (nested)
170                 down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
171         else
172                 down_read(&uobj->mutex);
173         if (!uobj->live) {
174                 put_uobj_read(uobj);
175                 return NULL;
176         }
177
178         return uobj;
179 }
180
181 static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
182                                          struct ib_ucontext *context)
183 {
184         struct ib_uobject *uobj;
185
186         uobj = __idr_get_uobj(idr, id, context);
187         if (!uobj)
188                 return NULL;
189
190         down_write(&uobj->mutex);
191         if (!uobj->live) {
192                 put_uobj_write(uobj);
193                 return NULL;
194         }
195
196         return uobj;
197 }
198
199 static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
200                           int nested)
201 {
202         struct ib_uobject *uobj;
203
204         uobj = idr_read_uobj(idr, id, context, nested);
205         return uobj ? uobj->object : NULL;
206 }
207
208 static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
209 {
210         return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
211 }
212
213 static void put_pd_read(struct ib_pd *pd)
214 {
215         put_uobj_read(pd->uobject);
216 }
217
218 static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
219 {
220         return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
221 }
222
223 static void put_cq_read(struct ib_cq *cq)
224 {
225         put_uobj_read(cq->uobject);
226 }
227
228 static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
229 {
230         return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
231 }
232
233 static void put_ah_read(struct ib_ah *ah)
234 {
235         put_uobj_read(ah->uobject);
236 }
237
238 static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
239 {
240         return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
241 }
242
243 static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
244 {
245         struct ib_uobject *uobj;
246
247         uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
248         return uobj ? uobj->object : NULL;
249 }
250
251 static void put_qp_read(struct ib_qp *qp)
252 {
253         put_uobj_read(qp->uobject);
254 }
255
256 static void put_qp_write(struct ib_qp *qp)
257 {
258         put_uobj_write(qp->uobject);
259 }
260
261 static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
262 {
263         return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
264 }
265
266 static void put_srq_read(struct ib_srq *srq)
267 {
268         put_uobj_read(srq->uobject);
269 }
270
271 static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
272                                      struct ib_uobject **uobj)
273 {
274         *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
275         return *uobj ? (*uobj)->object : NULL;
276 }
277
278 static void put_xrcd_read(struct ib_uobject *uobj)
279 {
280         put_uobj_read(uobj);
281 }
282
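/*
 * GET_CONTEXT: allocate the device's ib_ucontext for this uverbs file
 * (at most one per open file, hence -EINVAL if file->ucontext is already
 * set), initialize the per-context object lists, and create the async
 * event file whose descriptor is returned to userspace in resp.async_fd.
 */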
283 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
284                               const char __user *buf,
285                               int in_len, int out_len)
286 {
287         struct ib_uverbs_get_context      cmd;
288         struct ib_uverbs_get_context_resp resp;
289         struct ib_udata                   udata;
290         struct ib_device                 *ibdev = file->device->ib_dev;
291         struct ib_ucontext               *ucontext;
292         struct file                      *filp;
293         int ret;
294
295         if (out_len < sizeof resp)
296                 return -ENOSPC;
297
298         if (copy_from_user(&cmd, buf, sizeof cmd))
299                 return -EFAULT;
300
301         mutex_lock(&file->mutex);
302
303         if (file->ucontext) {
304                 ret = -EINVAL;
305                 goto err;
306         }
307
308         INIT_UDATA(&udata, buf + sizeof cmd,
309                    (unsigned long) cmd.response + sizeof resp,
310                    in_len - sizeof cmd, out_len - sizeof resp);
311
312         ucontext = ibdev->alloc_ucontext(ibdev, &udata);
313         if (IS_ERR(ucontext)) {
314                 ret = PTR_ERR(ucontext);
315                 goto err;
316         }
317
318         ucontext->device = ibdev;
319         INIT_LIST_HEAD(&ucontext->pd_list);
320         INIT_LIST_HEAD(&ucontext->mr_list);
321         INIT_LIST_HEAD(&ucontext->mw_list);
322         INIT_LIST_HEAD(&ucontext->cq_list);
323         INIT_LIST_HEAD(&ucontext->qp_list);
324         INIT_LIST_HEAD(&ucontext->srq_list);
325         INIT_LIST_HEAD(&ucontext->ah_list);
326         INIT_LIST_HEAD(&ucontext->xrcd_list);
327         INIT_LIST_HEAD(&ucontext->rule_list);
328         ucontext->closing = 0;
329
330         resp.num_comp_vectors = file->device->num_comp_vectors;
331
332         ret = get_unused_fd_flags(O_CLOEXEC);
333         if (ret < 0)
334                 goto err_free;
335         resp.async_fd = ret;
336
337         filp = ib_uverbs_alloc_event_file(file, 1);
338         if (IS_ERR(filp)) {
339                 ret = PTR_ERR(filp);
340                 goto err_fd;
341         }
342
343         if (copy_to_user((void __user *) (unsigned long) cmd.response,
344                          &resp, sizeof resp)) {
345                 ret = -EFAULT;
346                 goto err_file;
347         }
348
349         file->async_file = filp->private_data;
350
351         INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
352                               ib_uverbs_event_handler);
353         ret = ib_register_event_handler(&file->event_handler);
354         if (ret)
355                 goto err_file;
356
357         kref_get(&file->async_file->ref);
358         kref_get(&file->ref);
359         file->ucontext = ucontext;
360
361         fd_install(resp.async_fd, filp);
362
363         mutex_unlock(&file->mutex);
364
365         return in_len;
366
367 err_file:
368         fput(filp);
369
370 err_fd:
371         put_unused_fd(resp.async_fd);
372
373 err_free:
374         ibdev->dealloc_ucontext(ucontext);
375
376 err:
377         mutex_unlock(&file->mutex);
378         return ret;
379 }
380
381 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
382                                const char __user *buf,
383                                int in_len, int out_len)
384 {
385         struct ib_uverbs_query_device      cmd;
386         struct ib_uverbs_query_device_resp resp;
387         struct ib_device_attr              attr;
388         int                                ret;
389
390         if (out_len < sizeof resp)
391                 return -ENOSPC;
392
393         if (copy_from_user(&cmd, buf, sizeof cmd))
394                 return -EFAULT;
395
396         ret = ib_query_device(file->device->ib_dev, &attr);
397         if (ret)
398                 return ret;
399
400         memset(&resp, 0, sizeof resp);
401
402         resp.fw_ver                    = attr.fw_ver;
403         resp.node_guid                 = file->device->ib_dev->node_guid;
404         resp.sys_image_guid            = attr.sys_image_guid;
405         resp.max_mr_size               = attr.max_mr_size;
406         resp.page_size_cap             = attr.page_size_cap;
407         resp.vendor_id                 = attr.vendor_id;
408         resp.vendor_part_id            = attr.vendor_part_id;
409         resp.hw_ver                    = attr.hw_ver;
410         resp.max_qp                    = attr.max_qp;
411         resp.max_qp_wr                 = attr.max_qp_wr;
412         resp.device_cap_flags          = attr.device_cap_flags;
413         resp.max_sge                   = attr.max_sge;
414         resp.max_sge_rd                = attr.max_sge_rd;
415         resp.max_cq                    = attr.max_cq;
416         resp.max_cqe                   = attr.max_cqe;
417         resp.max_mr                    = attr.max_mr;
418         resp.max_pd                    = attr.max_pd;
419         resp.max_qp_rd_atom            = attr.max_qp_rd_atom;
420         resp.max_ee_rd_atom            = attr.max_ee_rd_atom;
421         resp.max_res_rd_atom           = attr.max_res_rd_atom;
422         resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
423         resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
424         resp.atomic_cap                = attr.atomic_cap;
425         resp.max_ee                    = attr.max_ee;
426         resp.max_rdd                   = attr.max_rdd;
427         resp.max_mw                    = attr.max_mw;
428         resp.max_raw_ipv6_qp           = attr.max_raw_ipv6_qp;
429         resp.max_raw_ethy_qp           = attr.max_raw_ethy_qp;
430         resp.max_mcast_grp             = attr.max_mcast_grp;
431         resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
432         resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
433         resp.max_ah                    = attr.max_ah;
434         resp.max_fmr                   = attr.max_fmr;
435         resp.max_map_per_fmr           = attr.max_map_per_fmr;
436         resp.max_srq                   = attr.max_srq;
437         resp.max_srq_wr                = attr.max_srq_wr;
438         resp.max_srq_sge               = attr.max_srq_sge;
439         resp.max_pkeys                 = attr.max_pkeys;
440         resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
441         resp.phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
442
443         if (copy_to_user((void __user *) (unsigned long) cmd.response,
444                          &resp, sizeof resp))
445                 return -EFAULT;
446
447         return in_len;
448 }
449
450 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
451                              const char __user *buf,
452                              int in_len, int out_len)
453 {
454         struct ib_uverbs_query_port      cmd;
455         struct ib_uverbs_query_port_resp resp;
456         struct ib_port_attr              attr;
457         int                              ret;
458
459         if (out_len < sizeof resp)
460                 return -ENOSPC;
461
462         if (copy_from_user(&cmd, buf, sizeof cmd))
463                 return -EFAULT;
464
465         ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
466         if (ret)
467                 return ret;
468
469         memset(&resp, 0, sizeof resp);
470
471         resp.state           = attr.state;
472         resp.max_mtu         = attr.max_mtu;
473         resp.active_mtu      = attr.active_mtu;
474         resp.gid_tbl_len     = attr.gid_tbl_len;
475         resp.port_cap_flags  = attr.port_cap_flags;
476         resp.max_msg_sz      = attr.max_msg_sz;
477         resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
478         resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
479         resp.pkey_tbl_len    = attr.pkey_tbl_len;
480         resp.lid             = attr.lid;
481         resp.sm_lid          = attr.sm_lid;
482         resp.lmc             = attr.lmc;
483         resp.max_vl_num      = attr.max_vl_num;
484         resp.sm_sl           = attr.sm_sl;
485         resp.subnet_timeout  = attr.subnet_timeout;
486         resp.init_type_reply = attr.init_type_reply;
487         resp.active_width    = attr.active_width;
488         resp.active_speed    = attr.active_speed;
489         resp.phys_state      = attr.phys_state;
490         resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
491                                                         cmd.port_num);
492
493         if (copy_to_user((void __user *) (unsigned long) cmd.response,
494                          &resp, sizeof resp))
495                 return -EFAULT;
496
497         return in_len;
498 }
499
500 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
501                            const char __user *buf,
502                            int in_len, int out_len)
503 {
504         struct ib_uverbs_alloc_pd      cmd;
505         struct ib_uverbs_alloc_pd_resp resp;
506         struct ib_udata                udata;
507         struct ib_uobject             *uobj;
508         struct ib_pd                  *pd;
509         int                            ret;
510
511         if (out_len < sizeof resp)
512                 return -ENOSPC;
513
514         if (copy_from_user(&cmd, buf, sizeof cmd))
515                 return -EFAULT;
516
517         INIT_UDATA(&udata, buf + sizeof cmd,
518                    (unsigned long) cmd.response + sizeof resp,
519                    in_len - sizeof cmd, out_len - sizeof resp);
520
521         uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
522         if (!uobj)
523                 return -ENOMEM;
524
525         init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
526         down_write(&uobj->mutex);
527
528         pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
529                                             file->ucontext, &udata);
530         if (IS_ERR(pd)) {
531                 ret = PTR_ERR(pd);
532                 goto err;
533         }
534
535         pd->device  = file->device->ib_dev;
536         pd->uobject = uobj;
537         atomic_set(&pd->usecnt, 0);
538
539         uobj->object = pd;
540         ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
541         if (ret)
542                 goto err_idr;
543
544         memset(&resp, 0, sizeof resp);
545         resp.pd_handle = uobj->id;
546
547         if (copy_to_user((void __user *) (unsigned long) cmd.response,
548                          &resp, sizeof resp)) {
549                 ret = -EFAULT;
550                 goto err_copy;
551         }
552
553         mutex_lock(&file->mutex);
554         list_add_tail(&uobj->list, &file->ucontext->pd_list);
555         mutex_unlock(&file->mutex);
556
557         uobj->live = 1;
558
559         up_write(&uobj->mutex);
560
561         return in_len;
562
563 err_copy:
564         idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
565
566 err_idr:
567         ib_dealloc_pd(pd);
568
569 err:
570         put_uobj_write(uobj);
571         return ret;
572 }
573
574 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
575                              const char __user *buf,
576                              int in_len, int out_len)
577 {
578         struct ib_uverbs_dealloc_pd cmd;
579         struct ib_uobject          *uobj;
580         int                         ret;
581
582         if (copy_from_user(&cmd, buf, sizeof cmd))
583                 return -EFAULT;
584
585         uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
586         if (!uobj)
587                 return -EINVAL;
588
589         ret = ib_dealloc_pd(uobj->object);
590         if (!ret)
591                 uobj->live = 0;
592
593         put_uobj_write(uobj);
594
595         if (ret)
596                 return ret;
597
598         idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
599
600         mutex_lock(&file->mutex);
601         list_del(&uobj->list);
602         mutex_unlock(&file->mutex);
603
604         put_uobj(uobj);
605
606         return in_len;
607 }
608
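/*
 * XRCDs opened through a file descriptor are shared between processes:
 * each uverbs device keeps an rb-tree of xrcd_table_entry nodes keyed by
 * inode, so every open of the same underlying file resolves to the same
 * ib_xrcd.  The inode is held (igrab/iput) for the life of the entry.
 */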
609 struct xrcd_table_entry {
610         struct rb_node  node;
611         struct ib_xrcd *xrcd;
612         struct inode   *inode;
613 };
614
615 static int xrcd_table_insert(struct ib_uverbs_device *dev,
616                             struct inode *inode,
617                             struct ib_xrcd *xrcd)
618 {
619         struct xrcd_table_entry *entry, *scan;
620         struct rb_node **p = &dev->xrcd_tree.rb_node;
621         struct rb_node *parent = NULL;
622
623         entry = kmalloc(sizeof *entry, GFP_KERNEL);
624         if (!entry)
625                 return -ENOMEM;
626
627         entry->xrcd  = xrcd;
628         entry->inode = inode;
629
630         while (*p) {
631                 parent = *p;
632                 scan = rb_entry(parent, struct xrcd_table_entry, node);
633
634                 if (inode < scan->inode) {
635                         p = &(*p)->rb_left;
636                 } else if (inode > scan->inode) {
637                         p = &(*p)->rb_right;
638                 } else {
639                         kfree(entry);
640                         return -EEXIST;
641                 }
642         }
643
644         rb_link_node(&entry->node, parent, p);
645         rb_insert_color(&entry->node, &dev->xrcd_tree);
646         igrab(inode);
647         return 0;
648 }
649
650 static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
651                                                   struct inode *inode)
652 {
653         struct xrcd_table_entry *entry;
654         struct rb_node *p = dev->xrcd_tree.rb_node;
655
656         while (p) {
657                 entry = rb_entry(p, struct xrcd_table_entry, node);
658
659                 if (inode < entry->inode)
660                         p = p->rb_left;
661                 else if (inode > entry->inode)
662                         p = p->rb_right;
663                 else
664                         return entry;
665         }
666
667         return NULL;
668 }
669
670 static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
671 {
672         struct xrcd_table_entry *entry;
673
674         entry = xrcd_table_search(dev, inode);
675         if (!entry)
676                 return NULL;
677
678         return entry->xrcd;
679 }
680
681 static void xrcd_table_delete(struct ib_uverbs_device *dev,
682                               struct inode *inode)
683 {
684         struct xrcd_table_entry *entry;
685
686         entry = xrcd_table_search(dev, inode);
687         if (entry) {
688                 iput(inode);
689                 rb_erase(&entry->node, &dev->xrcd_tree);
690                 kfree(entry);
691         }
692 }
693
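/*
 * OPEN_XRCD: cmd.fd == -1 creates an XRCD private to this context.
 * Otherwise the fd's inode is looked up in the device's XRCD table;
 * O_CREAT allows allocating a new shared XRCD when none exists yet, and
 * O_EXCL fails with -EINVAL if one already does.
 */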
694 ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
695                             const char __user *buf, int in_len,
696                             int out_len)
697 {
698         struct ib_uverbs_open_xrcd      cmd;
699         struct ib_uverbs_open_xrcd_resp resp;
700         struct ib_udata                 udata;
701         struct ib_uxrcd_object         *obj;
702         struct ib_xrcd                 *xrcd = NULL;
703         struct fd                       f = {NULL, 0};
704         struct inode                   *inode = NULL;
705         int                             ret = 0;
706         int                             new_xrcd = 0;
707
708         if (out_len < sizeof resp)
709                 return -ENOSPC;
710
711         if (copy_from_user(&cmd, buf, sizeof cmd))
712                 return -EFAULT;
713
714         INIT_UDATA(&udata, buf + sizeof cmd,
715                    (unsigned long) cmd.response + sizeof resp,
716                    in_len - sizeof cmd, out_len - sizeof  resp);
717
718         mutex_lock(&file->device->xrcd_tree_mutex);
719
720         if (cmd.fd != -1) {
721                 /* look up the file and its inode from the supplied fd */
722                 f = fdget(cmd.fd);
723                 if (!f.file) {
724                         ret = -EBADF;
725                         goto err_tree_mutex_unlock;
726                 }
727
728                 inode = file_inode(f.file);
729                 xrcd = find_xrcd(file->device, inode);
730                 if (!xrcd && !(cmd.oflags & O_CREAT)) {
731                         /* no XRCD exists for this inode; O_CREAT is required to make one */
732                         ret = -EAGAIN;
733                         goto err_tree_mutex_unlock;
734                 }
735
736                 if (xrcd && cmd.oflags & O_EXCL) {
737                         ret = -EINVAL;
738                         goto err_tree_mutex_unlock;
739                 }
740         }
741
742         obj = kmalloc(sizeof *obj, GFP_KERNEL);
743         if (!obj) {
744                 ret = -ENOMEM;
745                 goto err_tree_mutex_unlock;
746         }
747
748         init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);
749
750         down_write(&obj->uobject.mutex);
751
752         if (!xrcd) {
753                 xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
754                                                         file->ucontext, &udata);
755                 if (IS_ERR(xrcd)) {
756                         ret = PTR_ERR(xrcd);
757                         goto err;
758                 }
759
760                 xrcd->inode   = inode;
761                 xrcd->device  = file->device->ib_dev;
762                 atomic_set(&xrcd->usecnt, 0);
763                 mutex_init(&xrcd->tgt_qp_mutex);
764                 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
765                 new_xrcd = 1;
766         }
767
768         atomic_set(&obj->refcnt, 0);
769         obj->uobject.object = xrcd;
770         ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
771         if (ret)
772                 goto err_idr;
773
774         memset(&resp, 0, sizeof resp);
775         resp.xrcd_handle = obj->uobject.id;
776
777         if (inode) {
778                 if (new_xrcd) {
779                         /* create new inode/xrcd table entry */
780                         ret = xrcd_table_insert(file->device, inode, xrcd);
781                         if (ret)
782                                 goto err_insert_xrcd;
783                 }
784                 atomic_inc(&xrcd->usecnt);
785         }
786
787         if (copy_to_user((void __user *) (unsigned long) cmd.response,
788                          &resp, sizeof resp)) {
789                 ret = -EFAULT;
790                 goto err_copy;
791         }
792
793         if (f.file)
794                 fdput(f);
795
796         mutex_lock(&file->mutex);
797         list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
798         mutex_unlock(&file->mutex);
799
800         obj->uobject.live = 1;
801         up_write(&obj->uobject.mutex);
802
803         mutex_unlock(&file->device->xrcd_tree_mutex);
804         return in_len;
805
806 err_copy:
807         if (inode) {
808                 if (new_xrcd)
809                         xrcd_table_delete(file->device, inode);
810                 atomic_dec(&xrcd->usecnt);
811         }
812
813 err_insert_xrcd:
814         idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
815
816 err_idr:
817         ib_dealloc_xrcd(xrcd);
818
819 err:
820         put_uobj_write(&obj->uobject);
821
822 err_tree_mutex_unlock:
823         if (f.file)
824                 fdput(f);
825
826         mutex_unlock(&file->device->xrcd_tree_mutex);
827
828         return ret;
829 }
830
831 ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
832                              const char __user *buf, int in_len,
833                              int out_len)
834 {
835         struct ib_uverbs_close_xrcd cmd;
836         struct ib_uobject           *uobj;
837         struct ib_xrcd              *xrcd = NULL;
838         struct inode                *inode = NULL;
839         struct ib_uxrcd_object      *obj;
840         int                         live;
841         int                         ret = 0;
842
843         if (copy_from_user(&cmd, buf, sizeof cmd))
844                 return -EFAULT;
845
846         mutex_lock(&file->device->xrcd_tree_mutex);
847         uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
848         if (!uobj) {
849                 ret = -EINVAL;
850                 goto out;
851         }
852
853         xrcd  = uobj->object;
854         inode = xrcd->inode;
855         obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
856         if (atomic_read(&obj->refcnt)) {
857                 put_uobj_write(uobj);
858                 ret = -EBUSY;
859                 goto out;
860         }
861
862         if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
863                 ret = ib_dealloc_xrcd(uobj->object);
864                 if (!ret)
865                         uobj->live = 0;
866         }
867
868         live = uobj->live;
869         if (inode && ret)
870                 atomic_inc(&xrcd->usecnt);
871
872         put_uobj_write(uobj);
873
874         if (ret)
875                 goto out;
876
877         if (inode && !live)
878                 xrcd_table_delete(file->device, inode);
879
880         idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
881         mutex_lock(&file->mutex);
882         list_del(&uobj->list);
883         mutex_unlock(&file->mutex);
884
885         put_uobj(uobj);
886         ret = in_len;
887
888 out:
889         mutex_unlock(&file->device->xrcd_tree_mutex);
890         return ret;
891 }
892
893 void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
894                             struct ib_xrcd *xrcd)
895 {
896         struct inode *inode;
897
898         inode = xrcd->inode;
899         if (inode && !atomic_dec_and_test(&xrcd->usecnt))
900                 return;
901
902         ib_dealloc_xrcd(xrcd);
903
904         if (inode)
905                 xrcd_table_delete(dev, inode);
906 }
907
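/*
 * REG_MR: the start address and hca_va must share the same offset within
 * a page, and the access flags are validated with ib_check_mr_access()
 * before the PD is read-locked and the driver's reg_user_mr() is called.
 */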
908 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
909                          const char __user *buf, int in_len,
910                          int out_len)
911 {
912         struct ib_uverbs_reg_mr      cmd;
913         struct ib_uverbs_reg_mr_resp resp;
914         struct ib_udata              udata;
915         struct ib_uobject           *uobj;
916         struct ib_pd                *pd;
917         struct ib_mr                *mr;
918         int                          ret;
919
920         if (out_len < sizeof resp)
921                 return -ENOSPC;
922
923         if (copy_from_user(&cmd, buf, sizeof cmd))
924                 return -EFAULT;
925
926         INIT_UDATA(&udata, buf + sizeof cmd,
927                    (unsigned long) cmd.response + sizeof resp,
928                    in_len - sizeof cmd, out_len - sizeof resp);
929
930         if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
931                 return -EINVAL;
932
933         ret = ib_check_mr_access(cmd.access_flags);
934         if (ret)
935                 return ret;
936
937         uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
938         if (!uobj)
939                 return -ENOMEM;
940
941         init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
942         down_write(&uobj->mutex);
943
944         pd = idr_read_pd(cmd.pd_handle, file->ucontext);
945         if (!pd) {
946                 ret = -EINVAL;
947                 goto err_free;
948         }
949
950         mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
951                                      cmd.access_flags, &udata);
952         if (IS_ERR(mr)) {
953                 ret = PTR_ERR(mr);
954                 goto err_put;
955         }
956
957         mr->device  = pd->device;
958         mr->pd      = pd;
959         mr->uobject = uobj;
960         atomic_inc(&pd->usecnt);
961         atomic_set(&mr->usecnt, 0);
962
963         uobj->object = mr;
964         ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
965         if (ret)
966                 goto err_unreg;
967
968         memset(&resp, 0, sizeof resp);
969         resp.lkey      = mr->lkey;
970         resp.rkey      = mr->rkey;
971         resp.mr_handle = uobj->id;
972
973         if (copy_to_user((void __user *) (unsigned long) cmd.response,
974                          &resp, sizeof resp)) {
975                 ret = -EFAULT;
976                 goto err_copy;
977         }
978
979         put_pd_read(pd);
980
981         mutex_lock(&file->mutex);
982         list_add_tail(&uobj->list, &file->ucontext->mr_list);
983         mutex_unlock(&file->mutex);
984
985         uobj->live = 1;
986
987         up_write(&uobj->mutex);
988
989         return in_len;
990
991 err_copy:
992         idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
993
994 err_unreg:
995         ib_dereg_mr(mr);
996
997 err_put:
998         put_pd_read(pd);
999
1000 err_free:
1001         put_uobj_write(uobj);
1002         return ret;
1003 }
1004
1005 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
1006                            const char __user *buf, int in_len,
1007                            int out_len)
1008 {
1009         struct ib_uverbs_dereg_mr cmd;
1010         struct ib_mr             *mr;
1011         struct ib_uobject        *uobj;
1012         int                       ret = -EINVAL;
1013
1014         if (copy_from_user(&cmd, buf, sizeof cmd))
1015                 return -EFAULT;
1016
1017         uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
1018         if (!uobj)
1019                 return -EINVAL;
1020
1021         mr = uobj->object;
1022
1023         ret = ib_dereg_mr(mr);
1024         if (!ret)
1025                 uobj->live = 0;
1026
1027         put_uobj_write(uobj);
1028
1029         if (ret)
1030                 return ret;
1031
1032         idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
1033
1034         mutex_lock(&file->mutex);
1035         list_del(&uobj->list);
1036         mutex_unlock(&file->mutex);
1037
1038         put_uobj(uobj);
1039
1040         return in_len;
1041 }
1042
1043 ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
1044                          const char __user *buf, int in_len,
1045                          int out_len)
1046 {
1047         struct ib_uverbs_alloc_mw      cmd;
1048         struct ib_uverbs_alloc_mw_resp resp;
1049         struct ib_uobject             *uobj;
1050         struct ib_pd                  *pd;
1051         struct ib_mw                  *mw;
1052         int                            ret;
1053
1054         if (out_len < sizeof(resp))
1055                 return -ENOSPC;
1056
1057         if (copy_from_user(&cmd, buf, sizeof(cmd)))
1058                 return -EFAULT;
1059
1060         uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
1061         if (!uobj)
1062                 return -ENOMEM;
1063
1064         init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
1065         down_write(&uobj->mutex);
1066
1067         pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1068         if (!pd) {
1069                 ret = -EINVAL;
1070                 goto err_free;
1071         }
1072
1073         mw = pd->device->alloc_mw(pd, cmd.mw_type);
1074         if (IS_ERR(mw)) {
1075                 ret = PTR_ERR(mw);
1076                 goto err_put;
1077         }
1078
1079         mw->device  = pd->device;
1080         mw->pd      = pd;
1081         mw->uobject = uobj;
1082         atomic_inc(&pd->usecnt);
1083
1084         uobj->object = mw;
1085         ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
1086         if (ret)
1087                 goto err_unalloc;
1088
1089         memset(&resp, 0, sizeof(resp));
1090         resp.rkey      = mw->rkey;
1091         resp.mw_handle = uobj->id;
1092
1093         if (copy_to_user((void __user *)(unsigned long)cmd.response,
1094                          &resp, sizeof(resp))) {
1095                 ret = -EFAULT;
1096                 goto err_copy;
1097         }
1098
1099         put_pd_read(pd);
1100
1101         mutex_lock(&file->mutex);
1102         list_add_tail(&uobj->list, &file->ucontext->mw_list);
1103         mutex_unlock(&file->mutex);
1104
1105         uobj->live = 1;
1106
1107         up_write(&uobj->mutex);
1108
1109         return in_len;
1110
1111 err_copy:
1112         idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
1113
1114 err_unalloc:
1115         ib_dealloc_mw(mw);
1116
1117 err_put:
1118         put_pd_read(pd);
1119
1120 err_free:
1121         put_uobj_write(uobj);
1122         return ret;
1123 }
1124
1125 ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
1126                            const char __user *buf, int in_len,
1127                            int out_len)
1128 {
1129         struct ib_uverbs_dealloc_mw cmd;
1130         struct ib_mw               *mw;
1131         struct ib_uobject          *uobj;
1132         int                         ret = -EINVAL;
1133
1134         if (copy_from_user(&cmd, buf, sizeof(cmd)))
1135                 return -EFAULT;
1136
1137         uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
1138         if (!uobj)
1139                 return -EINVAL;
1140
1141         mw = uobj->object;
1142
1143         ret = ib_dealloc_mw(mw);
1144         if (!ret)
1145                 uobj->live = 0;
1146
1147         put_uobj_write(uobj);
1148
1149         if (ret)
1150                 return ret;
1151
1152         idr_remove_uobj(&ib_uverbs_mw_idr, uobj);
1153
1154         mutex_lock(&file->mutex);
1155         list_del(&uobj->list);
1156         mutex_unlock(&file->mutex);
1157
1158         put_uobj(uobj);
1159
1160         return in_len;
1161 }
1162
1163 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
1164                                       const char __user *buf, int in_len,
1165                                       int out_len)
1166 {
1167         struct ib_uverbs_create_comp_channel       cmd;
1168         struct ib_uverbs_create_comp_channel_resp  resp;
1169         struct file                               *filp;
1170         int ret;
1171
1172         if (out_len < sizeof resp)
1173                 return -ENOSPC;
1174
1175         if (copy_from_user(&cmd, buf, sizeof cmd))
1176                 return -EFAULT;
1177
1178         ret = get_unused_fd_flags(O_CLOEXEC);
1179         if (ret < 0)
1180                 return ret;
1181         resp.fd = ret;
1182
1183         filp = ib_uverbs_alloc_event_file(file, 0);
1184         if (IS_ERR(filp)) {
1185                 put_unused_fd(resp.fd);
1186                 return PTR_ERR(filp);
1187         }
1188
1189         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1190                          &resp, sizeof resp)) {
1191                 put_unused_fd(resp.fd);
1192                 fput(filp);
1193                 return -EFAULT;
1194         }
1195
1196         fd_install(resp.fd, filp);
1197         return in_len;
1198 }
1199
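/*
 * CREATE_CQ: cmd.comp_vector must be below the device's num_comp_vectors,
 * and a non-negative cmd.comp_channel associates the new CQ with a
 * previously created completion event file for delivering completion
 * notifications.
 */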
1200 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
1201                             const char __user *buf, int in_len,
1202                             int out_len)
1203 {
1204         struct ib_uverbs_create_cq      cmd;
1205         struct ib_uverbs_create_cq_resp resp;
1206         struct ib_udata                 udata;
1207         struct ib_ucq_object           *obj;
1208         struct ib_uverbs_event_file    *ev_file = NULL;
1209         struct ib_cq                   *cq;
1210         int                             ret;
1211
1212         if (out_len < sizeof resp)
1213                 return -ENOSPC;
1214
1215         if (copy_from_user(&cmd, buf, sizeof cmd))
1216                 return -EFAULT;
1217
1218         INIT_UDATA(&udata, buf + sizeof cmd,
1219                    (unsigned long) cmd.response + sizeof resp,
1220                    in_len - sizeof cmd, out_len - sizeof resp);
1221
1222         if (cmd.comp_vector >= file->device->num_comp_vectors)
1223                 return -EINVAL;
1224
1225         obj = kmalloc(sizeof *obj, GFP_KERNEL);
1226         if (!obj)
1227                 return -ENOMEM;
1228
1229         init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
1230         down_write(&obj->uobject.mutex);
1231
1232         if (cmd.comp_channel >= 0) {
1233                 ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
1234                 if (!ev_file) {
1235                         ret = -EINVAL;
1236                         goto err;
1237                 }
1238         }
1239
1240         obj->uverbs_file           = file;
1241         obj->comp_events_reported  = 0;
1242         obj->async_events_reported = 0;
1243         INIT_LIST_HEAD(&obj->comp_list);
1244         INIT_LIST_HEAD(&obj->async_list);
1245
1246         cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
1247                                              cmd.comp_vector,
1248                                              file->ucontext, &udata);
1249         if (IS_ERR(cq)) {
1250                 ret = PTR_ERR(cq);
1251                 goto err_file;
1252         }
1253
1254         cq->device        = file->device->ib_dev;
1255         cq->uobject       = &obj->uobject;
1256         cq->comp_handler  = ib_uverbs_comp_handler;
1257         cq->event_handler = ib_uverbs_cq_event_handler;
1258         cq->cq_context    = ev_file;
1259         atomic_set(&cq->usecnt, 0);
1260
1261         obj->uobject.object = cq;
1262         ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
1263         if (ret)
1264                 goto err_free;
1265
1266         memset(&resp, 0, sizeof resp);
1267         resp.cq_handle = obj->uobject.id;
1268         resp.cqe       = cq->cqe;
1269
1270         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1271                          &resp, sizeof resp)) {
1272                 ret = -EFAULT;
1273                 goto err_copy;
1274         }
1275
1276         mutex_lock(&file->mutex);
1277         list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
1278         mutex_unlock(&file->mutex);
1279
1280         obj->uobject.live = 1;
1281
1282         up_write(&obj->uobject.mutex);
1283
1284         return in_len;
1285
1286 err_copy:
1287         idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
1288
1289 err_free:
1290         ib_destroy_cq(cq);
1291
1292 err_file:
1293         if (ev_file)
1294                 ib_uverbs_release_ucq(file, ev_file, obj);
1295
1296 err:
1297         put_uobj_write(&obj->uobject);
1298         return ret;
1299 }
1300
1301 ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
1302                             const char __user *buf, int in_len,
1303                             int out_len)
1304 {
1305         struct ib_uverbs_resize_cq      cmd;
1306         struct ib_uverbs_resize_cq_resp resp;
1307         struct ib_udata                 udata;
1308         struct ib_cq                    *cq;
1309         int                             ret = -EINVAL;
1310
1311         if (copy_from_user(&cmd, buf, sizeof cmd))
1312                 return -EFAULT;
1313
1314         INIT_UDATA(&udata, buf + sizeof cmd,
1315                    (unsigned long) cmd.response + sizeof resp,
1316                    in_len - sizeof cmd, out_len - sizeof resp);
1317
1318         cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1319         if (!cq)
1320                 return -EINVAL;
1321
1322         ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
1323         if (ret)
1324                 goto out;
1325
1326         resp.cqe = cq->cqe;
1327
1328         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1329                          &resp, sizeof resp.cqe))
1330                 ret = -EFAULT;
1331
1332 out:
1333         put_cq_read(cq);
1334
1335         return ret ? ret : in_len;
1336 }
1337
1338 static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
1339 {
1340         struct ib_uverbs_wc tmp;
1341
1342         tmp.wr_id               = wc->wr_id;
1343         tmp.status              = wc->status;
1344         tmp.opcode              = wc->opcode;
1345         tmp.vendor_err          = wc->vendor_err;
1346         tmp.byte_len            = wc->byte_len;
1347         tmp.ex.imm_data         = (__u32 __force) wc->ex.imm_data;
1348         tmp.qp_num              = wc->qp->qp_num;
1349         tmp.src_qp              = wc->src_qp;
1350         tmp.wc_flags            = wc->wc_flags;
1351         tmp.pkey_index          = wc->pkey_index;
1352         tmp.slid                = wc->slid;
1353         tmp.sl                  = wc->sl;
1354         tmp.dlid_path_bits      = wc->dlid_path_bits;
1355         tmp.port_num            = wc->port_num;
1356         tmp.reserved            = 0;
1357
1358         if (copy_to_user(dest, &tmp, sizeof tmp))
1359                 return -EFAULT;
1360
1361         return 0;
1362 }
1363
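/*
 * POLL_CQ: completions are polled one at a time and each converted
 * ib_uverbs_wc is copied straight into the user buffer just past the
 * response header; the header itself, carrying the number of completions
 * actually returned, is written last.
 */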
1364 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
1365                           const char __user *buf, int in_len,
1366                           int out_len)
1367 {
1368         struct ib_uverbs_poll_cq       cmd;
1369         struct ib_uverbs_poll_cq_resp  resp;
1370         u8 __user                     *header_ptr;
1371         u8 __user                     *data_ptr;
1372         struct ib_cq                  *cq;
1373         struct ib_wc                   wc;
1374         int                            ret;
1375
1376         if (copy_from_user(&cmd, buf, sizeof cmd))
1377                 return -EFAULT;
1378
1379         cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1380         if (!cq)
1381                 return -EINVAL;
1382
1383         /* we copy a struct ib_uverbs_poll_cq_resp to user space */
1384         header_ptr = (void __user *)(unsigned long) cmd.response;
1385         data_ptr = header_ptr + sizeof resp;
1386
1387         memset(&resp, 0, sizeof resp);
1388         while (resp.count < cmd.ne) {
1389                 ret = ib_poll_cq(cq, 1, &wc);
1390                 if (ret < 0)
1391                         goto out_put;
1392                 if (!ret)
1393                         break;
1394
1395                 ret = copy_wc_to_user(data_ptr, &wc);
1396                 if (ret)
1397                         goto out_put;
1398
1399                 data_ptr += sizeof(struct ib_uverbs_wc);
1400                 ++resp.count;
1401         }
1402
1403         if (copy_to_user(header_ptr, &resp, sizeof resp)) {
1404                 ret = -EFAULT;
1405                 goto out_put;
1406         }
1407
1408         ret = in_len;
1409
1410 out_put:
1411         put_cq_read(cq);
1412         return ret;
1413 }
1414
1415 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
1416                                 const char __user *buf, int in_len,
1417                                 int out_len)
1418 {
1419         struct ib_uverbs_req_notify_cq cmd;
1420         struct ib_cq                  *cq;
1421
1422         if (copy_from_user(&cmd, buf, sizeof cmd))
1423                 return -EFAULT;
1424
1425         cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
1426         if (!cq)
1427                 return -EINVAL;
1428
1429         ib_req_notify_cq(cq, cmd.solicited_only ?
1430                          IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1431
1432         put_cq_read(cq);
1433
1434         return in_len;
1435 }
1436
1437 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
1438                              const char __user *buf, int in_len,
1439                              int out_len)
1440 {
1441         struct ib_uverbs_destroy_cq      cmd;
1442         struct ib_uverbs_destroy_cq_resp resp;
1443         struct ib_uobject               *uobj;
1444         struct ib_cq                    *cq;
1445         struct ib_ucq_object            *obj;
1446         struct ib_uverbs_event_file     *ev_file;
1447         int                              ret = -EINVAL;
1448
1449         if (copy_from_user(&cmd, buf, sizeof cmd))
1450                 return -EFAULT;
1451
1452         uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
1453         if (!uobj)
1454                 return -EINVAL;
1455         cq      = uobj->object;
1456         ev_file = cq->cq_context;
1457         obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);
1458
1459         ret = ib_destroy_cq(cq);
1460         if (!ret)
1461                 uobj->live = 0;
1462
1463         put_uobj_write(uobj);
1464
1465         if (ret)
1466                 return ret;
1467
1468         idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
1469
1470         mutex_lock(&file->mutex);
1471         list_del(&uobj->list);
1472         mutex_unlock(&file->mutex);
1473
1474         ib_uverbs_release_ucq(file, ev_file, obj);
1475
1476         memset(&resp, 0, sizeof resp);
1477         resp.comp_events_reported  = obj->comp_events_reported;
1478         resp.async_events_reported = obj->async_events_reported;
1479
1480         put_uobj(uobj);
1481
1482         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1483                          &resp, sizeof resp))
1484                 return -EFAULT;
1485
1486         return in_len;
1487 }
1488
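/*
 * CREATE_QP: for IB_QPT_XRC_TGT the "pd_handle" actually names an XRCD
 * and no PD, CQs or SRQ are taken from userspace; IB_QPT_XRC_INI QPs get
 * no receive queue; all other types need a PD and a send CQ, optionally a
 * distinct receive CQ and an SRQ.  Every referenced object is read-locked
 * while the QP is created, and the use counts are bumped once the QP has
 * been created.
 */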
1489 ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1490                             const char __user *buf, int in_len,
1491                             int out_len)
1492 {
1493         struct ib_uverbs_create_qp      cmd;
1494         struct ib_uverbs_create_qp_resp resp;
1495         struct ib_udata                 udata;
1496         struct ib_uqp_object           *obj;
1497         struct ib_device               *device;
1498         struct ib_pd                   *pd = NULL;
1499         struct ib_xrcd                 *xrcd = NULL;
1500         struct ib_uobject              *uninitialized_var(xrcd_uobj);
1501         struct ib_cq                   *scq = NULL, *rcq = NULL;
1502         struct ib_srq                  *srq = NULL;
1503         struct ib_qp                   *qp;
1504         struct ib_qp_init_attr          attr;
1505         int ret;
1506
1507         if (out_len < sizeof resp)
1508                 return -ENOSPC;
1509
1510         if (copy_from_user(&cmd, buf, sizeof cmd))
1511                 return -EFAULT;
1512
1513         if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
1514                 return -EPERM;
1515
1516         INIT_UDATA(&udata, buf + sizeof cmd,
1517                    (unsigned long) cmd.response + sizeof resp,
1518                    in_len - sizeof cmd, out_len - sizeof resp);
1519
1520         obj = kzalloc(sizeof *obj, GFP_KERNEL);
1521         if (!obj)
1522                 return -ENOMEM;
1523
1524         init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
1525         down_write(&obj->uevent.uobject.mutex);
1526
1527         if (cmd.qp_type == IB_QPT_XRC_TGT) {
1528                 xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1529                 if (!xrcd) {
1530                         ret = -EINVAL;
1531                         goto err_put;
1532                 }
1533                 device = xrcd->device;
1534         } else {
1535                 if (cmd.qp_type == IB_QPT_XRC_INI) {
1536                         cmd.max_recv_wr = cmd.max_recv_sge = 0;
1537                 } else {
1538                         if (cmd.is_srq) {
1539                                 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
1540                                 if (!srq || srq->srq_type != IB_SRQT_BASIC) {
1541                                         ret = -EINVAL;
1542                                         goto err_put;
1543                                 }
1544                         }
1545
1546                         if (cmd.recv_cq_handle != cmd.send_cq_handle) {
1547                                 rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
1548                                 if (!rcq) {
1549                                         ret = -EINVAL;
1550                                         goto err_put;
1551                                 }
1552                         }
1553                 }
1554
1555                 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
1556                 rcq = rcq ?: scq;
1557                 pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
1558                 if (!pd || !scq) {
1559                         ret = -EINVAL;
1560                         goto err_put;
1561                 }
1562
1563                 device = pd->device;
1564         }
1565
1566         attr.event_handler = ib_uverbs_qp_event_handler;
1567         attr.qp_context    = file;
1568         attr.send_cq       = scq;
1569         attr.recv_cq       = rcq;
1570         attr.srq           = srq;
1571         attr.xrcd          = xrcd;
1572         attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
1573         attr.qp_type       = cmd.qp_type;
1574         attr.create_flags  = 0;
1575
1576         attr.cap.max_send_wr     = cmd.max_send_wr;
1577         attr.cap.max_recv_wr     = cmd.max_recv_wr;
1578         attr.cap.max_send_sge    = cmd.max_send_sge;
1579         attr.cap.max_recv_sge    = cmd.max_recv_sge;
1580         attr.cap.max_inline_data = cmd.max_inline_data;
1581
1582         obj->uevent.events_reported     = 0;
1583         INIT_LIST_HEAD(&obj->uevent.event_list);
1584         INIT_LIST_HEAD(&obj->mcast_list);
1585
1586         if (cmd.qp_type == IB_QPT_XRC_TGT)
1587                 qp = ib_create_qp(pd, &attr);
1588         else
1589                 qp = device->create_qp(pd, &attr, &udata);
1590
1591         if (IS_ERR(qp)) {
1592                 ret = PTR_ERR(qp);
1593                 goto err_put;
1594         }
1595
1596         if (cmd.qp_type != IB_QPT_XRC_TGT) {
1597                 qp->real_qp       = qp;
1598                 qp->device        = device;
1599                 qp->pd            = pd;
1600                 qp->send_cq       = attr.send_cq;
1601                 qp->recv_cq       = attr.recv_cq;
1602                 qp->srq           = attr.srq;
1603                 qp->event_handler = attr.event_handler;
1604                 qp->qp_context    = attr.qp_context;
1605                 qp->qp_type       = attr.qp_type;
1606                 atomic_set(&qp->usecnt, 0);
1607                 atomic_inc(&pd->usecnt);
1608                 atomic_inc(&attr.send_cq->usecnt);
1609                 if (attr.recv_cq)
1610                         atomic_inc(&attr.recv_cq->usecnt);
1611                 if (attr.srq)
1612                         atomic_inc(&attr.srq->usecnt);
1613         }
1614         qp->uobject = &obj->uevent.uobject;
1615
1616         obj->uevent.uobject.object = qp;
1617         ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1618         if (ret)
1619                 goto err_destroy;
1620
1621         memset(&resp, 0, sizeof resp);
1622         resp.qpn             = qp->qp_num;
1623         resp.qp_handle       = obj->uevent.uobject.id;
1624         resp.max_recv_sge    = attr.cap.max_recv_sge;
1625         resp.max_send_sge    = attr.cap.max_send_sge;
1626         resp.max_recv_wr     = attr.cap.max_recv_wr;
1627         resp.max_send_wr     = attr.cap.max_send_wr;
1628         resp.max_inline_data = attr.cap.max_inline_data;
1629
1630         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1631                          &resp, sizeof resp)) {
1632                 ret = -EFAULT;
1633                 goto err_copy;
1634         }
1635
1636         if (xrcd) {
1637                 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
1638                                           uobject);
1639                 atomic_inc(&obj->uxrcd->refcnt);
1640                 put_xrcd_read(xrcd_uobj);
1641         }
1642
1643         if (pd)
1644                 put_pd_read(pd);
1645         if (scq)
1646                 put_cq_read(scq);
1647         if (rcq && rcq != scq)
1648                 put_cq_read(rcq);
1649         if (srq)
1650                 put_srq_read(srq);
1651
1652         mutex_lock(&file->mutex);
1653         list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1654         mutex_unlock(&file->mutex);
1655
1656         obj->uevent.uobject.live = 1;
1657
1658         up_write(&obj->uevent.uobject.mutex);
1659
1660         return in_len;
1661
1662 err_copy:
1663         idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1664
1665 err_destroy:
1666         ib_destroy_qp(qp);
1667
1668 err_put:
1669         if (xrcd)
1670                 put_xrcd_read(xrcd_uobj);
1671         if (pd)
1672                 put_pd_read(pd);
1673         if (scq)
1674                 put_cq_read(scq);
1675         if (rcq && rcq != scq)
1676                 put_cq_read(rcq);
1677         if (srq)
1678                 put_srq_read(srq);
1679
1680         put_uobj_write(&obj->uevent.uobject);
1681         return ret;
1682 }
1683
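/*
 * OPEN_QP: instead of creating a new queue pair, look up the XRC domain
 * given in cmd.pd_handle and open an existing shareable QP by its QP
 * number via ib_open_qp().  The new uobject takes a reference on the
 * XRC domain (obj->uxrcd) that is held until this handle is destroyed.
 */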
1684 ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
1685                           const char __user *buf, int in_len, int out_len)
1686 {
1687         struct ib_uverbs_open_qp        cmd;
1688         struct ib_uverbs_create_qp_resp resp;
1689         struct ib_udata                 udata;
1690         struct ib_uqp_object           *obj;
1691         struct ib_xrcd                 *xrcd;
1692         struct ib_uobject              *uninitialized_var(xrcd_uobj);
1693         struct ib_qp                   *qp;
1694         struct ib_qp_open_attr          attr;
1695         int ret;
1696
1697         if (out_len < sizeof resp)
1698                 return -ENOSPC;
1699
1700         if (copy_from_user(&cmd, buf, sizeof cmd))
1701                 return -EFAULT;
1702
1703         INIT_UDATA(&udata, buf + sizeof cmd,
1704                    (unsigned long) cmd.response + sizeof resp,
1705                    in_len - sizeof cmd, out_len - sizeof resp);
1706
1707         obj = kmalloc(sizeof *obj, GFP_KERNEL);
1708         if (!obj)
1709                 return -ENOMEM;
1710
1711         init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
1712         down_write(&obj->uevent.uobject.mutex);
1713
1714         xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
1715         if (!xrcd) {
1716                 ret = -EINVAL;
1717                 goto err_put;
1718         }
1719
1720         attr.event_handler = ib_uverbs_qp_event_handler;
1721         attr.qp_context    = file;
1722         attr.qp_num        = cmd.qpn;
1723         attr.qp_type       = cmd.qp_type;
1724
1725         obj->uevent.events_reported = 0;
1726         INIT_LIST_HEAD(&obj->uevent.event_list);
1727         INIT_LIST_HEAD(&obj->mcast_list);
1728
1729         qp = ib_open_qp(xrcd, &attr);
1730         if (IS_ERR(qp)) {
1731                 ret = PTR_ERR(qp);
1732                 goto err_put;
1733         }
1734
1735         qp->uobject = &obj->uevent.uobject;
1736
1737         obj->uevent.uobject.object = qp;
1738         ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1739         if (ret)
1740                 goto err_destroy;
1741
1742         memset(&resp, 0, sizeof resp);
1743         resp.qpn       = qp->qp_num;
1744         resp.qp_handle = obj->uevent.uobject.id;
1745
1746         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1747                          &resp, sizeof resp)) {
1748                 ret = -EFAULT;
1749                 goto err_remove;
1750         }
1751
1752         obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
1753         atomic_inc(&obj->uxrcd->refcnt);
1754         put_xrcd_read(xrcd_uobj);
1755
1756         mutex_lock(&file->mutex);
1757         list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1758         mutex_unlock(&file->mutex);
1759
1760         obj->uevent.uobject.live = 1;
1761
1762         up_write(&obj->uevent.uobject.mutex);
1763
1764         return in_len;
1765
1766 err_remove:
1767         idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1768
1769 err_destroy:
1770         ib_destroy_qp(qp);
1771
1772 err_put:
1773         put_xrcd_read(xrcd_uobj);
1774         put_uobj_write(&obj->uevent.uobject);
1775         return ret;
1776 }
1777
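/*
 * QUERY_QP: fetch the current QP attributes and the creation-time
 * capabilities with ib_query_qp() and marshal them back to user space,
 * including both the primary and the alternate address vectors.
 */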
1778 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
1779                            const char __user *buf, int in_len,
1780                            int out_len)
1781 {
1782         struct ib_uverbs_query_qp      cmd;
1783         struct ib_uverbs_query_qp_resp resp;
1784         struct ib_qp                   *qp;
1785         struct ib_qp_attr              *attr;
1786         struct ib_qp_init_attr         *init_attr;
1787         int                            ret;
1788
1789         if (copy_from_user(&cmd, buf, sizeof cmd))
1790                 return -EFAULT;
1791
1792         attr      = kmalloc(sizeof *attr, GFP_KERNEL);
1793         init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1794         if (!attr || !init_attr) {
1795                 ret = -ENOMEM;
1796                 goto out;
1797         }
1798
1799         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1800         if (!qp) {
1801                 ret = -EINVAL;
1802                 goto out;
1803         }
1804
1805         ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
1806
1807         put_qp_read(qp);
1808
1809         if (ret)
1810                 goto out;
1811
1812         memset(&resp, 0, sizeof resp);
1813
1814         resp.qp_state               = attr->qp_state;
1815         resp.cur_qp_state           = attr->cur_qp_state;
1816         resp.path_mtu               = attr->path_mtu;
1817         resp.path_mig_state         = attr->path_mig_state;
1818         resp.qkey                   = attr->qkey;
1819         resp.rq_psn                 = attr->rq_psn;
1820         resp.sq_psn                 = attr->sq_psn;
1821         resp.dest_qp_num            = attr->dest_qp_num;
1822         resp.qp_access_flags        = attr->qp_access_flags;
1823         resp.pkey_index             = attr->pkey_index;
1824         resp.alt_pkey_index         = attr->alt_pkey_index;
1825         resp.sq_draining            = attr->sq_draining;
1826         resp.max_rd_atomic          = attr->max_rd_atomic;
1827         resp.max_dest_rd_atomic     = attr->max_dest_rd_atomic;
1828         resp.min_rnr_timer          = attr->min_rnr_timer;
1829         resp.port_num               = attr->port_num;
1830         resp.timeout                = attr->timeout;
1831         resp.retry_cnt              = attr->retry_cnt;
1832         resp.rnr_retry              = attr->rnr_retry;
1833         resp.alt_port_num           = attr->alt_port_num;
1834         resp.alt_timeout            = attr->alt_timeout;
1835
1836         memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
1837         resp.dest.flow_label        = attr->ah_attr.grh.flow_label;
1838         resp.dest.sgid_index        = attr->ah_attr.grh.sgid_index;
1839         resp.dest.hop_limit         = attr->ah_attr.grh.hop_limit;
1840         resp.dest.traffic_class     = attr->ah_attr.grh.traffic_class;
1841         resp.dest.dlid              = attr->ah_attr.dlid;
1842         resp.dest.sl                = attr->ah_attr.sl;
1843         resp.dest.src_path_bits     = attr->ah_attr.src_path_bits;
1844         resp.dest.static_rate       = attr->ah_attr.static_rate;
1845         resp.dest.is_global         = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
1846         resp.dest.port_num          = attr->ah_attr.port_num;
1847
1848         memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
1849         resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
1850         resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
1851         resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
1852         resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
1853         resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
1854         resp.alt_dest.sl            = attr->alt_ah_attr.sl;
1855         resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
1856         resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
1857         resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
1858         resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;
1859
1860         resp.max_send_wr            = init_attr->cap.max_send_wr;
1861         resp.max_recv_wr            = init_attr->cap.max_recv_wr;
1862         resp.max_send_sge           = init_attr->cap.max_send_sge;
1863         resp.max_recv_sge           = init_attr->cap.max_recv_sge;
1864         resp.max_inline_data        = init_attr->cap.max_inline_data;
1865         resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1866
1867         if (copy_to_user((void __user *) (unsigned long) cmd.response,
1868                          &resp, sizeof resp))
1869                 ret = -EFAULT;
1870
1871 out:
1872         kfree(attr);
1873         kfree(init_attr);
1874
1875         return ret ? ret : in_len;
1876 }
1877
1878 /* Remove attribute-mask bits that do not apply to the given QP type */
1879 static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
1880 {
1881         switch (qp_type) {
1882         case IB_QPT_XRC_INI:
1883                 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
1884         case IB_QPT_XRC_TGT:
1885                 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
1886                                 IB_QP_RNR_RETRY);
1887         default:
1888                 return mask;
1889         }
1890 }
1891
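/*
 * MODIFY_QP: rebuild a full ib_qp_attr from the command, filter the
 * attribute mask with modify_qp_mask(), and apply it.  A QP that is its
 * own real_qp is modified through the driver hook with udata; an opened
 * (shared) QP is modified through ib_modify_qp() without udata.
 */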
1892 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
1893                             const char __user *buf, int in_len,
1894                             int out_len)
1895 {
1896         struct ib_uverbs_modify_qp cmd;
1897         struct ib_udata            udata;
1898         struct ib_qp              *qp;
1899         struct ib_qp_attr         *attr;
1900         int                        ret;
1901
1902         if (copy_from_user(&cmd, buf, sizeof cmd))
1903                 return -EFAULT;
1904
1905         INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
1906                    out_len);
1907
1908         attr = kmalloc(sizeof *attr, GFP_KERNEL);
1909         if (!attr)
1910                 return -ENOMEM;
1911
1912         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1913         if (!qp) {
1914                 ret = -EINVAL;
1915                 goto out;
1916         }
1917
1918         attr->qp_state            = cmd.qp_state;
1919         attr->cur_qp_state        = cmd.cur_qp_state;
1920         attr->path_mtu            = cmd.path_mtu;
1921         attr->path_mig_state      = cmd.path_mig_state;
1922         attr->qkey                = cmd.qkey;
1923         attr->rq_psn              = cmd.rq_psn;
1924         attr->sq_psn              = cmd.sq_psn;
1925         attr->dest_qp_num         = cmd.dest_qp_num;
1926         attr->qp_access_flags     = cmd.qp_access_flags;
1927         attr->pkey_index          = cmd.pkey_index;
1928         attr->alt_pkey_index      = cmd.alt_pkey_index;
1929         attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
1930         attr->max_rd_atomic       = cmd.max_rd_atomic;
1931         attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
1932         attr->min_rnr_timer       = cmd.min_rnr_timer;
1933         attr->port_num            = cmd.port_num;
1934         attr->timeout             = cmd.timeout;
1935         attr->retry_cnt           = cmd.retry_cnt;
1936         attr->rnr_retry           = cmd.rnr_retry;
1937         attr->alt_port_num        = cmd.alt_port_num;
1938         attr->alt_timeout         = cmd.alt_timeout;
1939
1940         memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
1941         attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
1942         attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
1943         attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
1944         attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
1945         attr->ah_attr.dlid                  = cmd.dest.dlid;
1946         attr->ah_attr.sl                    = cmd.dest.sl;
1947         attr->ah_attr.src_path_bits         = cmd.dest.src_path_bits;
1948         attr->ah_attr.static_rate           = cmd.dest.static_rate;
1949         attr->ah_attr.ah_flags              = cmd.dest.is_global ? IB_AH_GRH : 0;
1950         attr->ah_attr.port_num              = cmd.dest.port_num;
1951
1952         memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
1953         attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
1954         attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
1955         attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
1956         attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
1957         attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
1958         attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
1959         attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
1960         attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
1961         attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
1962         attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;
1963
1964         if (qp->real_qp == qp) {
1965                 ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
1966                 if (ret)
1967                         goto out;
1968                 ret = qp->device->modify_qp(qp, attr,
1969                         modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
1970         } else {
1971                 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
1972         }
1973
1974         put_qp_read(qp);
1975
1976         if (ret)
1977                 goto out;
1978
1979         ret = in_len;
1980
1981 out:
1982         kfree(attr);
1983
1984         return ret;
1985 }
1986
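/*
 * DESTROY_QP: refuse with -EBUSY while multicast attachments remain.
 * The uobject, the XRCD reference and any pending async events are only
 * released after ib_destroy_qp() has succeeded, and the number of events
 * already reported is returned to user space.
 */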
1987 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
1988                              const char __user *buf, int in_len,
1989                              int out_len)
1990 {
1991         struct ib_uverbs_destroy_qp      cmd;
1992         struct ib_uverbs_destroy_qp_resp resp;
1993         struct ib_uobject               *uobj;
1994         struct ib_qp                    *qp;
1995         struct ib_uqp_object            *obj;
1996         int                              ret = -EINVAL;
1997
1998         if (copy_from_user(&cmd, buf, sizeof cmd))
1999                 return -EFAULT;
2000
2001         memset(&resp, 0, sizeof resp);
2002
2003         uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
2004         if (!uobj)
2005                 return -EINVAL;
2006         qp  = uobj->object;
2007         obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
2008
2009         if (!list_empty(&obj->mcast_list)) {
2010                 put_uobj_write(uobj);
2011                 return -EBUSY;
2012         }
2013
2014         ret = ib_destroy_qp(qp);
2015         if (!ret)
2016                 uobj->live = 0;
2017
2018         put_uobj_write(uobj);
2019
2020         if (ret)
2021                 return ret;
2022
2023         if (obj->uxrcd)
2024                 atomic_dec(&obj->uxrcd->refcnt);
2025
2026         idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
2027
2028         mutex_lock(&file->mutex);
2029         list_del(&uobj->list);
2030         mutex_unlock(&file->mutex);
2031
2032         ib_uverbs_release_uevent(file, &obj->uevent);
2033
2034         resp.events_reported = obj->uevent.events_reported;
2035
2036         put_uobj(uobj);
2037
2038         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2039                          &resp, sizeof resp))
2040                 return -EFAULT;
2041
2042         return in_len;
2043 }
2044
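/*
 * POST_SEND wire layout: the fixed command header is followed by
 * wr_count work requests of wqe_size bytes each, then by sge_count
 * scatter/gather entries.  Each request is converted to an ib_send_wr
 * (UD address handles are looked up and held across the post), chained,
 * and posted in a single driver call; on failure resp.bad_wr tells user
 * space how far into the chain the rejected request was.
 */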
2045 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2046                             const char __user *buf, int in_len,
2047                             int out_len)
2048 {
2049         struct ib_uverbs_post_send      cmd;
2050         struct ib_uverbs_post_send_resp resp;
2051         struct ib_uverbs_send_wr       *user_wr;
2052         struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
2053         struct ib_qp                   *qp;
2054         int                             i, sg_ind;
2055         int                             is_ud;
2056         ssize_t                         ret = -EINVAL;
2057
2058         if (copy_from_user(&cmd, buf, sizeof cmd))
2059                 return -EFAULT;
2060
2061         if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
2062             cmd.sge_count * sizeof (struct ib_uverbs_sge))
2063                 return -EINVAL;
2064
2065         if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
2066                 return -EINVAL;
2067
2068         user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2069         if (!user_wr)
2070                 return -ENOMEM;
2071
2072         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2073         if (!qp)
2074                 goto out;
2075
2076         is_ud = qp->qp_type == IB_QPT_UD;
2077         sg_ind = 0;
2078         last = NULL;
2079         for (i = 0; i < cmd.wr_count; ++i) {
2080                 if (copy_from_user(user_wr,
2081                                    buf + sizeof cmd + i * cmd.wqe_size,
2082                                    cmd.wqe_size)) {
2083                         ret = -EFAULT;
2084                         goto out_put;
2085                 }
2086
2087                 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2088                         ret = -EINVAL;
2089                         goto out_put;
2090                 }
2091
2092                 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2093                                user_wr->num_sge * sizeof (struct ib_sge),
2094                                GFP_KERNEL);
2095                 if (!next) {
2096                         ret = -ENOMEM;
2097                         goto out_put;
2098                 }
2099
2100                 if (!last)
2101                         wr = next;
2102                 else
2103                         last->next = next;
2104                 last = next;
2105
2106                 next->next       = NULL;
2107                 next->wr_id      = user_wr->wr_id;
2108                 next->num_sge    = user_wr->num_sge;
2109                 next->opcode     = user_wr->opcode;
2110                 next->send_flags = user_wr->send_flags;
2111
2112                 if (is_ud) {
2113                         next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
2114                                                      file->ucontext);
2115                         if (!next->wr.ud.ah) {
2116                                 ret = -EINVAL;
2117                                 goto out_put;
2118                         }
2119                         next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
2120                         next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
2121                         if (next->opcode == IB_WR_SEND_WITH_IMM)
2122                                 next->ex.imm_data =
2123                                         (__be32 __force) user_wr->ex.imm_data;
2124                 } else {
2125                         switch (next->opcode) {
2126                         case IB_WR_RDMA_WRITE_WITH_IMM:
2127                                 next->ex.imm_data =
2128                                         (__be32 __force) user_wr->ex.imm_data;
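                                /* fall through: WRITE_WITH_IMM also needs remote_addr and rkey */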
2129                         case IB_WR_RDMA_WRITE:
2130                         case IB_WR_RDMA_READ:
2131                                 next->wr.rdma.remote_addr =
2132                                         user_wr->wr.rdma.remote_addr;
2133                                 next->wr.rdma.rkey        =
2134                                         user_wr->wr.rdma.rkey;
2135                                 break;
2136                         case IB_WR_SEND_WITH_IMM:
2137                                 next->ex.imm_data =
2138                                         (__be32 __force) user_wr->ex.imm_data;
2139                                 break;
2140                         case IB_WR_SEND_WITH_INV:
2141                                 next->ex.invalidate_rkey =
2142                                         user_wr->ex.invalidate_rkey;
2143                                 break;
2144                         case IB_WR_ATOMIC_CMP_AND_SWP:
2145                         case IB_WR_ATOMIC_FETCH_AND_ADD:
2146                                 next->wr.atomic.remote_addr =
2147                                         user_wr->wr.atomic.remote_addr;
2148                                 next->wr.atomic.compare_add =
2149                                         user_wr->wr.atomic.compare_add;
2150                                 next->wr.atomic.swap = user_wr->wr.atomic.swap;
2151                                 next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
2152                                 break;
2153                         default:
2154                                 break;
2155                         }
2156                 }
2157
2158                 if (next->num_sge) {
2159                         next->sg_list = (void *) next +
2160                                 ALIGN(sizeof *next, sizeof (struct ib_sge));
2161                         if (copy_from_user(next->sg_list,
2162                                            buf + sizeof cmd +
2163                                            cmd.wr_count * cmd.wqe_size +
2164                                            sg_ind * sizeof (struct ib_sge),
2165                                            next->num_sge * sizeof (struct ib_sge))) {
2166                                 ret = -EFAULT;
2167                                 goto out_put;
2168                         }
2169                         sg_ind += next->num_sge;
2170                 } else
2171                         next->sg_list = NULL;
2172         }
2173
2174         resp.bad_wr = 0;
2175         ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
2176         if (ret)
2177                 for (next = wr; next; next = next->next) {
2178                         ++resp.bad_wr;
2179                         if (next == bad_wr)
2180                                 break;
2181                 }
2182
2183         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2184                          &resp, sizeof resp))
2185                 ret = -EFAULT;
2186
2187 out_put:
2188         put_qp_read(qp);
2189
2190         while (wr) {
2191                 if (is_ud && wr->wr.ud.ah)
2192                         put_ah_read(wr->wr.ud.ah);
2193                 next = wr->next;
2194                 kfree(wr);
2195                 wr = next;
2196         }
2197
2198 out:
2199         kfree(user_wr);
2200
2201         return ret ? ret : in_len;
2202 }
2203
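/*
 * Shared helper for POST_RECV and POST_SRQ_RECV: validate the sizes
 * declared in the command, copy each user work request, and build a
 * chain of ib_recv_wr with the scatter/gather list stored inline after
 * each allocation.  Returns the head of the chain or an ERR_PTR().
 */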
2204 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2205                                                     int in_len,
2206                                                     u32 wr_count,
2207                                                     u32 sge_count,
2208                                                     u32 wqe_size)
2209 {
2210         struct ib_uverbs_recv_wr *user_wr;
2211         struct ib_recv_wr        *wr = NULL, *last, *next;
2212         int                       sg_ind;
2213         int                       i;
2214         int                       ret;
2215
2216         if (in_len < wqe_size * wr_count +
2217             sge_count * sizeof (struct ib_uverbs_sge))
2218                 return ERR_PTR(-EINVAL);
2219
2220         if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2221                 return ERR_PTR(-EINVAL);
2222
2223         user_wr = kmalloc(wqe_size, GFP_KERNEL);
2224         if (!user_wr)
2225                 return ERR_PTR(-ENOMEM);
2226
2227         sg_ind = 0;
2228         last = NULL;
2229         for (i = 0; i < wr_count; ++i) {
2230                 if (copy_from_user(user_wr, buf + i * wqe_size,
2231                                    wqe_size)) {
2232                         ret = -EFAULT;
2233                         goto err;
2234                 }
2235
2236                 if (user_wr->num_sge + sg_ind > sge_count) {
2237                         ret = -EINVAL;
2238                         goto err;
2239                 }
2240
2241                 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2242                                user_wr->num_sge * sizeof (struct ib_sge),
2243                                GFP_KERNEL);
2244                 if (!next) {
2245                         ret = -ENOMEM;
2246                         goto err;
2247                 }
2248
2249                 if (!last)
2250                         wr = next;
2251                 else
2252                         last->next = next;
2253                 last = next;
2254
2255                 next->next       = NULL;
2256                 next->wr_id      = user_wr->wr_id;
2257                 next->num_sge    = user_wr->num_sge;
2258
2259                 if (next->num_sge) {
2260                         next->sg_list = (void *) next +
2261                                 ALIGN(sizeof *next, sizeof (struct ib_sge));
2262                         if (copy_from_user(next->sg_list,
2263                                            buf + wr_count * wqe_size +
2264                                            sg_ind * sizeof (struct ib_sge),
2265                                            next->num_sge * sizeof (struct ib_sge))) {
2266                                 ret = -EFAULT;
2267                                 goto err;
2268                         }
2269                         sg_ind += next->num_sge;
2270                 } else
2271                         next->sg_list = NULL;
2272         }
2273
2274         kfree(user_wr);
2275         return wr;
2276
2277 err:
2278         kfree(user_wr);
2279
2280         while (wr) {
2281                 next = wr->next;
2282                 kfree(wr);
2283                 wr = next;
2284         }
2285
2286         return ERR_PTR(ret);
2287 }
2288
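/* POST_RECV: unmarshal the receive work requests and post them to the QP. */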
2289 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
2290                             const char __user *buf, int in_len,
2291                             int out_len)
2292 {
2293         struct ib_uverbs_post_recv      cmd;
2294         struct ib_uverbs_post_recv_resp resp;
2295         struct ib_recv_wr              *wr, *next, *bad_wr;
2296         struct ib_qp                   *qp;
2297         ssize_t                         ret = -EINVAL;
2298
2299         if (copy_from_user(&cmd, buf, sizeof cmd))
2300                 return -EFAULT;
2301
2302         wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2303                                        in_len - sizeof cmd, cmd.wr_count,
2304                                        cmd.sge_count, cmd.wqe_size);
2305         if (IS_ERR(wr))
2306                 return PTR_ERR(wr);
2307
2308         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2309         if (!qp)
2310                 goto out;
2311
2312         resp.bad_wr = 0;
2313         ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
2314
2315         put_qp_read(qp);
2316
2317         if (ret)
2318                 for (next = wr; next; next = next->next) {
2319                         ++resp.bad_wr;
2320                         if (next == bad_wr)
2321                                 break;
2322                 }
2323
2324         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2325                          &resp, sizeof resp))
2326                 ret = -EFAULT;
2327
2328 out:
2329         while (wr) {
2330                 next = wr->next;
2331                 kfree(wr);
2332                 wr = next;
2333         }
2334
2335         return ret ? ret : in_len;
2336 }
2337
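/* POST_SRQ_RECV: like POST_RECV, but the requests go to a shared receive queue. */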
2338 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
2339                                 const char __user *buf, int in_len,
2340                                 int out_len)
2341 {
2342         struct ib_uverbs_post_srq_recv      cmd;
2343         struct ib_uverbs_post_srq_recv_resp resp;
2344         struct ib_recv_wr                  *wr, *next, *bad_wr;
2345         struct ib_srq                      *srq;
2346         ssize_t                             ret = -EINVAL;
2347
2348         if (copy_from_user(&cmd, buf, sizeof cmd))
2349                 return -EFAULT;
2350
2351         wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2352                                        in_len - sizeof cmd, cmd.wr_count,
2353                                        cmd.sge_count, cmd.wqe_size);
2354         if (IS_ERR(wr))
2355                 return PTR_ERR(wr);
2356
2357         srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2358         if (!srq)
2359                 goto out;
2360
2361         resp.bad_wr = 0;
2362         ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
2363
2364         put_srq_read(srq);
2365
2366         if (ret)
2367                 for (next = wr; next; next = next->next) {
2368                         ++resp.bad_wr;
2369                         if (next == bad_wr)
2370                                 break;
2371                 }
2372
2373         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2374                          &resp, sizeof resp))
2375                 ret = -EFAULT;
2376
2377 out:
2378         while (wr) {
2379                 next = wr->next;
2380                 kfree(wr);
2381                 wr = next;
2382         }
2383
2384         return ret ? ret : in_len;
2385 }
2386
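/*
 * CREATE_AH: build an ib_ah_attr from the user-supplied address vector.
 * The GRH fields are always copied, but IB_AH_GRH is only set in
 * ah_flags when the caller marked the address as global.
 */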
2387 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
2388                             const char __user *buf, int in_len,
2389                             int out_len)
2390 {
2391         struct ib_uverbs_create_ah       cmd;
2392         struct ib_uverbs_create_ah_resp  resp;
2393         struct ib_uobject               *uobj;
2394         struct ib_pd                    *pd;
2395         struct ib_ah                    *ah;
2396         struct ib_ah_attr               attr;
2397         int ret;
2398
2399         if (out_len < sizeof resp)
2400                 return -ENOSPC;
2401
2402         if (copy_from_user(&cmd, buf, sizeof cmd))
2403                 return -EFAULT;
2404
2405         uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
2406         if (!uobj)
2407                 return -ENOMEM;
2408
2409         init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
2410         down_write(&uobj->mutex);
2411
2412         pd = idr_read_pd(cmd.pd_handle, file->ucontext);
2413         if (!pd) {
2414                 ret = -EINVAL;
2415                 goto err;
2416         }
2417
2418         attr.dlid              = cmd.attr.dlid;
2419         attr.sl                = cmd.attr.sl;
2420         attr.src_path_bits     = cmd.attr.src_path_bits;
2421         attr.static_rate       = cmd.attr.static_rate;
2422         attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
2423         attr.port_num          = cmd.attr.port_num;
2424         attr.grh.flow_label    = cmd.attr.grh.flow_label;
2425         attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
2426         attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
2427         attr.grh.traffic_class = cmd.attr.grh.traffic_class;
2428         memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
2429
2430         ah = ib_create_ah(pd, &attr);
2431         if (IS_ERR(ah)) {
2432                 ret = PTR_ERR(ah);
2433                 goto err_put;
2434         }
2435
2436         ah->uobject  = uobj;
2437         uobj->object = ah;
2438
2439         ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
2440         if (ret)
2441                 goto err_destroy;
2442
2443         resp.ah_handle = uobj->id;
2444
2445         if (copy_to_user((void __user *) (unsigned long) cmd.response,
2446                          &resp, sizeof resp)) {
2447                 ret = -EFAULT;
2448                 goto err_copy;
2449         }
2450
2451         put_pd_read(pd);
2452
2453         mutex_lock(&file->mutex);
2454         list_add_tail(&uobj->list, &file->ucontext->ah_list);
2455         mutex_unlock(&file->mutex);
2456
2457         uobj->live = 1;
2458
2459         up_write(&uobj->mutex);
2460
2461         return in_len;
2462
2463 err_copy:
2464         idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2465
2466 err_destroy:
2467         ib_destroy_ah(ah);
2468
2469 err_put:
2470         put_pd_read(pd);
2471
2472 err:
2473         put_uobj_write(uobj);
2474         return ret;
2475 }
2476
2477 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
2478                              const char __user *buf, int in_len, int out_len)
2479 {
2480         struct ib_uverbs_destroy_ah cmd;
2481         struct ib_ah               *ah;
2482         struct ib_uobject          *uobj;
2483         int                         ret;
2484
2485         if (copy_from_user(&cmd, buf, sizeof cmd))
2486                 return -EFAULT;
2487
2488         uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
2489         if (!uobj)
2490                 return -EINVAL;
2491         ah = uobj->object;
2492
2493         ret = ib_destroy_ah(ah);
2494         if (!ret)
2495                 uobj->live = 0;
2496
2497         put_uobj_write(uobj);
2498
2499         if (ret)
2500                 return ret;
2501
2502         idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
2503
2504         mutex_lock(&file->mutex);
2505         list_del(&uobj->list);
2506         mutex_unlock(&file->mutex);
2507
2508         put_uobj(uobj);
2509
2510         return in_len;
2511 }
2512
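/*
 * ATTACH_MCAST is idempotent: if the <GID, MLID> pair is already on the
 * QP's mcast_list the command succeeds without calling the driver again.
 * New attachments are recorded so DESTROY_QP can refuse while any
 * memberships remain.
 */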
2513 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2514                                const char __user *buf, int in_len,
2515                                int out_len)
2516 {
2517         struct ib_uverbs_attach_mcast cmd;
2518         struct ib_qp                 *qp;
2519         struct ib_uqp_object         *obj;
2520         struct ib_uverbs_mcast_entry *mcast;
2521         int                           ret;
2522
2523         if (copy_from_user(&cmd, buf, sizeof cmd))
2524                 return -EFAULT;
2525
2526         qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2527         if (!qp)
2528                 return -EINVAL;
2529
2530         obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2531
2532         list_for_each_entry(mcast, &obj->mcast_list, list)
2533                 if (cmd.mlid == mcast->lid &&
2534                     !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2535                         ret = 0;
2536                         goto out_put;
2537                 }
2538
2539         mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2540         if (!mcast) {
2541                 ret = -ENOMEM;
2542                 goto out_put;
2543         }
2544
2545         mcast->lid = cmd.mlid;
2546         memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2547
2548         ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2549         if (!ret)
2550                 list_add_tail(&mcast->list, &obj->mcast_list);
2551         else
2552                 kfree(mcast);
2553
2554 out_put:
2555         put_qp_write(qp);
2556
2557         return ret ? ret : in_len;
2558 }
2559
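/* DETACH_MCAST: detach from the HCA first, then drop the matching bookkeeping entry. */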
2560 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
2561                                const char __user *buf, int in_len,
2562                                int out_len)
2563 {
2564         struct ib_uverbs_detach_mcast cmd;
2565         struct ib_uqp_object         *obj;
2566         struct ib_qp                 *qp;
2567         struct ib_uverbs_mcast_entry *mcast;
2568         int                           ret = -EINVAL;
2569
2570         if (copy_from_user(&cmd, buf, sizeof cmd))
2571                 return -EFAULT;
2572
2573         qp = idr_write_qp(cmd.qp_handle, file->ucontext);
2574         if (!qp)
2575                 return -EINVAL;
2576
2577         ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
2578         if (ret)
2579                 goto out_put;
2580
2581         obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2582
2583         list_for_each_entry(mcast, &obj->mcast_list, list)
2584                 if (cmd.mlid == mcast->lid &&
2585                     !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2586                         list_del(&mcast->list);
2587                         kfree(mcast);
2588                         break;
2589                 }
2590
2591 out_put:
2592         put_qp_write(qp);
2593
2594         return ret ? ret : in_len;
2595 }
2596
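/*
 * Convert one user-space flow spec into the kernel union, rejecting any
 * spec with a non-zero reserved field or a size that does not match the
 * kernel layout for that type (ETH, IPV4, TCP/UDP).
 */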
2597 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
2598                                 union ib_flow_spec *ib_spec)
2599 {
2600         if (kern_spec->reserved)
2601                 return -EINVAL;
2602
2603         ib_spec->type = kern_spec->type;
2604
2605         switch (ib_spec->type) {
2606         case IB_FLOW_SPEC_ETH:
2607                 ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
2608                 if (ib_spec->eth.size != kern_spec->eth.size)
2609                         return -EINVAL;
2610                 memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
2611                        sizeof(struct ib_flow_eth_filter));
2612                 memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
2613                        sizeof(struct ib_flow_eth_filter));
2614                 break;
2615         case IB_FLOW_SPEC_IPV4:
2616                 ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
2617                 if (ib_spec->ipv4.size != kern_spec->ipv4.size)
2618                         return -EINVAL;
2619                 memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
2620                        sizeof(struct ib_flow_ipv4_filter));
2621                 memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
2622                        sizeof(struct ib_flow_ipv4_filter));
2623                 break;
2624         case IB_FLOW_SPEC_TCP:
2625         case IB_FLOW_SPEC_UDP:
2626                 ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
2627                 if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
2628                         return -EINVAL;
2629                 memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
2630                        sizeof(struct ib_flow_tcp_udp_filter));
2631                 memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
2632                        sizeof(struct ib_flow_tcp_udp_filter));
2633                 break;
2634         default:
2635                 return -EINVAL;
2636         }
2637         return 0;
2638 }
2639
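/*
 * Extended CREATE_FLOW: the flow_attr header arrives in the fixed
 * command and is followed by num_of_specs variable-size specs in the
 * input stream.  Sniffer rules require CAP_NET_ADMIN and every rule
 * requires CAP_NET_RAW.  Each spec is converted with
 * kern_spec_to_ib_spec(); the loop must consume exactly
 * cmd.flow_attr.size bytes or the request is rejected.
 */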
2640 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
2641                              struct ib_udata *ucore,
2642                              struct ib_udata *uhw)
2643 {
2644         struct ib_uverbs_create_flow      cmd;
2645         struct ib_uverbs_create_flow_resp resp;
2646         struct ib_uobject                 *uobj;
2647         struct ib_flow                    *flow_id;
2648         struct ib_uverbs_flow_attr        *kern_flow_attr;
2649         struct ib_flow_attr               *flow_attr;
2650         struct ib_qp                      *qp;
2651         int err = 0;
2652         void *kern_spec;
2653         void *ib_spec;
2654         int i;
2655
2656         if (ucore->inlen < sizeof(cmd))
2657                 return -EINVAL;
2658
2659         if (ucore->outlen < sizeof(resp))
2660                 return -ENOSPC;
2661
2662         err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
2663         if (err)
2664                 return err;
2665
2666         ucore->inbuf += sizeof(cmd);
2667         ucore->inlen -= sizeof(cmd);
2668
2669         if (cmd.comp_mask)
2670                 return -EINVAL;
2671
2672         if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
2673              !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
2674                 return -EPERM;
2675
2676         if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
2677                 return -EINVAL;
2678
2679         if (cmd.flow_attr.size > ucore->inlen ||
2680             cmd.flow_attr.size >
2681             (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
2682                 return -EINVAL;
2683
2684         if (cmd.flow_attr.reserved[0] ||
2685             cmd.flow_attr.reserved[1])
2686                 return -EINVAL;
2687
2688         if (cmd.flow_attr.num_of_specs) {
2689                 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
2690                                          GFP_KERNEL);
2691                 if (!kern_flow_attr)
2692                         return -ENOMEM;
2693
2694                 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
2695                 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
2696                                          cmd.flow_attr.size);
2697                 if (err)
2698                         goto err_free_attr;
2699         } else {
2700                 kern_flow_attr = &cmd.flow_attr;
2701         }
2702
2703         uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
2704         if (!uobj) {
2705                 err = -ENOMEM;
2706                 goto err_free_attr;
2707         }
2708         init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
2709         down_write(&uobj->mutex);
2710
2711         qp = idr_read_qp(cmd.qp_handle, file->ucontext);
2712         if (!qp) {
2713                 err = -EINVAL;
2714                 goto err_uobj;
2715         }
2716
2717         flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
2718         if (!flow_attr) {
2719                 err = -ENOMEM;
2720                 goto err_put;
2721         }
2722
2723         flow_attr->type = kern_flow_attr->type;
2724         flow_attr->priority = kern_flow_attr->priority;
2725         flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
2726         flow_attr->port = kern_flow_attr->port;
2727         flow_attr->flags = kern_flow_attr->flags;
2728         flow_attr->size = sizeof(*flow_attr);
2729
2730         kern_spec = kern_flow_attr + 1;
2731         ib_spec = flow_attr + 1;
2732         for (i = 0; i < flow_attr->num_of_specs &&
2733              cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
2734              cmd.flow_attr.size >=
2735              ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
2736                 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
2737                 if (err)
2738                         goto err_free;
2739                 flow_attr->size +=
2740                         ((union ib_flow_spec *) ib_spec)->size;
2741                 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
2742                 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
2743                 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
2744         }
2745         if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
2746                 pr_warn("create flow failed, flow %d: %d bytes left from uverbs cmd\n",
2747                         i, cmd.flow_attr.size);
2748                 err = -EINVAL;
2749                 goto err_free;
2750         }
2751         flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
2752         if (IS_ERR(flow_id)) {
2753                 err = PTR_ERR(flow_id);
2754                 goto err_free;
2755         }
2756         flow_id->qp = qp;
2757         flow_id->uobject = uobj;
2758         uobj->object = flow_id;
2759
2760         err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
2761         if (err)
2762                 goto destroy_flow;
2763
2764         memset(&resp, 0, sizeof(resp));
2765         resp.flow_handle = uobj->id;
2766
2767         err = ib_copy_to_udata(ucore,
2768                                &resp, sizeof(resp));
2769         if (err)
2770                 goto err_copy;
2771
2772         put_qp_read(qp);
2773         mutex_lock(&file->mutex);
2774         list_add_tail(&uobj->list, &file->ucontext->rule_list);
2775         mutex_unlock(&file->mutex);
2776
2777         uobj->live = 1;
2778
2779         up_write(&uobj->mutex);
2780         kfree(flow_attr);
2781         if (cmd.flow_attr.num_of_specs)
2782                 kfree(kern_flow_attr);
2783         return 0;
2784 err_copy:
2785         idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
2786 destroy_flow:
2787         ib_destroy_flow(flow_id);
2788 err_free:
2789         kfree(flow_attr);
2790 err_put:
2791         put_qp_read(qp);
2792 err_uobj:
2793         put_uobj_write(uobj);
2794 err_free_attr:
2795         if (cmd.flow_attr.num_of_specs)
2796                 kfree(kern_flow_attr);
2797         return err;
2798 }
2799
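/* Extended DESTROY_FLOW: destroy the flow rule and release its uobject and handle. */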
2800 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
2801                               struct ib_udata *ucore,
2802                               struct ib_udata *uhw)
2803 {
2804         struct ib_uverbs_destroy_flow   cmd;
2805         struct ib_flow                  *flow_id;
2806         struct ib_uobject               *uobj;
2807         int                             ret;
2808
2809         if (ucore->inlen < sizeof(cmd))
2810                 return -EINVAL;
2811
2812         ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
2813         if (ret)
2814                 return ret;
2815
2816         if (cmd.comp_mask)
2817                 return -EINVAL;
2818
2819         uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
2820                               file->ucontext);
2821         if (!uobj)
2822                 return -EINVAL;
2823         flow_id = uobj->object;
2824
2825         ret = ib_destroy_flow(flow_id);
2826         if (!ret)
2827                 uobj->live = 0;
2828
2829         put_uobj_write(uobj);
2830
2831         idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
2832
2833         mutex_lock(&file->mutex);
2834         list_del(&uobj->list);
2835         mutex_unlock(&file->mutex);
2836
2837         put_uobj(uobj);
2838
2839         return ret;
2840 }
2841
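/*
 * Common backend for CREATE_SRQ and CREATE_XSRQ.  For IB_SRQT_XRC the
 * XRC domain and completion queue named in the command are looked up and
 * pinned in addition to the PD, and the XRCD reference is kept for the
 * lifetime of the SRQ.
 */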
2842 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
2843                                 struct ib_uverbs_create_xsrq *cmd,
2844                                 struct ib_udata *udata)
2845 {
2846         struct ib_uverbs_create_srq_resp resp;
2847         struct ib_usrq_object           *obj;
2848         struct ib_pd                    *pd;
2849         struct ib_srq                   *srq;
2850         struct ib_uobject               *uninitialized_var(xrcd_uobj);
2851         struct ib_srq_init_attr          attr;
2852         int ret;
2853
2854         obj = kmalloc(sizeof *obj, GFP_KERNEL);
2855         if (!obj)
2856                 return -ENOMEM;
2857
2858         init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
2859         down_write(&obj->uevent.uobject.mutex);
2860
2861         if (cmd->srq_type == IB_SRQT_XRC) {
2862                 attr.ext.xrc.xrcd  = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
2863                 if (!attr.ext.xrc.xrcd) {
2864                         ret = -EINVAL;
2865                         goto err;
2866                 }
2867
2868                 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
2869                 atomic_inc(&obj->uxrcd->refcnt);
2870
2871                 attr.ext.xrc.cq  = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
2872                 if (!attr.ext.xrc.cq) {
2873                         ret = -EINVAL;
2874                         goto err_put_xrcd;
2875                 }
2876         }
2877
2878         pd  = idr_read_pd(cmd->pd_handle, file->ucontext);
2879         if (!pd) {
2880                 ret = -EINVAL;
2881                 goto err_put_cq;
2882         }
2883
2884         attr.event_handler  = ib_uverbs_srq_event_handler;
2885         attr.srq_context    = file;
2886         attr.srq_type       = cmd->srq_type;
2887         attr.attr.max_wr    = cmd->max_wr;
2888         attr.attr.max_sge   = cmd->max_sge;
2889         attr.attr.srq_limit = cmd->srq_limit;
2890
2891         obj->uevent.events_reported = 0;
2892         INIT_LIST_HEAD(&obj->uevent.event_list);
2893
2894         srq = pd->device->create_srq(pd, &attr, udata);
2895         if (IS_ERR(srq)) {
2896                 ret = PTR_ERR(srq);
2897                 goto err_put;
2898         }
2899
2900         srq->device        = pd->device;
2901         srq->pd            = pd;
2902         srq->srq_type      = cmd->srq_type;
2903         srq->uobject       = &obj->uevent.uobject;
2904         srq->event_handler = attr.event_handler;
2905         srq->srq_context   = attr.srq_context;
2906
2907         if (cmd->srq_type == IB_SRQT_XRC) {
2908                 srq->ext.xrc.cq   = attr.ext.xrc.cq;
2909                 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
2910                 atomic_inc(&attr.ext.xrc.cq->usecnt);
2911                 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
2912         }
2913
2914         atomic_inc(&pd->usecnt);
2915         atomic_set(&srq->usecnt, 0);
2916
2917         obj->uevent.uobject.object = srq;
2918         ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
2919         if (ret)
2920                 goto err_destroy;
2921
2922         memset(&resp, 0, sizeof resp);
2923         resp.srq_handle = obj->uevent.uobject.id;
2924         resp.max_wr     = attr.attr.max_wr;
2925         resp.max_sge    = attr.attr.max_sge;
2926         if (cmd->srq_type == IB_SRQT_XRC)
2927                 resp.srqn = srq->ext.xrc.srq_num;
2928
2929         if (copy_to_user((void __user *) (unsigned long) cmd->response,
2930                          &resp, sizeof resp)) {
2931                 ret = -EFAULT;
2932                 goto err_copy;
2933         }
2934
2935         if (cmd->srq_type == IB_SRQT_XRC) {
2936                 put_uobj_read(xrcd_uobj);
2937                 put_cq_read(attr.ext.xrc.cq);
2938         }
2939         put_pd_read(pd);
2940
2941         mutex_lock(&file->mutex);
2942         list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
2943         mutex_unlock(&file->mutex);
2944
2945         obj->uevent.uobject.live = 1;
2946
2947         up_write(&obj->uevent.uobject.mutex);
2948
2949         return 0;
2950
2951 err_copy:
2952         idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
2953
2954 err_destroy:
2955         ib_destroy_srq(srq);
2956
2957 err_put:
2958         put_pd_read(pd);
2959
2960 err_put_cq:
2961         if (cmd->srq_type == IB_SRQT_XRC)
2962                 put_cq_read(attr.ext.xrc.cq);
2963
2964 err_put_xrcd:
2965         if (cmd->srq_type == IB_SRQT_XRC) {
2966                 atomic_dec(&obj->uxrcd->refcnt);
2967                 put_uobj_read(xrcd_uobj);
2968         }
2969
2970 err:
2971         put_uobj_write(&obj->uevent.uobject);
2972         return ret;
2973 }
2974
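/*
 * Legacy CREATE_SRQ: repackage the command as an IB_SRQT_BASIC
 * create_xsrq request and reuse __uverbs_create_xsrq().
 */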
2975 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
2976                              const char __user *buf, int in_len,
2977                              int out_len)
2978 {
2979         struct ib_uverbs_create_srq      cmd;
2980         struct ib_uverbs_create_xsrq     xcmd;
2981         struct ib_uverbs_create_srq_resp resp;
2982         struct ib_udata                  udata;
2983         int ret;
2984
2985         if (out_len < sizeof resp)
2986                 return -ENOSPC;
2987
2988         if (copy_from_user(&cmd, buf, sizeof cmd))
2989                 return -EFAULT;
2990
2991         xcmd.response    = cmd.response;
2992         xcmd.user_handle = cmd.user_handle;
2993         xcmd.srq_type    = IB_SRQT_BASIC;
2994         xcmd.pd_handle   = cmd.pd_handle;
2995         xcmd.max_wr      = cmd.max_wr;
2996         xcmd.max_sge     = cmd.max_sge;
2997         xcmd.srq_limit   = cmd.srq_limit;
2998
2999         INIT_UDATA(&udata, buf + sizeof cmd,
3000                    (unsigned long) cmd.response + sizeof resp,
3001                    in_len - sizeof cmd, out_len - sizeof resp);
3002
3003         ret = __uverbs_create_xsrq(file, &xcmd, &udata);
3004         if (ret)
3005                 return ret;
3006
3007         return in_len;
3008 }
3009
3010 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3011                               const char __user *buf, int in_len, int out_len)
3012 {
3013         struct ib_uverbs_create_xsrq     cmd;
3014         struct ib_uverbs_create_srq_resp resp;
3015         struct ib_udata                  udata;
3016         int ret;
3017
3018         if (out_len < sizeof resp)
3019                 return -ENOSPC;
3020
3021         if (copy_from_user(&cmd, buf, sizeof cmd))
3022                 return -EFAULT;
3023
3024         INIT_UDATA(&udata, buf + sizeof cmd,
3025                    (unsigned long) cmd.response + sizeof resp,
3026                    in_len - sizeof cmd, out_len - sizeof resp);
3027
3028         ret = __uverbs_create_xsrq(file, &cmd, &udata);
3029         if (ret)
3030                 return ret;
3031
3032         return in_len;
3033 }
3034
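/* MODIFY_SRQ: update max_wr and/or srq_limit according to attr_mask. */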
3035 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3036                              const char __user *buf, int in_len,
3037                              int out_len)
3038 {
3039         struct ib_uverbs_modify_srq cmd;
3040         struct ib_udata             udata;
3041         struct ib_srq              *srq;
3042         struct ib_srq_attr          attr;
3043         int                         ret;
3044
3045         if (copy_from_user(&cmd, buf, sizeof cmd))
3046                 return -EFAULT;
3047
3048         INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3049                    out_len);
3050
3051         srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3052         if (!srq)
3053                 return -EINVAL;
3054
3055         attr.max_wr    = cmd.max_wr;
3056         attr.srq_limit = cmd.srq_limit;
3057
3058         ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
3059
3060         put_srq_read(srq);
3061
3062         return ret ? ret : in_len;
3063 }
3064
3065 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3066                             const char __user *buf,
3067                             int in_len, int out_len)
3068 {
3069         struct ib_uverbs_query_srq      cmd;
3070         struct ib_uverbs_query_srq_resp resp;
3071         struct ib_srq_attr              attr;
3072         struct ib_srq                   *srq;
3073         int                             ret;
3074
3075         if (out_len < sizeof resp)
3076                 return -ENOSPC;
3077
3078         if (copy_from_user(&cmd, buf, sizeof cmd))
3079                 return -EFAULT;
3080
3081         srq = idr_read_srq(cmd.srq_handle, file->ucontext);
3082         if (!srq)
3083                 return -EINVAL;
3084
3085         ret = ib_query_srq(srq, &attr);
3086
3087         put_srq_read(srq);
3088
3089         if (ret)
3090                 return ret;
3091
3092         memset(&resp, 0, sizeof resp);
3093
3094         resp.max_wr    = attr.max_wr;
3095         resp.max_sge   = attr.max_sge;
3096         resp.srq_limit = attr.srq_limit;
3097
3098         if (copy_to_user((void __user *) (unsigned long) cmd.response,
3099                          &resp, sizeof resp))
3100                 return -EFAULT;
3101
3102         return in_len;
3103 }
3104
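/*
 * DESTROY_SRQ: on success drop the XRCD reference taken at creation
 * time (XRC SRQs only), remove the handle, release pending async events
 * and report how many events had already been delivered.
 */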
3105 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3106                               const char __user *buf, int in_len,
3107                               int out_len)
3108 {
3109         struct ib_uverbs_destroy_srq      cmd;
3110         struct ib_uverbs_destroy_srq_resp resp;
3111         struct ib_uobject                *uobj;
3112         struct ib_srq                    *srq;
3113         struct ib_uevent_object          *obj;
3114         int                               ret = -EINVAL;
3115         struct ib_usrq_object            *us;
3116         enum ib_srq_type                  srq_type;
3117
3118         if (copy_from_user(&cmd, buf, sizeof cmd))
3119                 return -EFAULT;
3120
3121         uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
3122         if (!uobj)
3123                 return -EINVAL;
3124         srq = uobj->object;
3125         obj = container_of(uobj, struct ib_uevent_object, uobject);
3126         srq_type = srq->srq_type;
3127
3128         ret = ib_destroy_srq(srq);
3129         if (!ret)
3130                 uobj->live = 0;
3131
3132         put_uobj_write(uobj);
3133
3134         if (ret)
3135                 return ret;
3136
3137         if (srq_type == IB_SRQT_XRC) {
3138                 us = container_of(obj, struct ib_usrq_object, uevent);
3139                 atomic_dec(&us->uxrcd->refcnt);
3140         }
3141
3142         idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
3143
3144         mutex_lock(&file->mutex);
3145         list_del(&uobj->list);
3146         mutex_unlock(&file->mutex);
3147
3148         ib_uverbs_release_uevent(file, obj);
3149
3150         memset(&resp, 0, sizeof resp);
3151         resp.events_reported = obj->events_reported;
3152
3153         put_uobj(uobj);
3154
3155         if (copy_to_user((void __user *) (unsigned long) cmd.response,
3156                          &resp, sizeof resp))
3157                 ret = -EFAULT;
3158
3159         return ret ? ret : in_len;
3160 }