drivers/accel/qaic/qaic_control.c
1 // SPDX-License-Identifier: GPL-2.0-only
2
3 /* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
4 /* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */
5
6 #include <asm/byteorder.h>
7 #include <linux/completion.h>
8 #include <linux/crc32.h>
9 #include <linux/delay.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/kref.h>
12 #include <linux/list.h>
13 #include <linux/mhi.h>
14 #include <linux/mm.h>
15 #include <linux/moduleparam.h>
16 #include <linux/mutex.h>
17 #include <linux/pci.h>
18 #include <linux/scatterlist.h>
19 #include <linux/types.h>
20 #include <linux/uaccess.h>
21 #include <linux/workqueue.h>
22 #include <linux/wait.h>
23 #include <drm/drm_device.h>
24 #include <drm/drm_file.h>
25 #include <uapi/drm/qaic_accel.h>
26
27 #include "qaic.h"
28
29 #define MANAGE_MAGIC_NUMBER             ((__force __le32)0x43494151) /* "QAIC" in little endian */
30 #define QAIC_DBC_Q_GAP                  SZ_256
31 #define QAIC_DBC_Q_BUF_ALIGN            SZ_4K
32 #define QAIC_MANAGE_EXT_MSG_LENGTH      SZ_64K /* Max DMA message length */
33 #define QAIC_WRAPPER_MAX_SIZE           SZ_4K
34 #define QAIC_MHI_RETRY_WAIT_MS          100
35 #define QAIC_MHI_RETRY_MAX              20
36
37 static unsigned int control_resp_timeout_s = 60; /* 60 sec default */
38 module_param(control_resp_timeout_s, uint, 0600);
39 MODULE_PARM_DESC(control_resp_timeout_s, "Timeout for NNC responses from QSM");
40
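/*
 * Kernel-side copy of a user's manage message. len is the number of bytes in
 * data and count is the number of transactions encoded in it, mirroring the
 * len/count fields of struct qaic_manage_msg from the uapi header.
 */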
41 struct manage_msg {
42         u32 len;
43         u32 count;
44         u8 data[];
45 };
46
47 /*
48  * Wire encoding structures for the manage protocol.
49  * All fields are little endian on the wire.
50  */
51 struct wire_msg_hdr {
52         __le32 crc32; /* crc of everything following this field in the message */
53         __le32 magic_number;
54         __le32 sequence_number;
55         __le32 len; /* length of this message */
56         __le32 count; /* number of transactions in this message */
57         __le32 handle; /* unique id to track the resources consumed */
58         __le32 partition_id; /* partition id for the request (signed) */
59         __le32 padding; /* must be 0 */
60 } __packed;
61
62 struct wire_msg {
63         struct wire_msg_hdr hdr;
64         u8 data[];
65 } __packed;
66
67 struct wire_trans_hdr {
68         __le32 type;
69         __le32 len;
70 } __packed;
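/*
 * Layout sketch of one manage message as it appears on the wire (everything
 * little endian):
 *
 *   struct wire_msg_hdr      crc32, magic, sequence, len, count, handle, ...
 *   struct wire_trans_hdr    transaction 0 header (type, len)
 *   u8 payload[]             ... followed by that transaction's payload
 *   struct wire_trans_hdr    transaction 1 header
 *   ...
 *
 * hdr.count transactions follow the header, and hdr.len covers the whole
 * message including the header itself.
 */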
71
72 /* Each message sent from the driver to the device is organized as a list of wrapper_msg */
73 struct wrapper_msg {
74         struct list_head list;
75         struct kref ref_count;
76         u32 len; /* length of data to transfer */
77         struct wrapper_list *head;
78         union {
79                 struct wire_msg msg;
80                 struct wire_trans_hdr trans;
81         };
82 };
83
84 struct wrapper_list {
85         struct list_head list;
86         spinlock_t lock; /* Protects the list state during additions and removals */
87 };
88
89 struct wire_trans_passthrough {
90         struct wire_trans_hdr hdr;
91         u8 data[];
92 } __packed;
93
94 struct wire_addr_size_pair {
95         __le64 addr;
96         __le64 size;
97 } __packed;
98
99 struct wire_trans_dma_xfer {
100         struct wire_trans_hdr hdr;
101         __le32 tag;
102         __le32 count;
103         __le32 dma_chunk_id;
104         __le32 padding;
105         struct wire_addr_size_pair data[];
106 } __packed;
107
108 /* Initiated by device to continue the DMA xfer of a large piece of data */
109 struct wire_trans_dma_xfer_cont {
110         struct wire_trans_hdr hdr;
111         __le32 dma_chunk_id;
112         __le32 padding;
113         __le64 xferred_size;
114 } __packed;
115
116 struct wire_trans_activate_to_dev {
117         struct wire_trans_hdr hdr;
118         __le64 req_q_addr;
119         __le64 rsp_q_addr;
120         __le32 req_q_size;
121         __le32 rsp_q_size;
122         __le32 buf_len;
123         __le32 options; /* unused, but BIT(16) has meaning to the device */
124 } __packed;
125
126 struct wire_trans_activate_from_dev {
127         struct wire_trans_hdr hdr;
128         __le32 status;
129         __le32 dbc_id;
130         __le64 options; /* unused */
131 } __packed;
132
133 struct wire_trans_deactivate_from_dev {
134         struct wire_trans_hdr hdr;
135         __le32 status;
136         __le32 dbc_id;
137 } __packed;
138
139 struct wire_trans_terminate_to_dev {
140         struct wire_trans_hdr hdr;
141         __le32 handle;
142         __le32 padding;
143 } __packed;
144
145 struct wire_trans_terminate_from_dev {
146         struct wire_trans_hdr hdr;
147         __le32 status;
148         __le32 padding;
149 } __packed;
150
151 struct wire_trans_status_to_dev {
152         struct wire_trans_hdr hdr;
153 } __packed;
154
155 struct wire_trans_status_from_dev {
156         struct wire_trans_hdr hdr;
157         __le16 major;
158         __le16 minor;
159         __le32 status;
160         __le64 status_flags;
161 } __packed;
162
163 struct wire_trans_validate_part_to_dev {
164         struct wire_trans_hdr hdr;
165         __le32 part_id;
166         __le32 padding;
167 } __packed;
168
169 struct wire_trans_validate_part_from_dev {
170         struct wire_trans_hdr hdr;
171         __le32 status;
172         __le32 padding;
173 } __packed;
174
175 struct xfer_queue_elem {
176         /*
177          * Node in list of ongoing transfer request on control channel.
178          * Maintained by root device struct.
179          */
180         struct list_head list;
181         /* Sequence number of this transfer request */
182         u32 seq_num;
183         /* Used to wait for completion of the transfer request */
184         struct completion xfer_done;
185         /* Received data from device */
186         void *buf;
187 };
188
189 struct dma_xfer {
190         /* Node in list of DMA transfers which is used for cleanup */
191         struct list_head list;
192         /* SG table of memory used for DMA */
193         struct sg_table *sgt;
194         /* Array of pages used for DMA */
195         struct page **page_list;
196         /* Number of pages used for DMA */
197         unsigned long nr_pages;
198 };
199
200 struct ioctl_resources {
201         /* List of all DMA transfers which is used later for cleanup */
202         struct list_head dma_xfers;
203         /* Base address of request queue which belongs to a DBC */
204         void *buf;
205         /*
206          * Base bus address of request queue which belongs to a DBC. Response
207          * queue base bus address can be calculated by adding size of request
208          * queue to base bus address of request queue.
209          */
210         dma_addr_t dma_addr;
211         /* Total size of request queue and response queue in bytes */
212         u32 total_size;
213         /* Number of elements that can be queued in each of the request and response queues */
214         u32 nelem;
215         /* Base address of response queue which belongs to a DBC */
216         void *rsp_q_base;
217         /* Status of the NNC message received */
218         u32 status;
219         /* ID of the DBC received from the device */
220         u32 dbc_id;
221         /*
222          * DMA transfer request messages can be large, so it may not be
223          * possible to send them in one shot. In such cases the message is
224          * broken into chunks, and this field stores the ID of those chunks.
225          */
226         u32 dma_chunk_id;
227         /* Total number of bytes transferred for a DMA xfer request */
228         u64 xferred_dma_size;
229         /* Header of transaction message received from user. Used during DMA xfer request. */
230         void *trans_hdr;
231 };
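/*
 * A single ioctl_resources instance lives on the stack of qaic_manage() and is
 * carried across the dma_xfer_continue retry loop so that a chunked DMA
 * transfer can resume from xferred_dma_size using the same dma_chunk_id.
 */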
232
233 struct resp_work {
234         struct work_struct work;
235         struct qaic_device *qdev;
236         void *buf;
237 };
238
239 /*
240  * Since we're working with little endian messages, it's useful to be able to
241  * increment without filling a whole line with conversions back and forth just
242  * to add one to a message count.
243  */
244 static __le32 incr_le32(__le32 val)
245 {
246         return cpu_to_le32(le32_to_cpu(val) + 1);
247 }
248
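/*
 * Compute the CRC32 of an outgoing message by walking every wrapper in order.
 * The crc32 field at the start of the header is still zero at this point
 * (wrappers come from kzalloc()), so the result matches what valid_crc()
 * recomputes on the receive side with that field zeroed out.
 */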
249 static u32 gen_crc(void *msg)
250 {
251         struct wrapper_list *wrappers = msg;
252         struct wrapper_msg *w;
253         u32 crc = ~0;
254
255         list_for_each_entry(w, &wrappers->list, list)
256                 crc = crc32(crc, &w->msg, w->len);
257
258         return crc ^ ~0;
259 }
260
261 static u32 gen_crc_stub(void *msg)
262 {
263         return 0;
264 }
265
266 static bool valid_crc(void *msg)
267 {
268         struct wire_msg_hdr *hdr = msg;
269         bool ret;
270         u32 crc;
271
272         /*
273          * The output of this algorithm is always converted to the native
274          * endianness.
275          */
276         crc = le32_to_cpu(hdr->crc32);
277         hdr->crc32 = 0;
278         ret = (crc32(~0, msg, le32_to_cpu(hdr->len)) ^ ~0) == crc;
279         hdr->crc32 = cpu_to_le32(crc);
280         return ret;
281 }
282
283 static bool valid_crc_stub(void *msg)
284 {
285         return true;
286 }
287
288 static void free_wrapper(struct kref *ref)
289 {
290         struct wrapper_msg *wrapper = container_of(ref, struct wrapper_msg, ref_count);
291
292         list_del(&wrapper->list);
293         kfree(wrapper);
294 }
295
296 static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources,
297                          struct qaic_user *usr)
298 {
299         u32 dbc_id = resources->dbc_id;
300
301         if (resources->buf) {
302                 wait_event_interruptible(qdev->dbc[dbc_id].dbc_release, !qdev->dbc[dbc_id].in_use);
303                 qdev->dbc[dbc_id].req_q_base = resources->buf;
304                 qdev->dbc[dbc_id].rsp_q_base = resources->rsp_q_base;
305                 qdev->dbc[dbc_id].dma_addr = resources->dma_addr;
306                 qdev->dbc[dbc_id].total_size = resources->total_size;
307                 qdev->dbc[dbc_id].nelem = resources->nelem;
308                 enable_dbc(qdev, dbc_id, usr);
309                 qdev->dbc[dbc_id].in_use = true;
310                 resources->buf = NULL;
311         }
312 }
313
314 static void free_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources)
315 {
316         if (resources->buf)
317                 dma_free_coherent(&qdev->pdev->dev, resources->total_size, resources->buf,
318                                   resources->dma_addr);
319         resources->buf = NULL;
320 }
321
322 static void free_dma_xfers(struct qaic_device *qdev, struct ioctl_resources *resources)
323 {
324         struct dma_xfer *xfer;
325         struct dma_xfer *x;
326         int i;
327
328         list_for_each_entry_safe(xfer, x, &resources->dma_xfers, list) {
329                 dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
330                 sg_free_table(xfer->sgt);
331                 kfree(xfer->sgt);
332                 for (i = 0; i < xfer->nr_pages; ++i)
333                         put_page(xfer->page_list[i]);
334                 kfree(xfer->page_list);
335                 list_del(&xfer->list);
336                 kfree(xfer);
337         }
338 }
339
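/*
 * Allocate a zeroed wrapper of size bytes, initialize its refcount and link it
 * at the tail of the wrapper list.
 */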
340 static struct wrapper_msg *add_wrapper(struct wrapper_list *wrappers, u32 size)
341 {
342         struct wrapper_msg *w = kzalloc(size, GFP_KERNEL);
343
344         if (!w)
345                 return NULL;
346         list_add_tail(&w->list, &wrappers->list);
347         kref_init(&w->ref_count);
348         w->head = wrappers;
349         return w;
350 }
351
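/*
 * Copy a passthrough transaction from the user message into a new wrapper,
 * converting its header to the little endian wire format and growing the
 * message header's len and count accordingly.
 */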
352 static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
353                               u32 *user_len)
354 {
355         struct qaic_manage_trans_passthrough *in_trans = trans;
356         struct wire_trans_passthrough *out_trans;
357         struct wrapper_msg *trans_wrapper;
358         struct wrapper_msg *wrapper;
359         struct wire_msg *msg;
360         u32 msg_hdr_len;
361
362         wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
363         msg = &wrapper->msg;
364         msg_hdr_len = le32_to_cpu(msg->hdr.len);
365
366         if (in_trans->hdr.len % 8 != 0)
367                 return -EINVAL;
368
369         if (msg_hdr_len + in_trans->hdr.len > QAIC_MANAGE_EXT_MSG_LENGTH)
370                 return -ENOSPC;
371
372         trans_wrapper = add_wrapper(wrappers,
373                                     offsetof(struct wrapper_msg, trans) + in_trans->hdr.len);
374         if (!trans_wrapper)
375                 return -ENOMEM;
376         trans_wrapper->len = in_trans->hdr.len;
377         out_trans = (struct wire_trans_passthrough *)&trans_wrapper->trans;
378
379         memcpy(out_trans->data, in_trans->data, in_trans->hdr.len - sizeof(in_trans->hdr));
380         msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
381         msg->hdr.count = incr_le32(msg->hdr.count);
382         *user_len += in_trans->hdr.len;
383         out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_PASSTHROUGH_TO_DEV);
384         out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);
385
386         return 0;
387 }
388
389 /* returns error code for failure, 0 if enough pages alloc'd, 1 if dma_cont is needed */
390 static int find_and_map_user_pages(struct qaic_device *qdev,
391                                    struct qaic_manage_trans_dma_xfer *in_trans,
392                                    struct ioctl_resources *resources, struct dma_xfer *xfer)
393 {
394         unsigned long need_pages;
395         struct page **page_list;
396         unsigned long nr_pages;
397         struct sg_table *sgt;
398         u64 xfer_start_addr;
399         int ret;
400         int i;
401
402         xfer_start_addr = in_trans->addr + resources->xferred_dma_size;
403
404         need_pages = DIV_ROUND_UP(in_trans->size + offset_in_page(xfer_start_addr) -
405                                   resources->xferred_dma_size, PAGE_SIZE);
406
407         nr_pages = need_pages;
408
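        /*
         * If the full page array cannot be allocated, progressively halve the
         * request; mapping fewer pages than needed is reported to the caller,
         * which will continue the transfer in a follow-up DMA chunk.
         */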
409         while (1) {
410                 page_list = kmalloc_array(nr_pages, sizeof(*page_list), GFP_KERNEL | __GFP_NOWARN);
411                 if (!page_list) {
412                         nr_pages = nr_pages / 2;
413                         if (!nr_pages)
414                                 return -ENOMEM;
415                 } else {
416                         break;
417                 }
418         }
419
420         ret = get_user_pages_fast(xfer_start_addr, nr_pages, 0, page_list);
421         if (ret < 0 || ret != nr_pages) {
422                 nr_pages = ret < 0 ? 0 : ret; /* put only the pages that were pinned */
423                 ret = -EFAULT;
                goto put_pages;
424         }
425
426         sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
427         if (!sgt) {
428                 ret = -ENOMEM;
429                 goto put_pages;
430         }
431
432         ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages,
433                                         offset_in_page(xfer_start_addr),
434                                         in_trans->size - resources->xferred_dma_size, GFP_KERNEL);
435         if (ret) {
436                 ret = -ENOMEM;
437                 goto free_sgt;
438         }
439
440         ret = dma_map_sgtable(&qdev->pdev->dev, sgt, DMA_TO_DEVICE, 0);
441         if (ret)
442                 goto free_table;
443
444         xfer->sgt = sgt;
445         xfer->page_list = page_list;
446         xfer->nr_pages = nr_pages;
447
448         return need_pages > nr_pages ? 1 : 0;
449
450 free_table:
451         sg_free_table(sgt);
452 free_sgt:
453         kfree(sgt);
454 put_pages:
455         for (i = 0; i < nr_pages; ++i)
456                 put_page(page_list[i]);
457 free_page_list:
458         kfree(page_list);
459         return ret;
460 }
461
462 /* returns error code for failure, 0 if everything was encoded, 1 if dma_cont is needed */
463 static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wrappers,
464                                   struct ioctl_resources *resources, u32 msg_hdr_len, u32 *size,
465                                   struct wire_trans_dma_xfer **out_trans)
466 {
467         struct wrapper_msg *trans_wrapper;
468         struct sg_table *sgt = xfer->sgt;
469         struct wire_addr_size_pair *asp;
470         struct scatterlist *sg;
471         struct wrapper_msg *w;
472         unsigned int dma_len;
473         u64 dma_chunk_len;
474         void *boundary;
475         int nents_dma;
476         int nents;
477         int i;
478
479         nents = sgt->nents;
480         nents_dma = nents;
481         *size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
482         for_each_sgtable_sg(sgt, sg, i) {
483                 *size -= sizeof(*asp);
484                 /* Save 1K for possible follow-up transactions. */
485                 if (*size < SZ_1K) {
486                         nents_dma = i;
487                         break;
488                 }
489         }
490
491         trans_wrapper = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
492         if (!trans_wrapper)
493                 return -ENOMEM;
494         *out_trans = (struct wire_trans_dma_xfer *)&trans_wrapper->trans;
495
496         asp = (*out_trans)->data;
497         boundary = (void *)trans_wrapper + QAIC_WRAPPER_MAX_SIZE;
498         *size = 0;
499
500         dma_len = 0;
501         w = trans_wrapper;
502         dma_chunk_len = 0;
503         for_each_sg(sgt->sgl, sg, nents_dma, i) {
504                 asp->size = cpu_to_le64(dma_len);
505                 dma_chunk_len += dma_len;
506                 if (dma_len) {
507                         asp++;
508                         if ((void *)asp + sizeof(*asp) > boundary) {
509                                 w->len = (void *)asp - (void *)&w->msg;
510                                 *size += w->len;
511                                 w = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
512                                 if (!w)
513                                         return -ENOMEM;
514                                 boundary = (void *)w + QAIC_WRAPPER_MAX_SIZE;
515                                 asp = (struct wire_addr_size_pair *)&w->msg;
516                         }
517                 }
518                 asp->addr = cpu_to_le64(sg_dma_address(sg));
519                 dma_len = sg_dma_len(sg);
520         }
521         /* finalize the last segment */
522         asp->size = cpu_to_le64(dma_len);
523         w->len = (void *)asp + sizeof(*asp) - (void *)&w->msg;
524         *size += w->len;
525         dma_chunk_len += dma_len;
526         resources->xferred_dma_size += dma_chunk_len;
527
528         return nents_dma < nents ? 1 : 0;
529 }
530
531 static void cleanup_xfer(struct qaic_device *qdev, struct dma_xfer *xfer)
532 {
533         int i;
534
535         dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
536         sg_free_table(xfer->sgt);
537         kfree(xfer->sgt);
538         for (i = 0; i < xfer->nr_pages; ++i)
539                 put_page(xfer->page_list[i]);
540         kfree(xfer->page_list);
541 }
542
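/*
 * Encode a DMA transfer transaction: pin the user buffer, map it for DMA and
 * emit a wire_trans_dma_xfer carrying the resulting address/size pairs. If the
 * buffer does not fit in this message, a dma_chunk_id is assigned and the
 * remainder is sent in continuation messages.
 */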
543 static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
544                       u32 *user_len, struct ioctl_resources *resources, struct qaic_user *usr)
545 {
546         struct qaic_manage_trans_dma_xfer *in_trans = trans;
547         struct wire_trans_dma_xfer *out_trans;
548         struct wrapper_msg *wrapper;
549         struct dma_xfer *xfer;
550         struct wire_msg *msg;
551         bool need_cont_dma;
552         u32 msg_hdr_len;
553         u32 size;
554         int ret;
555
556         wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
557         msg = &wrapper->msg;
558         msg_hdr_len = le32_to_cpu(msg->hdr.len);
559
560         if (msg_hdr_len > (UINT_MAX - QAIC_MANAGE_EXT_MSG_LENGTH))
561                 return -EINVAL;
562
563         /* There should be enough space to hold at least one ASP entry. */
564         if (msg_hdr_len + sizeof(*out_trans) + sizeof(struct wire_addr_size_pair) >
565             QAIC_MANAGE_EXT_MSG_LENGTH)
566                 return -ENOMEM;
567
568         if (in_trans->addr + in_trans->size < in_trans->addr || !in_trans->size)
569                 return -EINVAL;
570
571         xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
572         if (!xfer)
573                 return -ENOMEM;
574
575         ret = find_and_map_user_pages(qdev, in_trans, resources, xfer);
576         if (ret < 0)
577                 goto free_xfer;
578
579         need_cont_dma = (bool)ret;
580
581         ret = encode_addr_size_pairs(xfer, wrappers, resources, msg_hdr_len, &size, &out_trans);
582         if (ret < 0)
583                 goto cleanup_xfer;
584
585         need_cont_dma = need_cont_dma || (bool)ret;
586
587         msg->hdr.len = cpu_to_le32(msg_hdr_len + size);
588         msg->hdr.count = incr_le32(msg->hdr.count);
589
590         out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
591         out_trans->hdr.len = cpu_to_le32(size);
592         out_trans->tag = cpu_to_le32(in_trans->tag);
593         out_trans->count = cpu_to_le32((size - sizeof(*out_trans)) /
594                                                                 sizeof(struct wire_addr_size_pair));
595
596         *user_len += in_trans->hdr.len;
597
598         if (resources->dma_chunk_id) {
599                 out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
600         } else if (need_cont_dma) {
601                 while (resources->dma_chunk_id == 0)
602                         resources->dma_chunk_id = atomic_inc_return(&usr->chunk_id);
603
604                 out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
605         }
606         resources->trans_hdr = trans;
607
608         list_add(&xfer->list, &resources->dma_xfers);
609         return 0;
610
611 cleanup_xfer:
612         cleanup_xfer(qdev, xfer);
613 free_xfer:
614         kfree(xfer);
615         return ret;
616 }
617
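/*
 * Encode an activate transaction: allocate the coherent buffer that backs the
 * DBC request and response queues and send its bus address and element count
 * to the device. The buffer is kept in resources until the device response
 * confirms (or rejects) the activation.
 */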
618 static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
619                            u32 *user_len, struct ioctl_resources *resources)
620 {
621         struct qaic_manage_trans_activate_to_dev *in_trans = trans;
622         struct wire_trans_activate_to_dev *out_trans;
623         struct wrapper_msg *trans_wrapper;
624         struct wrapper_msg *wrapper;
625         struct wire_msg *msg;
626         dma_addr_t dma_addr;
627         u32 msg_hdr_len;
628         void *buf;
629         u32 nelem;
630         u32 size;
631         int ret;
632
633         wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
634         msg = &wrapper->msg;
635         msg_hdr_len = le32_to_cpu(msg->hdr.len);
636
637         if (msg_hdr_len + sizeof(*out_trans) > QAIC_MANAGE_MAX_MSG_LENGTH)
638                 return -ENOSPC;
639
640         if (!in_trans->queue_size)
641                 return -EINVAL;
642
643         if (in_trans->pad)
644                 return -EINVAL;
645
646         nelem = in_trans->queue_size;
647         size = (get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) * nelem;
648         if (size / nelem != get_dbc_req_elem_size() + get_dbc_rsp_elem_size())
649                 return -EINVAL;
650
651         if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size)
652                 return -EINVAL;
653
654         size = ALIGN((size + QAIC_DBC_Q_GAP), QAIC_DBC_Q_BUF_ALIGN);
655
656         buf = dma_alloc_coherent(&qdev->pdev->dev, size, &dma_addr, GFP_KERNEL);
657         if (!buf)
658                 return -ENOMEM;
659
660         trans_wrapper = add_wrapper(wrappers,
661                                     offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));
662         if (!trans_wrapper) {
663                 ret = -ENOMEM;
664                 goto free_dma;
665         }
666         trans_wrapper->len = sizeof(*out_trans);
667         out_trans = (struct wire_trans_activate_to_dev *)&trans_wrapper->trans;
668
669         out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_ACTIVATE_TO_DEV);
670         out_trans->hdr.len = cpu_to_le32(sizeof(*out_trans));
671         out_trans->buf_len = cpu_to_le32(size);
672         out_trans->req_q_addr = cpu_to_le64(dma_addr);
673         out_trans->req_q_size = cpu_to_le32(nelem);
674         out_trans->rsp_q_addr = cpu_to_le64(dma_addr + size - nelem * get_dbc_rsp_elem_size());
675         out_trans->rsp_q_size = cpu_to_le32(nelem);
676         out_trans->options = cpu_to_le32(in_trans->options);
677
678         *user_len += in_trans->hdr.len;
679         msg->hdr.len = cpu_to_le32(msg_hdr_len + sizeof(*out_trans));
680         msg->hdr.count = incr_le32(msg->hdr.count);
681
682         resources->buf = buf;
683         resources->dma_addr = dma_addr;
684         resources->total_size = size;
685         resources->nelem = nelem;
686         resources->rsp_q_base = buf + size - nelem * get_dbc_rsp_elem_size();
687         return 0;
688
689 free_dma:
690         dma_free_coherent(&qdev->pdev->dev, size, buf, dma_addr);
691         return ret;
692 }
693
694 static int encode_deactivate(struct qaic_device *qdev, void *trans,
695                              u32 *user_len, struct qaic_user *usr)
696 {
697         struct qaic_manage_trans_deactivate *in_trans = trans;
698
699         if (in_trans->dbc_id >= qdev->num_dbc || in_trans->pad)
700                 return -EINVAL;
701
702         *user_len += in_trans->hdr.len;
703
704         return disable_dbc(qdev, in_trans->dbc_id, usr);
705 }
706
707 static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
708                          u32 *user_len)
709 {
710         struct qaic_manage_trans_status_to_dev *in_trans = trans;
711         struct wire_trans_status_to_dev *out_trans;
712         struct wrapper_msg *trans_wrapper;
713         struct wrapper_msg *wrapper;
714         struct wire_msg *msg;
715         u32 msg_hdr_len;
716
717         wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
718         msg = &wrapper->msg;
719         msg_hdr_len = le32_to_cpu(msg->hdr.len);
720
721         if (msg_hdr_len + in_trans->hdr.len > QAIC_MANAGE_MAX_MSG_LENGTH)
722                 return -ENOSPC;
723
724         trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper));
725         if (!trans_wrapper)
726                 return -ENOMEM;
727
728         trans_wrapper->len = sizeof(*out_trans);
729         out_trans = (struct wire_trans_status_to_dev *)&trans_wrapper->trans;
730
731         out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_STATUS_TO_DEV);
732         out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);
733         msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
734         msg->hdr.count = incr_le32(msg->hdr.count);
735         *user_len += in_trans->hdr.len;
736
737         return 0;
738 }
739
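/*
 * Walk the transactions of a user manage message and encode each one into its
 * wire format wrapper. If a chunked DMA transfer is pending, only the next DMA
 * continuation is encoded. On any failure the partially built resources are
 * released.
 */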
740 static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
741                           struct wrapper_list *wrappers, struct ioctl_resources *resources,
742                           struct qaic_user *usr)
743 {
744         struct qaic_manage_trans_hdr *trans_hdr;
745         struct wrapper_msg *wrapper;
746         struct wire_msg *msg;
747         u32 user_len = 0;
748         int ret;
749         int i;
750
751         if (!user_msg->count) {
752                 ret = -EINVAL;
753                 goto out;
754         }
755
756         wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
757         msg = &wrapper->msg;
758
759         msg->hdr.len = cpu_to_le32(sizeof(msg->hdr));
760
761         if (resources->dma_chunk_id) {
762                 ret = encode_dma(qdev, resources->trans_hdr, wrappers, &user_len, resources, usr);
763                 msg->hdr.count = cpu_to_le32(1);
764                 goto out;
765         }
766
767         for (i = 0; i < user_msg->count; ++i) {
768                 if (user_len >= user_msg->len) {
769                         ret = -EINVAL;
770                         break;
771                 }
772                 trans_hdr = (struct qaic_manage_trans_hdr *)(user_msg->data + user_len);
773                 if (user_len + trans_hdr->len > user_msg->len) {
774                         ret = -EINVAL;
775                         break;
776                 }
777
778                 switch (trans_hdr->type) {
779                 case QAIC_TRANS_PASSTHROUGH_FROM_USR:
780                         ret = encode_passthrough(qdev, trans_hdr, wrappers, &user_len);
781                         break;
782                 case QAIC_TRANS_DMA_XFER_FROM_USR:
783                         ret = encode_dma(qdev, trans_hdr, wrappers, &user_len, resources, usr);
784                         break;
785                 case QAIC_TRANS_ACTIVATE_FROM_USR:
786                         ret = encode_activate(qdev, trans_hdr, wrappers, &user_len, resources);
787                         break;
788                 case QAIC_TRANS_DEACTIVATE_FROM_USR:
789                         ret = encode_deactivate(qdev, trans_hdr, &user_len, usr);
790                         break;
791                 case QAIC_TRANS_STATUS_FROM_USR:
792                         ret = encode_status(qdev, trans_hdr, wrappers, &user_len);
793                         break;
794                 default:
795                         ret = -EINVAL;
796                         break;
797                 }
798
799                 if (ret)
800                         break;
801         }
802
803         if (user_len != user_msg->len)
804                 ret = -EINVAL;
805 out:
806         if (ret) {
807                 free_dma_xfers(qdev, resources);
808                 free_dbc_buf(qdev, resources);
809                 return ret;
810         }
811
812         return 0;
813 }
814
815 static int decode_passthrough(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
816                               u32 *msg_len)
817 {
818         struct qaic_manage_trans_passthrough *out_trans;
819         struct wire_trans_passthrough *in_trans = trans;
820         u32 len;
821
822         out_trans = (void *)user_msg->data + user_msg->len;
823
824         len = le32_to_cpu(in_trans->hdr.len);
825         if (len % 8 != 0)
826                 return -EINVAL;
827
828         if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
829                 return -ENOSPC;
830
831         memcpy(out_trans->data, in_trans->data, len - sizeof(in_trans->hdr));
832         user_msg->len += len;
833         *msg_len += len;
834         out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
835         out_trans->hdr.len = len;
836
837         return 0;
838 }
839
840 static int decode_activate(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
841                            u32 *msg_len, struct ioctl_resources *resources, struct qaic_user *usr)
842 {
843         struct qaic_manage_trans_activate_from_dev *out_trans;
844         struct wire_trans_activate_from_dev *in_trans = trans;
845         u32 len;
846
847         out_trans = (void *)user_msg->data + user_msg->len;
848
849         len = le32_to_cpu(in_trans->hdr.len);
850         if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
851                 return -ENOSPC;
852
853         user_msg->len += len;
854         *msg_len += len;
855         out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
856         out_trans->hdr.len = len;
857         out_trans->status = le32_to_cpu(in_trans->status);
858         out_trans->dbc_id = le32_to_cpu(in_trans->dbc_id);
859         out_trans->options = le64_to_cpu(in_trans->options);
860
861         if (!resources->buf)
862                 /* how did we get an activate response without a request? */
863                 return -EINVAL;
864
865         if (out_trans->dbc_id >= qdev->num_dbc)
866                 /*
867                  * The device assigned an invalid resource, which should never
868                  * happen. Return an error so the user can try to recover.
869                  */
870                 return -ENODEV;
871
872         if (out_trans->status)
873                 /*
874                  * Allocating resources failed on the device side. This is not
875                  * expected behaviour; the user is expected to handle this situation.
876                  */
877                 return -ECANCELED;
878
879         resources->status = out_trans->status;
880         resources->dbc_id = out_trans->dbc_id;
881         save_dbc_buf(qdev, resources, usr);
882
883         return 0;
884 }
885
886 static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len,
887                              struct qaic_user *usr)
888 {
889         struct wire_trans_deactivate_from_dev *in_trans = trans;
890         u32 dbc_id = le32_to_cpu(in_trans->dbc_id);
891         u32 status = le32_to_cpu(in_trans->status);
892
893         if (dbc_id >= qdev->num_dbc)
894                 /*
895                  * The device assigned an invalid resource, which should never
896                  * happen. Inject an error so the user can try to recover.
897                  */
898                 return -ENODEV;
899
900         if (status) {
901                 /*
902                  * Releasing resources failed on the device side, which puts
903                  * us in a bind since they may still be in use, so enable the
904                  * DBC. The user is expected to retry deactivation.
905                  */
906                 enable_dbc(qdev, dbc_id, usr);
907                 return -ECANCELED;
908         }
909
910         release_dbc(qdev, dbc_id);
911         *msg_len += sizeof(*in_trans);
912
913         return 0;
914 }
915
916 static int decode_status(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
917                          u32 *user_len, struct wire_msg *msg)
918 {
919         struct qaic_manage_trans_status_from_dev *out_trans;
920         struct wire_trans_status_from_dev *in_trans = trans;
921         u32 len;
922
923         out_trans = (void *)user_msg->data + user_msg->len;
924
925         len = le32_to_cpu(in_trans->hdr.len);
926         if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
927                 return -ENOSPC;
928
929         out_trans->hdr.type = QAIC_TRANS_STATUS_FROM_DEV;
930         out_trans->hdr.len = len;
931         out_trans->major = le16_to_cpu(in_trans->major);
932         out_trans->minor = le16_to_cpu(in_trans->minor);
933         out_trans->status_flags = le64_to_cpu(in_trans->status_flags);
934         out_trans->status = le32_to_cpu(in_trans->status);
935         *user_len += le32_to_cpu(in_trans->hdr.len);
936         user_msg->len += len;
937
938         if (out_trans->status)
939                 return -ECANCELED;
940         if (out_trans->status_flags & BIT(0) && !valid_crc(msg))
941                 return -EPIPE;
942
943         return 0;
944 }
945
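/*
 * Convert a response message from the device back into the user format,
 * validating lengths along the way and applying the side effects of activate
 * and deactivate transactions (saving or releasing DBC resources).
 */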
946 static int decode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
947                           struct wire_msg *msg, struct ioctl_resources *resources,
948                           struct qaic_user *usr)
949 {
950         u32 msg_hdr_len = le32_to_cpu(msg->hdr.len);
951         struct wire_trans_hdr *trans_hdr;
952         u32 msg_len = 0;
953         int ret;
954         int i;
955
956         if (msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH)
957                 return -EINVAL;
958
959         user_msg->len = 0;
960         user_msg->count = le32_to_cpu(msg->hdr.count);
961
962         for (i = 0; i < user_msg->count; ++i) {
963                 trans_hdr = (struct wire_trans_hdr *)(msg->data + msg_len);
964                 if (msg_len + le32_to_cpu(trans_hdr->len) > msg_hdr_len)
965                         return -EINVAL;
966
967                 switch (le32_to_cpu(trans_hdr->type)) {
968                 case QAIC_TRANS_PASSTHROUGH_FROM_DEV:
969                         ret = decode_passthrough(qdev, trans_hdr, user_msg, &msg_len);
970                         break;
971                 case QAIC_TRANS_ACTIVATE_FROM_DEV:
972                         ret = decode_activate(qdev, trans_hdr, user_msg, &msg_len, resources, usr);
973                         break;
974                 case QAIC_TRANS_DEACTIVATE_FROM_DEV:
975                         ret = decode_deactivate(qdev, trans_hdr, &msg_len, usr);
976                         break;
977                 case QAIC_TRANS_STATUS_FROM_DEV:
978                         ret = decode_status(qdev, trans_hdr, user_msg, &msg_len, msg);
979                         break;
980                 default:
981                         return -EINVAL;
982                 }
983
984                 if (ret)
985                         return ret;
986         }
987
988         if (msg_len != (msg_hdr_len - sizeof(msg->hdr)))
989                 return -EINVAL;
990
991         return 0;
992 }
993
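/*
 * Queue the wrappers of an encoded message on the MHI control channel and wait
 * for the response with the matching sequence number. Must be called with
 * cntl_mutex held; the mutex is released before returning. Returns the
 * response buffer on success or an ERR_PTR.
 */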
994 static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 seq_num,
995                       bool ignore_signal)
996 {
997         struct xfer_queue_elem elem;
998         struct wire_msg *out_buf;
999         struct wrapper_msg *w;
1000         long ret = -EAGAIN;
1001         int xfer_count = 0;
1002         int retry_count;
1003
1004         if (qdev->in_reset) {
1005                 mutex_unlock(&qdev->cntl_mutex);
1006                 return ERR_PTR(-ENODEV);
1007         }
1008
1009         /* Attempt to avoid a partial commit of a message */
1010         list_for_each_entry(w, &wrappers->list, list)
1011                 xfer_count++;
1012
1013         for (retry_count = 0; retry_count < QAIC_MHI_RETRY_MAX; retry_count++) {
1014                 if (xfer_count <= mhi_get_free_desc_count(qdev->cntl_ch, DMA_TO_DEVICE)) {
1015                         ret = 0;
1016                         break;
1017                 }
1018                 msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS);
1019                 if (signal_pending(current))
1020                         break;
1021         }
1022
1023         if (ret) {
1024                 mutex_unlock(&qdev->cntl_mutex);
1025                 return ERR_PTR(ret);
1026         }
1027
1028         elem.seq_num = seq_num;
1029         elem.buf = NULL;
1030         init_completion(&elem.xfer_done);
1031         if (likely(!qdev->cntl_lost_buf)) {
1032                 /*
1033                  * The max size of request to device is QAIC_MANAGE_EXT_MSG_LENGTH.
1034                  * The max size of response from device is QAIC_MANAGE_MAX_MSG_LENGTH.
1035                  */
1036                 out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL);
1037                 if (!out_buf) {
1038                         mutex_unlock(&qdev->cntl_mutex);
1039                         return ERR_PTR(-ENOMEM);
1040                 }
1041
1042                 ret = mhi_queue_buf(qdev->cntl_ch, DMA_FROM_DEVICE, out_buf,
1043                                     QAIC_MANAGE_MAX_MSG_LENGTH, MHI_EOT);
1044                 if (ret) {
1045                         mutex_unlock(&qdev->cntl_mutex);
1046                         return ERR_PTR(ret);
1047                 }
1048         } else {
1049                 /*
1050                  * We lost a buffer because we queued a recv buf, but then
1051                  * queuing the corresponding tx buf failed. To try to avoid
1052                  * a memory leak, let's reclaim it and use it for this
1053                  * transaction.
1054                  */
1055                 qdev->cntl_lost_buf = false;
1056         }
1057
1058         list_for_each_entry(w, &wrappers->list, list) {
1059                 kref_get(&w->ref_count);
1060                 retry_count = 0;
1061                 ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
1062                                     list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN);
1063                 if (ret) {
1064                         qdev->cntl_lost_buf = true;
1065                         kref_put(&w->ref_count, free_wrapper);
1066                         mutex_unlock(&qdev->cntl_mutex);
1067                         return ERR_PTR(ret);
1068                 }
1069         }
1070
1071         list_add_tail(&elem.list, &qdev->cntl_xfer_list);
1072         mutex_unlock(&qdev->cntl_mutex);
1073
1074         if (ignore_signal)
1075                 ret = wait_for_completion_timeout(&elem.xfer_done, control_resp_timeout_s * HZ);
1076         else
1077                 ret = wait_for_completion_interruptible_timeout(&elem.xfer_done,
1078                                                                 control_resp_timeout_s * HZ);
1079         /*
1080          * Not using _interruptible here because we have to clean up or we'll
1081          * likely cause memory corruption.
1082          */
1083         mutex_lock(&qdev->cntl_mutex);
1084         if (!list_empty(&elem.list))
1085                 list_del(&elem.list);
1086         if (!ret && !elem.buf)
1087                 ret = -ETIMEDOUT;
1088         else if (ret > 0 && !elem.buf)
1089                 ret = -EIO;
1090         mutex_unlock(&qdev->cntl_mutex);
1091
1092         if (ret < 0) {
1093                 kfree(elem.buf);
1094                 return ERR_PTR(ret);
1095         } else if (!qdev->valid_crc(elem.buf)) {
1096                 kfree(elem.buf);
1097                 return ERR_PTR(-EPIPE);
1098         }
1099
1100         return elem.buf;
1101 }
1102
1103 /* Add a transaction to abort the outstanding DMA continuation */
1104 static int abort_dma_cont(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 dma_chunk_id)
1105 {
1106         struct wire_trans_dma_xfer *out_trans;
1107         u32 size = sizeof(*out_trans);
1108         struct wrapper_msg *wrapper;
1109         struct wrapper_msg *w;
1110         struct wire_msg *msg;
1111
1112         wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
1113         msg = &wrapper->msg;
1114
1115         /* Remove all but the first wrapper which has the msg header */
1116         list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
1117                 if (!list_is_first(&wrapper->list, &wrappers->list))
1118                         kref_put(&wrapper->ref_count, free_wrapper);
1119
1120         wrapper = add_wrapper(wrappers, offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));
1121
1122         if (!wrapper)
1123                 return -ENOMEM;
1124
1125         out_trans = (struct wire_trans_dma_xfer *)&wrapper->trans;
1126         out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
1127         out_trans->hdr.len = cpu_to_le32(size);
1128         out_trans->tag = cpu_to_le32(0);
1129         out_trans->count = cpu_to_le32(0);
1130         out_trans->dma_chunk_id = cpu_to_le32(dma_chunk_id);
1131
1132         msg->hdr.len = cpu_to_le32(size + sizeof(*msg));
1133         msg->hdr.count = cpu_to_le32(1);
1134         wrapper->len = size;
1135
1136         return 0;
1137 }
1138
1139 static struct wrapper_list *alloc_wrapper_list(void)
1140 {
1141         struct wrapper_list *wrappers;
1142
1143         wrappers = kmalloc(sizeof(*wrappers), GFP_KERNEL);
1144         if (!wrappers)
1145                 return NULL;
1146         INIT_LIST_HEAD(&wrappers->list);
1147         spin_lock_init(&wrappers->lock);
1148
1149         return wrappers;
1150 }
1151
1152 static int qaic_manage_msg_xfer(struct qaic_device *qdev, struct qaic_user *usr,
1153                                 struct manage_msg *user_msg, struct ioctl_resources *resources,
1154                                 struct wire_msg **rsp)
1155 {
1156         struct wrapper_list *wrappers;
1157         struct wrapper_msg *wrapper;
1158         struct wrapper_msg *w;
1159         bool all_done = false;
1160         struct wire_msg *msg;
1161         int ret;
1162
1163         wrappers = alloc_wrapper_list();
1164         if (!wrappers)
1165                 return -ENOMEM;
1166
1167         wrapper = add_wrapper(wrappers, sizeof(*wrapper));
1168         if (!wrapper) {
1169                 kfree(wrappers);
1170                 return -ENOMEM;
1171         }
1172
1173         msg = &wrapper->msg;
1174         wrapper->len = sizeof(*msg);
1175
1176         ret = encode_message(qdev, user_msg, wrappers, resources, usr);
1177         if (ret && resources->dma_chunk_id)
1178                 ret = abort_dma_cont(qdev, wrappers, resources->dma_chunk_id);
1179         if (ret)
1180                 goto encode_failed;
1181
1182         ret = mutex_lock_interruptible(&qdev->cntl_mutex);
1183         if (ret)
1184                 goto lock_failed;
1185
1186         msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
1187         msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);
1188
1189         if (usr) {
1190                 msg->hdr.handle = cpu_to_le32(usr->handle);
1191                 msg->hdr.partition_id = cpu_to_le32(usr->qddev->partition_id);
1192         } else {
1193                 msg->hdr.handle = 0;
1194                 msg->hdr.partition_id = cpu_to_le32(QAIC_NO_PARTITION);
1195         }
1196
1197         msg->hdr.padding = cpu_to_le32(0);
1198         msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));
1199
1200         /* msg_xfer releases the mutex */
1201         *rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, false);
1202         if (IS_ERR(*rsp))
1203                 ret = PTR_ERR(*rsp);
1204
1205 lock_failed:
1206         free_dma_xfers(qdev, resources);
1207 encode_failed:
1208         spin_lock(&wrappers->lock);
1209         list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
1210                 kref_put(&wrapper->ref_count, free_wrapper);
1211         all_done = list_empty(&wrappers->list);
1212         spin_unlock(&wrappers->lock);
1213         if (all_done)
1214                 kfree(wrappers);
1215
1216         return ret;
1217 }
1218
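/*
 * Top-level handler for one manage message: encode it, exchange it with the
 * device and decode the response. The exchange is repeated while the device
 * asks for DMA transfer continuations of the same chunk.
 */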
1219 static int qaic_manage(struct qaic_device *qdev, struct qaic_user *usr, struct manage_msg *user_msg)
1220 {
1221         struct wire_trans_dma_xfer_cont *dma_cont = NULL;
1222         struct ioctl_resources resources;
1223         struct wire_msg *rsp = NULL;
1224         int ret;
1225
1226         memset(&resources, 0, sizeof(struct ioctl_resources));
1227
1228         INIT_LIST_HEAD(&resources.dma_xfers);
1229
1230         if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH ||
1231             user_msg->count > QAIC_MANAGE_MAX_MSG_LENGTH / sizeof(struct qaic_manage_trans_hdr))
1232                 return -EINVAL;
1233
1234 dma_xfer_continue:
1235         ret = qaic_manage_msg_xfer(qdev, usr, user_msg, &resources, &rsp);
1236         if (ret)
1237                 return ret;
1238         /* dma_cont should be the only transaction if present */
1239         if (le32_to_cpu(rsp->hdr.count) == 1) {
1240                 dma_cont = (struct wire_trans_dma_xfer_cont *)rsp->data;
1241                 if (le32_to_cpu(dma_cont->hdr.type) != QAIC_TRANS_DMA_XFER_CONT)
1242                         dma_cont = NULL;
1243         }
1244         if (dma_cont) {
1245                 if (le32_to_cpu(dma_cont->dma_chunk_id) == resources.dma_chunk_id &&
1246                     le64_to_cpu(dma_cont->xferred_size) == resources.xferred_dma_size) {
1247                         kfree(rsp);
1248                         goto dma_xfer_continue;
1249                 }
1250
1251                 ret = -EINVAL;
1252                 goto dma_cont_failed;
1253         }
1254
1255         ret = decode_message(qdev, user_msg, rsp, &resources, usr);
1256
1257 dma_cont_failed:
1258         free_dbc_buf(qdev, &resources);
1259         kfree(rsp);
1260         return ret;
1261 }
1262
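/*
 * Illustrative sketch (not part of the driver) of how userspace might issue a
 * status query through this ioctl, assuming the definitions from the
 * qaic_accel.h uapi header:
 *
 *   struct qaic_manage_trans_status_to_dev q = {
 *           .hdr = { .type = QAIC_TRANS_STATUS_FROM_USR, .len = sizeof(q.hdr) },
 *   };
 *   struct qaic_manage_msg m = {
 *           .len = sizeof(q), .count = 1, .data = (__u64)(uintptr_t)&q,
 *   };
 *   ioctl(drm_fd, DRM_IOCTL_QAIC_MANAGE, &m);
 */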
1263 int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1264 {
1265         struct qaic_manage_msg *user_msg = data;
1266         struct qaic_device *qdev;
1267         struct manage_msg *msg;
1268         struct qaic_user *usr;
1269         u8 __user *user_data;
1270         int qdev_rcu_id;
1271         int usr_rcu_id;
1272         int ret;
1273
1274         if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH)
1275                 return -EINVAL;
1276
1277         usr = file_priv->driver_priv;
1278
1279         usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
1280         if (!usr->qddev) {
1281                 srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1282                 return -ENODEV;
1283         }
1284
1285         qdev = usr->qddev->qdev;
1286
1287         qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
1288         if (qdev->in_reset) {
1289                 srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1290                 srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1291                 return -ENODEV;
1292         }
1293
1294         msg = kzalloc(QAIC_MANAGE_MAX_MSG_LENGTH + sizeof(*msg), GFP_KERNEL);
1295         if (!msg) {
1296                 ret = -ENOMEM;
1297                 goto out;
1298         }
1299
1300         msg->len = user_msg->len;
1301         msg->count = user_msg->count;
1302
1303         user_data = u64_to_user_ptr(user_msg->data);
1304
1305         if (copy_from_user(msg->data, user_data, user_msg->len)) {
1306                 ret = -EFAULT;
1307                 goto free_msg;
1308         }
1309
1310         ret = qaic_manage(qdev, usr, msg);
1311
1312         /*
1313          * If qaic_manage() is successful then we copy the message back to
1314          * userspace memory, with one exception for -ECANCELED.
1315          * -ECANCELED means that the device NACKed the message with a status
1316          * error code that userspace would like to know about.
1317          */
1318         if (ret == -ECANCELED || !ret) {
1319                 if (copy_to_user(user_data, msg->data, msg->len)) {
1320                         ret = -EFAULT;
1321                 } else {
1322                         user_msg->len = msg->len;
1323                         user_msg->count = msg->count;
1324                 }
1325         }
1326
1327 free_msg:
1328         kfree(msg);
1329 out:
1330         srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
1331         srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1332         return ret;
1333 }
1334
1335 int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor)
1336 {
1337         struct qaic_manage_trans_status_from_dev *status_result;
1338         struct qaic_manage_trans_status_to_dev *status_query;
1339         struct manage_msg *user_msg;
1340         int ret;
1341
1342         user_msg = kmalloc(sizeof(*user_msg) + sizeof(*status_result), GFP_KERNEL);
1343         if (!user_msg) {
1344                 ret = -ENOMEM;
1345                 goto out;
1346         }
1347         user_msg->len = sizeof(*status_query);
1348         user_msg->count = 1;
1349
1350         status_query = (struct qaic_manage_trans_status_to_dev *)user_msg->data;
1351         status_query->hdr.type = QAIC_TRANS_STATUS_FROM_USR;
1352         status_query->hdr.len = sizeof(status_query->hdr);
1353
1354         ret = qaic_manage(qdev, usr, user_msg);
1355         if (ret)
1356                 goto kfree_user_msg;
1357         status_result = (struct qaic_manage_trans_status_from_dev *)user_msg->data;
1358         *major = status_result->major;
1359         *minor = status_result->minor;
1360
1361         if (status_result->status_flags & BIT(0)) { /* device is using CRC */
1362                 /* By default qdev->gen_crc is programmed to generate CRC */
1363                 qdev->valid_crc = valid_crc;
1364         } else {
1365                 /* By default qdev->valid_crc is programmed to bypass CRC */
1366                 qdev->gen_crc = gen_crc_stub;
1367         }
1368
1369 kfree_user_msg:
1370         kfree(user_msg);
1371 out:
1372         return ret;
1373 }
1374
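/*
 * Deferred handler for a response received on the control channel: match it to
 * the waiter with the same sequence number, hand over the buffer and wake that
 * waiter. Unmatched responses belong to timed out requests and are dropped.
 */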
1375 static void resp_worker(struct work_struct *work)
1376 {
1377         struct resp_work *resp = container_of(work, struct resp_work, work);
1378         struct qaic_device *qdev = resp->qdev;
1379         struct wire_msg *msg = resp->buf;
1380         struct xfer_queue_elem *elem;
1381         struct xfer_queue_elem *i;
1382         bool found = false;
1383
1384         mutex_lock(&qdev->cntl_mutex);
1385         list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
1386                 if (elem->seq_num == le32_to_cpu(msg->hdr.sequence_number)) {
1387                         found = true;
1388                         list_del_init(&elem->list);
1389                         elem->buf = msg;
1390                         complete_all(&elem->xfer_done);
1391                         break;
1392                 }
1393         }
1394         mutex_unlock(&qdev->cntl_mutex);
1395
1396         if (!found)
1397                 /* request must have timed out, drop packet */
1398                 kfree(msg);
1399
1400         kfree(resp);
1401 }
1402
1403 static void free_wrapper_from_list(struct wrapper_list *wrappers, struct wrapper_msg *wrapper)
1404 {
1405         bool all_done = false;
1406
1407         spin_lock(&wrappers->lock);
1408         kref_put(&wrapper->ref_count, free_wrapper);
1409         all_done = list_empty(&wrappers->list);
1410         spin_unlock(&wrappers->lock);
1411
1412         if (all_done)
1413                 kfree(wrappers);
1414 }
1415
1416 void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
1417 {
1418         struct wire_msg *msg = mhi_result->buf_addr;
1419         struct wrapper_msg *wrapper = container_of(msg, struct wrapper_msg, msg);
1420
1421         free_wrapper_from_list(wrapper->head, wrapper);
1422 }
1423
1424 void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
1425 {
1426         struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
1427         struct wire_msg *msg = mhi_result->buf_addr;
1428         struct resp_work *resp;
1429
1430         if (mhi_result->transaction_status || msg->hdr.magic_number != MANAGE_MAGIC_NUMBER) {
1431                 kfree(msg);
1432                 return;
1433         }
1434
1435         resp = kmalloc(sizeof(*resp), GFP_ATOMIC);
1436         if (!resp) {
1437                 kfree(msg);
1438                 return;
1439         }
1440
1441         INIT_WORK(&resp->work, resp_worker);
1442         resp->qdev = qdev;
1443         resp->buf = msg;
1444         queue_work(qdev->cntl_wq, &resp->work);
1445 }
1446
1447 int qaic_control_open(struct qaic_device *qdev)
1448 {
1449         if (!qdev->cntl_ch)
1450                 return -ENODEV;
1451
1452         qdev->cntl_lost_buf = false;
1453         /*
1454          * By default qaic assumes that the device has CRC enabled.
1455          * qaic learns whether the device actually has CRC enabled or disabled
1456          * during the device status transaction, which is the first transaction
1457          * performed on the control channel.
1458          *
1459          * CRC validation of that first status response is therefore skipped
1460          * (by calling valid_crc_stub) and is done later during decoding if the
1461          * device has CRC enabled.
1462          * Once qaic knows whether the device has CRC enabled or not, it acts
1463          * accordingly.
1464          */
1465         qdev->gen_crc = gen_crc;
1466         qdev->valid_crc = valid_crc_stub;
1467
1468         return mhi_prepare_for_transfer(qdev->cntl_ch);
1469 }
1470
1471 void qaic_control_close(struct qaic_device *qdev)
1472 {
1473         mhi_unprepare_from_transfer(qdev->cntl_ch);
1474 }
1475
1476 void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr)
1477 {
1478         struct wire_trans_terminate_to_dev *trans;
1479         struct wrapper_list *wrappers;
1480         struct wrapper_msg *wrapper;
1481         struct wire_msg *msg;
1482         struct wire_msg *rsp;
1483
1484         wrappers = alloc_wrapper_list();
1485         if (!wrappers)
1486                 return;
1487
1488         wrapper = add_wrapper(wrappers, sizeof(*wrapper) + sizeof(*msg) + sizeof(*trans));
1489         if (!wrapper) {
                 kfree(wrappers);
                 return;
1490         }
1491
1492         msg = &wrapper->msg;
1493
1494         trans = (struct wire_trans_terminate_to_dev *)msg->data;
1495
1496         trans->hdr.type = cpu_to_le32(QAIC_TRANS_TERMINATE_TO_DEV);
1497         trans->hdr.len = cpu_to_le32(sizeof(*trans));
1498         trans->handle = cpu_to_le32(usr->handle);
1499
1500         mutex_lock(&qdev->cntl_mutex);
1501         wrapper->len = sizeof(msg->hdr) + sizeof(*trans);
1502         msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
1503         msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);
1504         msg->hdr.len = cpu_to_le32(wrapper->len);
1505         msg->hdr.count = cpu_to_le32(1);
1506         msg->hdr.handle = cpu_to_le32(usr->handle);
1507         msg->hdr.padding = cpu_to_le32(0);
1508         msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));
1509
1510         /*
1511          * msg_xfer releases the mutex
1512          * We don't care about the return of msg_xfer since we will not do
1513          * anything different based on what happens.
1514          * We ignore pending signals since one will be set if the user is
1515          * killed, and we need to give the device a chance to clean up,
1516          * otherwise DMA may still be in progress when we return.
1517          */
1518         rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, true);
1519         if (!IS_ERR(rsp))
1520                 kfree(rsp);
1521         free_wrapper_from_list(wrappers, wrapper);
1522 }
1523
1524 void wake_all_cntl(struct qaic_device *qdev)
1525 {
1526         struct xfer_queue_elem *elem;
1527         struct xfer_queue_elem *i;
1528
1529         mutex_lock(&qdev->cntl_mutex);
1530         list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
1531                 list_del_init(&elem->list);
1532                 complete_all(&elem->xfer_done);
1533         }
1534         mutex_unlock(&qdev->cntl_mutex);
1535 }