samples/vfio-mdev/mtty.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Mediated virtual PCI serial host device driver
4  *
5  * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
6  *     Author: Neo Jia <cjia@nvidia.com>
7  *             Kirti Wankhede <kwankhede@nvidia.com>
8  *
9  * Sample driver that creates an mdev device that simulates a serial port
10  * over a PCI card.
11  */
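/*
 * Illustrative usage (paths and type names follow the kernel's
 * vfio-mediated-device documentation; adjust to whatever this module
 * registers on your system):
 *
 *   # modprobe mtty
 *   # echo "83b8f4f2-509f-382f-3c1e-e6bfe0fa1001" > \
 *       /sys/devices/virtual/mtty/mtty/mdev_supported_types/mtty-2/create
 *
 * The mediated device can then be handed to a VMM, e.g. QEMU:
 *   -device vfio-pci,sysfsdev=/sys/bus/mdev/devices/83b8f4f2-509f-382f-3c1e-e6bfe0fa1001
 */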
12
13 #include <linux/init.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/fs.h>
17 #include <linux/poll.h>
18 #include <linux/slab.h>
19 #include <linux/cdev.h>
20 #include <linux/sched.h>
21 #include <linux/wait.h>
22 #include <linux/vfio.h>
23 #include <linux/iommu.h>
24 #include <linux/sysfs.h>
25 #include <linux/ctype.h>
26 #include <linux/file.h>
27 #include <linux/mdev.h>
28 #include <linux/pci.h>
29 #include <linux/serial.h>
30 #include <uapi/linux/serial_reg.h>
31 #include <linux/eventfd.h>
32 /*
33  * #defines
34  */
35
36 #define VERSION_STRING  "0.1"
37 #define DRIVER_AUTHOR   "NVIDIA Corporation"
38
39 #define MTTY_CLASS_NAME "mtty"
40
41 #define MTTY_NAME       "mtty"
42
43 #define MTTY_STRING_LEN         16
44
45 #define MTTY_CONFIG_SPACE_SIZE  0xff
46 #define MTTY_IO_BAR_SIZE        0x8
47 #define MTTY_MMIO_BAR_SIZE      0x100000
48
49 #define STORE_LE16(addr, val)   (*(u16 *)addr = val)
50 #define STORE_LE32(addr, val)   (*(u32 *)addr = val)
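/*
 * Note: these helpers do a plain native-endian store, so the "LE" naming
 * only holds on little-endian hosts.
 */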
51
52 #define MAX_FIFO_SIZE   16
53
54 #define CIRCULAR_BUF_INC_IDX(idx)    (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))
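/* The AND-mask wrap above assumes MAX_FIFO_SIZE is a power of two. */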
55
56 #define MTTY_VFIO_PCI_OFFSET_SHIFT   40
57
58 #define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
59 #define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
60                                 ((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
61 #define MTTY_VFIO_PCI_OFFSET_MASK    \
62                                 (((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
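/*
 * Userspace offsets encode the VFIO region index in the upper bits:
 * offset = ((u64)index << 40) | offset_within_region.  For example, an
 * access at MTTY_VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_CONFIG_REGION_INDEX) + 0x10
 * lands on PCI config-space offset 0x10 (BAR0).
 */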
63 #define MAX_MTTYS       24
64
65 /*
66  * Global Structures
67  */
68
69 static struct mtty_dev {
70         dev_t           vd_devt;
71         struct class    *vd_class;
72         struct cdev     vd_cdev;
73         struct idr      vd_idr;
74         struct device   dev;
75 } mtty_dev;
76
77 struct mdev_region_info {
78         u64 start;
79         u64 phys_start;
80         u32 size;
81         u64 vfio_offset;
82 };
83
84 #if defined(DEBUG_REGS)
85 static const char *wr_reg[] = {
86         "TX",
87         "IER",
88         "FCR",
89         "LCR",
90         "MCR",
91         "LSR",
92         "MSR",
93         "SCR"
94 };
95
96 static const char *rd_reg[] = {
97         "RX",
98         "IER",
99         "IIR",
100         "LCR",
101         "MCR",
102         "LSR",
103         "MSR",
104         "SCR"
105 };
106 #endif
107
108 /* loop back buffer */
109 struct rxtx {
110         u8 fifo[MAX_FIFO_SIZE];
111         u8 head, tail;
112         u8 count;
113 };
114
115 struct serial_port {
116         u8 uart_reg[8];         /* 8 registers */
117         struct rxtx rxtx;       /* loop back buffer */
118         bool dlab;
119         bool overrun;
120         u16 divisor;
121         u8 fcr;                 /* FIFO control register */
122         u8 max_fifo_size;
123         u8 intr_trigger_level;  /* interrupt trigger level */
124 };
125
126 /* State of each mdev device */
127 struct mdev_state {
128         struct vfio_device vdev;
129         int irq_fd;
130         struct eventfd_ctx *intx_evtfd;
131         struct eventfd_ctx *msi_evtfd;
132         int irq_index;
133         u8 *vconfig;
134         struct mutex ops_lock;
135         struct mdev_device *mdev;
136         struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
137         u32 bar_mask[VFIO_PCI_NUM_REGIONS];
138         struct list_head next;
139         struct serial_port s[2];
140         struct mutex rxtx_lock;
141         struct vfio_device_info dev_info;
142         int nr_ports;
143 };
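/*
 * Locking, as used below: ops_lock serializes config-space, region and IRQ
 * setup paths; rxtx_lock protects the per-port loopback FIFO state.
 */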
144
145 static atomic_t mdev_avail_ports = ATOMIC_INIT(MAX_MTTYS);
146
147 static const struct file_operations vd_fops = {
148         .owner          = THIS_MODULE,
149 };
150
151 static const struct vfio_device_ops mtty_dev_ops;
152
153 /* function prototypes */
154
155 static int mtty_trigger_interrupt(struct mdev_state *mdev_state);
156
157 /* Helper functions */
158
159 static void dump_buffer(u8 *buf, uint32_t count)
160 {
161 #if defined(DEBUG)
162         int i;
163
164         pr_info("Buffer:\n");
165         for (i = 0; i < count; i++) {
166                 pr_info("%2x ", *(buf + i));
167                 if ((i + 1) % 16 == 0)
168                         pr_info("\n");
169         }
170 #endif
171 }
172
173 static void mtty_create_config_space(struct mdev_state *mdev_state)
174 {
175         /* PCI dev ID */
176         STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);
177
178         /* Control: I/O+, Mem-, BusMaster- */
179         STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);
180
181         /* Status: capabilities list absent */
182         STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);
183
184         /* Rev ID */
185         mdev_state->vconfig[0x8] =  0x10;
186
187         /* programming interface class : 16550-compatible serial controller */
188         mdev_state->vconfig[0x9] =  0x02;
189
190         /* Sub class : 00 */
191         mdev_state->vconfig[0xa] =  0x00;
192
193         /* Base class : Simple Communication controllers */
194         mdev_state->vconfig[0xb] =  0x07;
195
196         /* base address registers */
197         /* BAR0: IO space */
198         STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
199         mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;
200
201         if (mdev_state->nr_ports == 2) {
202                 /* BAR1: IO space */
203                 STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
204                 mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
205         }
206
207         /* Subsystem ID */
208         STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);
209
210         mdev_state->vconfig[0x34] =  0x00;   /* Cap Ptr */
211         mdev_state->vconfig[0x3d] =  0x01;   /* interrupt pin (INTA#) */
212
213         /* Vendor specific data */
214         mdev_state->vconfig[0x40] =  0x23;
215         mdev_state->vconfig[0x43] =  0x80;
216         mdev_state->vconfig[0x44] =  0x23;
217         mdev_state->vconfig[0x48] =  0x23;
218         mdev_state->vconfig[0x4c] =  0x23;
219
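        /* 0x60-0x6e below spell the ASCII string "PCI Serial/UART" */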
220         mdev_state->vconfig[0x60] =  0x50;
221         mdev_state->vconfig[0x61] =  0x43;
222         mdev_state->vconfig[0x62] =  0x49;
223         mdev_state->vconfig[0x63] =  0x20;
224         mdev_state->vconfig[0x64] =  0x53;
225         mdev_state->vconfig[0x65] =  0x65;
226         mdev_state->vconfig[0x66] =  0x72;
227         mdev_state->vconfig[0x67] =  0x69;
228         mdev_state->vconfig[0x68] =  0x61;
229         mdev_state->vconfig[0x69] =  0x6c;
230         mdev_state->vconfig[0x6a] =  0x2f;
231         mdev_state->vconfig[0x6b] =  0x55;
232         mdev_state->vconfig[0x6c] =  0x41;
233         mdev_state->vconfig[0x6d] =  0x52;
234         mdev_state->vconfig[0x6e] =  0x54;
235 }
236
237 static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
238                                  u8 *buf, u32 count)
239 {
240         u32 cfg_addr, bar_mask, bar_index = 0;
241
242         switch (offset) {
243         case 0x04: /* device control */
244         case 0x06: /* device status */
245                 /* do nothing */
246                 break;
247         case 0x3c:  /* interrupt line */
248                 mdev_state->vconfig[0x3c] = buf[0];
249                 break;
250         case 0x3d:
251                 /*
252                  * Interrupt Pin is hardwired to INTA.
253                  * This field is write protected by hardware
254                  */
255                 break;
256         case 0x10:  /* BAR0 */
257         case 0x14:  /* BAR1 */
258                 if (offset == 0x10)
259                         bar_index = 0;
260                 else if (offset == 0x14)
261                         bar_index = 1;
262
263                 if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
264                         STORE_LE32(&mdev_state->vconfig[offset], 0);
265                         break;
266                 }
267
268                 cfg_addr = *(u32 *)buf;
269                 pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);
270
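                /*
                 * BAR sizing probe: the guest writes all 1s and reads back
                 * the size mask, so only the writable address bits are kept.
                 */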
271                 if (cfg_addr == 0xffffffff) {
272                         bar_mask = mdev_state->bar_mask[bar_index];
273                         cfg_addr = (cfg_addr & bar_mask);
274                 }
275
276                 cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
277                 STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
278                 break;
279         case 0x18:  /* BAR2 */
280         case 0x1c:  /* BAR3 */
281         case 0x20:  /* BAR4 */
282                 STORE_LE32(&mdev_state->vconfig[offset], 0);
283                 break;
284         default:
285                 pr_info("PCI config write @0x%x of %d bytes not handled\n",
286                         offset, count);
287                 break;
288         }
289 }
290
291 static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
292                                 u16 offset, u8 *buf, u32 count)
293 {
294         u8 data = *buf;
295
296         /* Handle data written by guest */
297         switch (offset) {
298         case UART_TX:
299                 /* if DLAB set, data is LSB of divisor */
300                 if (mdev_state->s[index].dlab) {
301                         mdev_state->s[index].divisor |= data;
302                         break;
303                 }
304
305                 mutex_lock(&mdev_state->rxtx_lock);
306
307                 /* save in TX buffer */
308                 if (mdev_state->s[index].rxtx.count <
309                                 mdev_state->s[index].max_fifo_size) {
310                         mdev_state->s[index].rxtx.fifo[
311                                         mdev_state->s[index].rxtx.head] = data;
312                         mdev_state->s[index].rxtx.count++;
313                         CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
314                         mdev_state->s[index].overrun = false;
315
316                         /*
317                          * Trigger interrupt if receive data interrupt is
318                          * enabled and fifo reached trigger level
319                          */
320                         if ((mdev_state->s[index].uart_reg[UART_IER] &
321                                                 UART_IER_RDI) &&
322                            (mdev_state->s[index].rxtx.count ==
323                                     mdev_state->s[index].intr_trigger_level)) {
324                                 /* trigger interrupt */
325 #if defined(DEBUG_INTR)
326                                 pr_err("Serial port %d: Fifo level trigger\n",
327                                         index);
328 #endif
329                                 mtty_trigger_interrupt(mdev_state);
330                         }
331                 } else {
332 #if defined(DEBUG_INTR)
333                         pr_err("Serial port %d: Buffer Overflow\n", index);
334 #endif
335                         mdev_state->s[index].overrun = true;
336
337                         /*
338                          * Trigger interrupt if receiver line status interrupt
339                          * is enabled
340                          */
341                         if (mdev_state->s[index].uart_reg[UART_IER] &
342                                                                 UART_IER_RLSI)
343                                 mtty_trigger_interrupt(mdev_state);
344                 }
345                 mutex_unlock(&mdev_state->rxtx_lock);
346                 break;
347
348         case UART_IER:
349                 /* if DLAB set, data is MSB of divisor */
350                 if (mdev_state->s[index].dlab)
351                         mdev_state->s[index].divisor |= (u16)data << 8;
352                 else {
353                         mdev_state->s[index].uart_reg[offset] = data;
354                         mutex_lock(&mdev_state->rxtx_lock);
355                         if ((data & UART_IER_THRI) &&
356                             (mdev_state->s[index].rxtx.head ==
357                                         mdev_state->s[index].rxtx.tail)) {
358 #if defined(DEBUG_INTR)
359                                 pr_err("Serial port %d: IER_THRI write\n",
360                                         index);
361 #endif
362                                 mtty_trigger_interrupt(mdev_state);
363                         }
364
365                         mutex_unlock(&mdev_state->rxtx_lock);
366                 }
367
368                 break;
369
370         case UART_FCR:
371                 mdev_state->s[index].fcr = data;
372
373                 mutex_lock(&mdev_state->rxtx_lock);
374                 if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
375                         /* clear loop back FIFO */
376                         mdev_state->s[index].rxtx.count = 0;
377                         mdev_state->s[index].rxtx.head = 0;
378                         mdev_state->s[index].rxtx.tail = 0;
379                 }
380                 mutex_unlock(&mdev_state->rxtx_lock);
381
382                 switch (data & UART_FCR_TRIGGER_MASK) {
383                 case UART_FCR_TRIGGER_1:
384                         mdev_state->s[index].intr_trigger_level = 1;
385                         break;
386
387                 case UART_FCR_TRIGGER_4:
388                         mdev_state->s[index].intr_trigger_level = 4;
389                         break;
390
391                 case UART_FCR_TRIGGER_8:
392                         mdev_state->s[index].intr_trigger_level = 8;
393                         break;
394
395                 case UART_FCR_TRIGGER_14:
396                         mdev_state->s[index].intr_trigger_level = 14;
397                         break;
398                 }
399
400                 /*
401                  * Otherwise set the trigger level to 1, or implement a timer with a
402                  * timeout of 4 characters and, when it expires, set the Receive data
403                  * timeout in the IIR register.
404                  */
405                 mdev_state->s[index].intr_trigger_level = 1;
406                 if (data & UART_FCR_ENABLE_FIFO)
407                         mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
408                 else {
409                         mdev_state->s[index].max_fifo_size = 1;
410                         mdev_state->s[index].intr_trigger_level = 1;
411                 }
412
413                 break;
414
415         case UART_LCR:
416                 if (data & UART_LCR_DLAB) {
417                         mdev_state->s[index].dlab = true;
418                         mdev_state->s[index].divisor = 0;
419                 } else
420                         mdev_state->s[index].dlab = false;
421
422                 mdev_state->s[index].uart_reg[offset] = data;
423                 break;
424
425         case UART_MCR:
426                 mdev_state->s[index].uart_reg[offset] = data;
427
428                 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
429                                 (data & UART_MCR_OUT2)) {
430 #if defined(DEBUG_INTR)
431                         pr_err("Serial port %d: MCR_OUT2 write\n", index);
432 #endif
433                         mtty_trigger_interrupt(mdev_state);
434                 }
435
436                 if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
437                                 (data & (UART_MCR_RTS | UART_MCR_DTR))) {
438 #if defined(DEBUG_INTR)
439                         pr_err("Serial port %d: MCR RTS/DTR write\n", index);
440 #endif
441                         mtty_trigger_interrupt(mdev_state);
442                 }
443                 break;
444
445         case UART_LSR:
446         case UART_MSR:
447                 /* do nothing */
448                 break;
449
450         case UART_SCR:
451                 mdev_state->s[index].uart_reg[offset] = data;
452                 break;
453
454         default:
455                 break;
456         }
457 }
458
459 static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
460                             u16 offset, u8 *buf, u32 count)
461 {
462         /* Handle read requests by guest */
463         switch (offset) {
464         case UART_RX:
465                 /* if DLAB set, data is LSB of divisor */
466                 if (mdev_state->s[index].dlab) {
467                         *buf  = (u8)mdev_state->s[index].divisor;
468                         break;
469                 }
470
471                 mutex_lock(&mdev_state->rxtx_lock);
472                 /* return data in tx buffer */
473                 if (mdev_state->s[index].rxtx.head !=
474                                  mdev_state->s[index].rxtx.tail) {
475                         *buf = mdev_state->s[index].rxtx.fifo[
476                                                 mdev_state->s[index].rxtx.tail];
477                         mdev_state->s[index].rxtx.count--;
478                         CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
479                 }
480
481                 if (mdev_state->s[index].rxtx.head ==
482                                 mdev_state->s[index].rxtx.tail) {
483                         /*
484                          * Trigger interrupt if tx buffer empty interrupt is
485                          * enabled and fifo is empty
486                          */
487 #if defined(DEBUG_INTR)
488                         pr_err("Serial port %d: Buffer Empty\n", index);
489 #endif
490                         if (mdev_state->s[index].uart_reg[UART_IER] &
491                                                          UART_IER_THRI)
492                                 mtty_trigger_interrupt(mdev_state);
493                 }
494                 mutex_unlock(&mdev_state->rxtx_lock);
495
496                 break;
497
498         case UART_IER:
499                 if (mdev_state->s[index].dlab) {
500                         *buf = (u8)(mdev_state->s[index].divisor >> 8);
501                         break;
502                 }
503                 *buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
504                 break;
505
506         case UART_IIR:
507         {
508                 u8 ier = mdev_state->s[index].uart_reg[UART_IER];
509                 *buf = 0;
510
511                 mutex_lock(&mdev_state->rxtx_lock);
512                 /* Interrupt priority 1: Parity, overrun, framing or break */
513                 if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
514                         *buf |= UART_IIR_RLSI;
515
516                 /* Interrupt priority 2: Fifo trigger level reached */
517                 if ((ier & UART_IER_RDI) &&
518                     (mdev_state->s[index].rxtx.count >=
519                       mdev_state->s[index].intr_trigger_level))
520                         *buf |= UART_IIR_RDI;
521
522                 /* Interrupt priority 3: transmitter holding register empty */
523                 if ((ier & UART_IER_THRI) &&
524                     (mdev_state->s[index].rxtx.head ==
525                                 mdev_state->s[index].rxtx.tail))
526                         *buf |= UART_IIR_THRI;
527
528                 /* Interrupt priority 4: Modem status: CTS, DSR, RI or DCD */
529                 if ((ier & UART_IER_MSI) &&
530                     (mdev_state->s[index].uart_reg[UART_MCR] &
531                                  (UART_MCR_RTS | UART_MCR_DTR)))
532                         *buf |= UART_IIR_MSI;
533
534                 /* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
535                 if (*buf == 0)
536                         *buf = UART_IIR_NO_INT;
537
538                 /* set bits 6 & 7 to be 16550 compatible */
539                 *buf |= 0xC0;
540                 mutex_unlock(&mdev_state->rxtx_lock);
541         }
542         break;
543
544         case UART_LCR:
545         case UART_MCR:
546                 *buf = mdev_state->s[index].uart_reg[offset];
547                 break;
548
549         case UART_LSR:
550         {
551                 u8 lsr = 0;
552
553                 mutex_lock(&mdev_state->rxtx_lock);
554                 /* at least one char in FIFO */
555                 if (mdev_state->s[index].rxtx.head !=
556                                  mdev_state->s[index].rxtx.tail)
557                         lsr |= UART_LSR_DR;
558
559                 /* if FIFO overrun */
560                 if (mdev_state->s[index].overrun)
561                         lsr |= UART_LSR_OE;
562
563                 /* transmit FIFO empty and transmitter empty */
564                 if (mdev_state->s[index].rxtx.head ==
565                                  mdev_state->s[index].rxtx.tail)
566                         lsr |= UART_LSR_TEMT | UART_LSR_THRE;
567
568                 mutex_unlock(&mdev_state->rxtx_lock);
569                 *buf = lsr;
570                 break;
571         }
572         case UART_MSR:
573                 *buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;
574
575                 mutex_lock(&mdev_state->rxtx_lock);
576                 /* if AFE is 1 and FIFO has space, set CTS bit */
577                 if (mdev_state->s[index].uart_reg[UART_MCR] &
578                                                  UART_MCR_AFE) {
579                         if (mdev_state->s[index].rxtx.count <
580                                         mdev_state->s[index].max_fifo_size)
581                                 *buf |= UART_MSR_CTS | UART_MSR_DCTS;
582                 } else
583                         *buf |= UART_MSR_CTS | UART_MSR_DCTS;
584                 mutex_unlock(&mdev_state->rxtx_lock);
585
586                 break;
587
588         case UART_SCR:
589                 *buf = mdev_state->s[index].uart_reg[offset];
590                 break;
591
592         default:
593                 break;
594         }
595 }
596
597 static void mdev_read_base(struct mdev_state *mdev_state)
598 {
599         int index, pos;
600         u32 start_lo, start_hi;
601         u32 mem_type;
602
603         pos = PCI_BASE_ADDRESS_0;
604
605         for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {
606
607                 if (!mdev_state->region_info[index].size)
608                         continue;
609
610                 start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
611                         PCI_BASE_ADDRESS_MEM_MASK;
612                 mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
613                         PCI_BASE_ADDRESS_MEM_TYPE_MASK;
614
615                 switch (mem_type) {
616                 case PCI_BASE_ADDRESS_MEM_TYPE_64:
617                         start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
618                         pos += 4;
619                         break;
620                 case PCI_BASE_ADDRESS_MEM_TYPE_32:
621                 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
622                         /* 1M mem BAR treated as 32-bit BAR */
623                 default:
624                         /* unknown mem type treated as 32-bit BAR */
625                         start_hi = 0;
626                         break;
627                 }
628                 pos += 4;
629                 mdev_state->region_info[index].start = ((u64)start_hi << 32) |
630                                                         start_lo;
631         }
632 }
633
634 static ssize_t mdev_access(struct mdev_state *mdev_state, u8 *buf, size_t count,
635                            loff_t pos, bool is_write)
636 {
637         unsigned int index;
638         loff_t offset;
639         int ret = 0;
640
641         if (!buf)
642                 return -EINVAL;
643
644         mutex_lock(&mdev_state->ops_lock);
645
646         index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
647         offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
648         switch (index) {
649         case VFIO_PCI_CONFIG_REGION_INDEX:
650
651 #if defined(DEBUG)
652                 pr_info("%s: PCI config space %s at offset 0x%llx\n",
653                          __func__, is_write ? "write" : "read", offset);
654 #endif
655                 if (is_write) {
656                         dump_buffer(buf, count);
657                         handle_pci_cfg_write(mdev_state, offset, buf, count);
658                 } else {
659                         memcpy(buf, (mdev_state->vconfig + offset), count);
660                         dump_buffer(buf, count);
661                 }
662
663                 break;
664
665         case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
666                 if (!mdev_state->region_info[index].start)
667                         mdev_read_base(mdev_state);
668
669                 if (is_write) {
670                         dump_buffer(buf, count);
671
672 #if defined(DEBUG_REGS)
673                         pr_info("%s: BAR%d  WR @0x%llx %s val:0x%02x dlab:%d\n",
674                                 __func__, index, offset, wr_reg[offset],
675                                 *buf, mdev_state->s[index].dlab);
676 #endif
677                         handle_bar_write(index, mdev_state, offset, buf, count);
678                 } else {
679                         handle_bar_read(index, mdev_state, offset, buf, count);
680                         dump_buffer(buf, count);
681
682 #if defined(DEBUG_REGS)
683                         pr_info("%s: BAR%d  RD @0x%llx %s val:0x%02x dlab:%d\n",
684                                 __func__, index, offset, rd_reg[offset],
685                                 *buf, mdev_state->s[index].dlab);
686 #endif
687                 }
688                 break;
689
690         default:
691                 ret = -1;
692                 goto accessfailed;
693         }
694
695         ret = count;
696
697
698 accessfailed:
699         mutex_unlock(&mdev_state->ops_lock);
700
701         return ret;
702 }
703
704 static int mtty_init_dev(struct vfio_device *vdev)
705 {
706         struct mdev_state *mdev_state =
707                 container_of(vdev, struct mdev_state, vdev);
708         struct mdev_device *mdev = to_mdev_device(vdev->dev);
709         int nr_ports = mdev_get_type_group_id(mdev) + 1;
710         int avail_ports = atomic_read(&mdev_avail_ports);
711         int ret;
712
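        /* Reserve nr_ports from the shared pool; fail if too few remain. */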
713         do {
714                 if (avail_ports < nr_ports)
715                         return -ENOSPC;
716         } while (!atomic_try_cmpxchg(&mdev_avail_ports,
717                                      &avail_ports, avail_ports - nr_ports));
718
719         mdev_state->nr_ports = nr_ports;
720         mdev_state->irq_index = -1;
721         mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
722         mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
723         mutex_init(&mdev_state->rxtx_lock);
724
725         mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);
726         if (!mdev_state->vconfig) {
727                 ret = -ENOMEM;
728                 goto err_nr_ports;
729         }
730
731         mutex_init(&mdev_state->ops_lock);
732         mdev_state->mdev = mdev;
733         mtty_create_config_space(mdev_state);
734         return 0;
735
736 err_nr_ports:
737         atomic_add(nr_ports, &mdev_avail_ports);
738         return ret;
739 }
740
741 static int mtty_probe(struct mdev_device *mdev)
742 {
743         struct mdev_state *mdev_state;
744         int ret;
745
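        /* vfio_alloc_device() runs mtty_init_dev() via mtty_dev_ops.init. */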
746         mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
747                                        &mtty_dev_ops);
748         if (IS_ERR(mdev_state))
749                 return PTR_ERR(mdev_state);
750
751         ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
752         if (ret)
753                 goto err_put_vdev;
754         dev_set_drvdata(&mdev->dev, mdev_state);
755         return 0;
756
757 err_put_vdev:
758         vfio_put_device(&mdev_state->vdev);
759         return ret;
760 }
761
762 static void mtty_release_dev(struct vfio_device *vdev)
763 {
764         struct mdev_state *mdev_state =
765                 container_of(vdev, struct mdev_state, vdev);
766
767         atomic_add(mdev_state->nr_ports, &mdev_avail_ports);
768         kfree(mdev_state->vconfig);
769         vfio_free_device(vdev);
770 }
771
772 static void mtty_remove(struct mdev_device *mdev)
773 {
774         struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);
775
776         vfio_unregister_group_dev(&mdev_state->vdev);
777         vfio_put_device(&mdev_state->vdev);
778 }
779
780 static int mtty_reset(struct mdev_state *mdev_state)
781 {
782         pr_info("%s: called\n", __func__);
783
784         return 0;
785 }
786
787 static ssize_t mtty_read(struct vfio_device *vdev, char __user *buf,
788                          size_t count, loff_t *ppos)
789 {
790         struct mdev_state *mdev_state =
791                 container_of(vdev, struct mdev_state, vdev);
792         unsigned int done = 0;
793         int ret;
794
795         while (count) {
796                 size_t filled;
797
798                 if (count >= 4 && !(*ppos % 4)) {
799                         u32 val;
800
801                         ret =  mdev_access(mdev_state, (u8 *)&val, sizeof(val),
802                                            *ppos, false);
803                         if (ret <= 0)
804                                 goto read_err;
805
806                         if (copy_to_user(buf, &val, sizeof(val)))
807                                 goto read_err;
808
809                         filled = 4;
810                 } else if (count >= 2 && !(*ppos % 2)) {
811                         u16 val;
812
813                         ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
814                                           *ppos, false);
815                         if (ret <= 0)
816                                 goto read_err;
817
818                         if (copy_to_user(buf, &val, sizeof(val)))
819                                 goto read_err;
820
821                         filled = 2;
822                 } else {
823                         u8 val;
824
825                         ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
826                                           *ppos, false);
827                         if (ret <= 0)
828                                 goto read_err;
829
830                         if (copy_to_user(buf, &val, sizeof(val)))
831                                 goto read_err;
832
833                         filled = 1;
834                 }
835
836                 count -= filled;
837                 done += filled;
838                 *ppos += filled;
839                 buf += filled;
840         }
841
842         return done;
843
844 read_err:
845         return -EFAULT;
846 }
847
848 static ssize_t mtty_write(struct vfio_device *vdev, const char __user *buf,
849                    size_t count, loff_t *ppos)
850 {
851         struct mdev_state *mdev_state =
852                 container_of(vdev, struct mdev_state, vdev);
853         unsigned int done = 0;
854         int ret;
855
856         while (count) {
857                 size_t filled;
858
859                 if (count >= 4 && !(*ppos % 4)) {
860                         u32 val;
861
862                         if (copy_from_user(&val, buf, sizeof(val)))
863                                 goto write_err;
864
865                         ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
866                                           *ppos, true);
867                         if (ret <= 0)
868                                 goto write_err;
869
870                         filled = 4;
871                 } else if (count >= 2 && !(*ppos % 2)) {
872                         u16 val;
873
874                         if (copy_from_user(&val, buf, sizeof(val)))
875                                 goto write_err;
876
877                         ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
878                                           *ppos, true);
879                         if (ret <= 0)
880                                 goto write_err;
881
882                         filled = 2;
883                 } else {
884                         u8 val;
885
886                         if (copy_from_user(&val, buf, sizeof(val)))
887                                 goto write_err;
888
889                         ret = mdev_access(mdev_state, (u8 *)&val, sizeof(val),
890                                           *ppos, true);
891                         if (ret <= 0)
892                                 goto write_err;
893
894                         filled = 1;
895                 }
896                 count -= filled;
897                 done += filled;
898                 *ppos += filled;
899                 buf += filled;
900         }
901
902         return done;
903 write_err:
904         return -EFAULT;
905 }
906
907 static int mtty_set_irqs(struct mdev_state *mdev_state, uint32_t flags,
908                          unsigned int index, unsigned int start,
909                          unsigned int count, void *data)
910 {
911         int ret = 0;
912
913         mutex_lock(&mdev_state->ops_lock);
914         switch (index) {
915         case VFIO_PCI_INTX_IRQ_INDEX:
916                 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
917                 case VFIO_IRQ_SET_ACTION_MASK:
918                 case VFIO_IRQ_SET_ACTION_UNMASK:
919                         break;
920                 case VFIO_IRQ_SET_ACTION_TRIGGER:
921                 {
922                         if (flags & VFIO_IRQ_SET_DATA_NONE) {
923                                 pr_info("%s: disable INTx\n", __func__);
924                                 if (mdev_state->intx_evtfd)
925                                         eventfd_ctx_put(mdev_state->intx_evtfd);
926                                 break;
927                         }
928
929                         if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
930                                 int fd = *(int *)data;
931
932                                 if (fd > 0) {
933                                         struct eventfd_ctx *evt;
934
935                                         evt = eventfd_ctx_fdget(fd);
936                                         if (IS_ERR(evt)) {
937                                                 ret = PTR_ERR(evt);
938                                                 break;
939                                         }
940                                         mdev_state->intx_evtfd = evt;
941                                         mdev_state->irq_fd = fd;
942                                         mdev_state->irq_index = index;
943                                         break;
944                                 }
945                         }
946                         break;
947                 }
948                 }
949                 break;
950         case VFIO_PCI_MSI_IRQ_INDEX:
951                 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
952                 case VFIO_IRQ_SET_ACTION_MASK:
953                 case VFIO_IRQ_SET_ACTION_UNMASK:
954                         break;
955                 case VFIO_IRQ_SET_ACTION_TRIGGER:
956                         if (flags & VFIO_IRQ_SET_DATA_NONE) {
957                                 if (mdev_state->msi_evtfd)
958                                         eventfd_ctx_put(mdev_state->msi_evtfd);
959                                 pr_info("%s: disable MSI\n", __func__);
960                                 mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
961                                 break;
962                         }
963                         if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
964                                 int fd = *(int *)data;
965                                 struct eventfd_ctx *evt;
966
967                                 if (fd <= 0)
968                                         break;
969
970                                 if (mdev_state->msi_evtfd)
971                                         break;
972
973                                 evt = eventfd_ctx_fdget(fd);
974                                 if (IS_ERR(evt)) {
975                                         ret = PTR_ERR(evt);
976                                         break;
977                                 }
978                                 mdev_state->msi_evtfd = evt;
979                                 mdev_state->irq_fd = fd;
980                                 mdev_state->irq_index = index;
981                         }
982                         break;
983         }
984         break;
985         case VFIO_PCI_MSIX_IRQ_INDEX:
986                 pr_info("%s: MSIX_IRQ\n", __func__);
987                 break;
988         case VFIO_PCI_ERR_IRQ_INDEX:
989                 pr_info("%s: ERR_IRQ\n", __func__);
990                 break;
991         case VFIO_PCI_REQ_IRQ_INDEX:
992                 pr_info("%s: REQ_IRQ\n", __func__);
993                 break;
994         }
995
996         mutex_unlock(&mdev_state->ops_lock);
997         return ret;
998 }
999
1000 static int mtty_trigger_interrupt(struct mdev_state *mdev_state)
1001 {
1002         int ret = -1;
1003
1004         if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
1005             (!mdev_state->msi_evtfd))
1006                 return -EINVAL;
1007         else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
1008                  (!mdev_state->intx_evtfd)) {
1009                 pr_info("%s: Intr eventfd not found\n", __func__);
1010                 return -EINVAL;
1011         }
1012
1013         if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
1014                 ret = eventfd_signal(mdev_state->msi_evtfd, 1);
1015         else
1016                 ret = eventfd_signal(mdev_state->intx_evtfd, 1);
1017
1018 #if defined(DEBUG_INTR)
1019         pr_info("Intx triggered\n");
1020 #endif
1021         if (ret != 1)
1022                 pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);
1023
1024         return ret;
1025 }
1026
1027 static int mtty_get_region_info(struct mdev_state *mdev_state,
1028                          struct vfio_region_info *region_info,
1029                          u16 *cap_type_id, void **cap_type)
1030 {
1031         unsigned int size = 0;
1032         u32 bar_index;
1033
1034         bar_index = region_info->index;
1035         if (bar_index >= VFIO_PCI_NUM_REGIONS)
1036                 return -EINVAL;
1037
1038         mutex_lock(&mdev_state->ops_lock);
1039
1040         switch (bar_index) {
1041         case VFIO_PCI_CONFIG_REGION_INDEX:
1042                 size = MTTY_CONFIG_SPACE_SIZE;
1043                 break;
1044         case VFIO_PCI_BAR0_REGION_INDEX:
1045                 size = MTTY_IO_BAR_SIZE;
1046                 break;
1047         case VFIO_PCI_BAR1_REGION_INDEX:
1048                 if (mdev_state->nr_ports == 2)
1049                         size = MTTY_IO_BAR_SIZE;
1050                 break;
1051         default:
1052                 size = 0;
1053                 break;
1054         }
1055
1056         mdev_state->region_info[bar_index].size = size;
1057         mdev_state->region_info[bar_index].vfio_offset =
1058                 MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
1059
1060         region_info->size = size;
1061         region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
1062         region_info->flags = VFIO_REGION_INFO_FLAG_READ |
1063                 VFIO_REGION_INFO_FLAG_WRITE;
1064         mutex_unlock(&mdev_state->ops_lock);
1065         return 0;
1066 }
1067
1068 static int mtty_get_irq_info(struct vfio_irq_info *irq_info)
1069 {
1070         switch (irq_info->index) {
1071         case VFIO_PCI_INTX_IRQ_INDEX:
1072         case VFIO_PCI_MSI_IRQ_INDEX:
1073         case VFIO_PCI_REQ_IRQ_INDEX:
1074                 break;
1075
1076         default:
1077                 return -EINVAL;
1078         }
1079
1080         irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
1081         irq_info->count = 1;
1082
1083         if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
1084                 irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
1085                                 VFIO_IRQ_INFO_AUTOMASKED);
1086         else
1087                 irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;
1088
1089         return 0;
1090 }
1091
1092 static int mtty_get_device_info(struct vfio_device_info *dev_info)
1093 {
1094         dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
1095         dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
1096         dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
1097
1098         return 0;
1099 }
1100
1101 static long mtty_ioctl(struct vfio_device *vdev, unsigned int cmd,
1102                         unsigned long arg)
1103 {
1104         struct mdev_state *mdev_state =
1105                 container_of(vdev, struct mdev_state, vdev);
1106         int ret = 0;
1107         unsigned long minsz;
1108
1109         switch (cmd) {
1110         case VFIO_DEVICE_GET_INFO:
1111         {
1112                 struct vfio_device_info info;
1113
1114                 minsz = offsetofend(struct vfio_device_info, num_irqs);
1115
1116                 if (copy_from_user(&info, (void __user *)arg, minsz))
1117                         return -EFAULT;
1118
1119                 if (info.argsz < minsz)
1120                         return -EINVAL;
1121
1122                 ret = mtty_get_device_info(&info);
1123                 if (ret)
1124                         return ret;
1125
1126                 memcpy(&mdev_state->dev_info, &info, sizeof(info));
1127
1128                 if (copy_to_user((void __user *)arg, &info, minsz))
1129                         return -EFAULT;
1130
1131                 return 0;
1132         }
1133         case VFIO_DEVICE_GET_REGION_INFO:
1134         {
1135                 struct vfio_region_info info;
1136                 u16 cap_type_id = 0;
1137                 void *cap_type = NULL;
1138
1139                 minsz = offsetofend(struct vfio_region_info, offset);
1140
1141                 if (copy_from_user(&info, (void __user *)arg, minsz))
1142                         return -EFAULT;
1143
1144                 if (info.argsz < minsz)
1145                         return -EINVAL;
1146
1147                 ret = mtty_get_region_info(mdev_state, &info, &cap_type_id,
1148                                            &cap_type);
1149                 if (ret)
1150                         return ret;
1151
1152                 if (copy_to_user((void __user *)arg, &info, minsz))
1153                         return -EFAULT;
1154
1155                 return 0;
1156         }
1157
1158         case VFIO_DEVICE_GET_IRQ_INFO:
1159         {
1160                 struct vfio_irq_info info;
1161
1162                 minsz = offsetofend(struct vfio_irq_info, count);
1163
1164                 if (copy_from_user(&info, (void __user *)arg, minsz))
1165                         return -EFAULT;
1166
1167                 if ((info.argsz < minsz) ||
1168                     (info.index >= mdev_state->dev_info.num_irqs))
1169                         return -EINVAL;
1170
1171                 ret = mtty_get_irq_info(&info);
1172                 if (ret)
1173                         return ret;
1174
1175                 if (copy_to_user((void __user *)arg, &info, minsz))
1176                         return -EFAULT;
1177
1178                 return 0;
1179         }
1180         case VFIO_DEVICE_SET_IRQS:
1181         {
1182                 struct vfio_irq_set hdr;
1183                 u8 *data = NULL, *ptr = NULL;
1184                 size_t data_size = 0;
1185
1186                 minsz = offsetofend(struct vfio_irq_set, count);
1187
1188                 if (copy_from_user(&hdr, (void __user *)arg, minsz))
1189                         return -EFAULT;
1190
1191                 ret = vfio_set_irqs_validate_and_prepare(&hdr,
1192                                                 mdev_state->dev_info.num_irqs,
1193                                                 VFIO_PCI_NUM_IRQS,
1194                                                 &data_size);
1195                 if (ret)
1196                         return ret;
1197
1198                 if (data_size) {
1199                         ptr = data = memdup_user((void __user *)(arg + minsz),
1200                                                  data_size);
1201                         if (IS_ERR(data))
1202                                 return PTR_ERR(data);
1203                 }
1204
1205                 ret = mtty_set_irqs(mdev_state, hdr.flags, hdr.index, hdr.start,
1206                                     hdr.count, data);
1207
1208                 kfree(ptr);
1209                 return ret;
1210         }
1211         case VFIO_DEVICE_RESET:
1212                 return mtty_reset(mdev_state);
1213         }
1214         return -ENOTTY;
1215 }
1216
1217 static ssize_t
1218 sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
1219                      char *buf)
1220 {
1221         return sprintf(buf, "This is MDEV %s\n", dev_name(dev));
1222 }
1223
1224 static DEVICE_ATTR_RO(sample_mdev_dev);
1225
1226 static struct attribute *mdev_dev_attrs[] = {
1227         &dev_attr_sample_mdev_dev.attr,
1228         NULL,
1229 };
1230
1231 static const struct attribute_group mdev_dev_group = {
1232         .name  = "vendor",
1233         .attrs = mdev_dev_attrs,
1234 };
1235
1236 static const struct attribute_group *mdev_dev_groups[] = {
1237         &mdev_dev_group,
1238         NULL,
1239 };
1240
1241 static ssize_t name_show(struct mdev_type *mtype,
1242                          struct mdev_type_attribute *attr, char *buf)
1243 {
1244         static const char *name_str[2] = { "Single port serial",
1245                                            "Dual port serial" };
1246
1247         return sysfs_emit(buf, "%s\n",
1248                           name_str[mtype_get_type_group_id(mtype)]);
1249 }
1250
1251 static MDEV_TYPE_ATTR_RO(name);
1252
1253 static ssize_t available_instances_show(struct mdev_type *mtype,
1254                                         struct mdev_type_attribute *attr,
1255                                         char *buf)
1256 {
1257         unsigned int ports = mtype_get_type_group_id(mtype) + 1;
1258
1259         return sprintf(buf, "%d\n", atomic_read(&mdev_avail_ports) / ports);
1260 }
1261
1262 static MDEV_TYPE_ATTR_RO(available_instances);
1263
1264 static ssize_t device_api_show(struct mdev_type *mtype,
1265                                struct mdev_type_attribute *attr, char *buf)
1266 {
1267         return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
1268 }
1269
1270 static MDEV_TYPE_ATTR_RO(device_api);
1271
1272 static struct attribute *mdev_types_attrs[] = {
1273         &mdev_type_attr_name.attr,
1274         &mdev_type_attr_device_api.attr,
1275         &mdev_type_attr_available_instances.attr,
1276         NULL,
1277 };
1278
1279 static struct attribute_group mdev_type_group1 = {
1280         .name  = "1",
1281         .attrs = mdev_types_attrs,
1282 };
1283
1284 static struct attribute_group mdev_type_group2 = {
1285         .name  = "2",
1286         .attrs = mdev_types_attrs,
1287 };
1288
1289 static struct attribute_group *mdev_type_groups[] = {
1290         &mdev_type_group1,
1291         &mdev_type_group2,
1292         NULL,
1293 };
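/*
 * Type group "1" exposes the single-port type and "2" the dual-port type;
 * mtty_init_dev() derives nr_ports as the type group id + 1.
 */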
1294
1295 static const struct vfio_device_ops mtty_dev_ops = {
1296         .name = "vfio-mtty",
1297         .init = mtty_init_dev,
1298         .release = mtty_release_dev,
1299         .read = mtty_read,
1300         .write = mtty_write,
1301         .ioctl = mtty_ioctl,
1302 };
1303
1304 static struct mdev_driver mtty_driver = {
1305         .driver = {
1306                 .name = "mtty",
1307                 .owner = THIS_MODULE,
1308                 .mod_name = KBUILD_MODNAME,
1309                 .dev_groups = mdev_dev_groups,
1310         },
1311         .probe = mtty_probe,
1312         .remove = mtty_remove,
1313         .supported_type_groups = mdev_type_groups,
1314 };
1315
1316 static void mtty_device_release(struct device *dev)
1317 {
1318         dev_dbg(dev, "mtty: released\n");
1319 }
1320
1321 static int __init mtty_dev_init(void)
1322 {
1323         int ret = 0;
1324
1325         pr_info("mtty_dev: %s\n", __func__);
1326
1327         memset(&mtty_dev, 0, sizeof(mtty_dev));
1328
1329         idr_init(&mtty_dev.vd_idr);
1330
1331         ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK + 1,
1332                                   MTTY_NAME);
1333
1334         if (ret < 0) {
1335                 pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
1336                 return ret;
1337         }
1338
1339         cdev_init(&mtty_dev.vd_cdev, &vd_fops);
1340         cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK + 1);
1341
1342         pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));
1343
1344         ret = mdev_register_driver(&mtty_driver);
1345         if (ret)
1346                 goto err_cdev;
1347
1348         mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);
1349
1350         if (IS_ERR(mtty_dev.vd_class)) {
1351                 pr_err("Error: failed to register mtty_dev class\n");
1352                 ret = PTR_ERR(mtty_dev.vd_class);
1353                 goto err_driver;
1354         }
1355
1356         mtty_dev.dev.class = mtty_dev.vd_class;
1357         mtty_dev.dev.release = mtty_device_release;
1358         dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);
1359
1360         ret = device_register(&mtty_dev.dev);
1361         if (ret)
1362                 goto err_class;
1363
1364         ret = mdev_register_device(&mtty_dev.dev, &mtty_driver);
1365         if (ret)
1366                 goto err_device;
1367         return 0;
1368
1369 err_device:
1370         device_unregister(&mtty_dev.dev);
1371 err_class:
1372         class_destroy(mtty_dev.vd_class);
1373 err_driver:
1374         mdev_unregister_driver(&mtty_driver);
1375 err_cdev:
1376         cdev_del(&mtty_dev.vd_cdev);
1377         unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
1378         return ret;
1379 }
1380
1381 static void __exit mtty_dev_exit(void)
1382 {
1383         mtty_dev.dev.bus = NULL;
1384         mdev_unregister_device(&mtty_dev.dev);
1385
1386         device_unregister(&mtty_dev.dev);
1387         idr_destroy(&mtty_dev.vd_idr);
1388         mdev_unregister_driver(&mtty_driver);
1389         cdev_del(&mtty_dev.vd_cdev);
1390         unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK + 1);
1391         class_destroy(mtty_dev.vd_class);
1392         mtty_dev.vd_class = NULL;
1393         pr_info("mtty_dev: Unloaded!\n");
1394 }
1395
1396 module_init(mtty_dev_init)
1397 module_exit(mtty_dev_exit)
1398
1399 MODULE_LICENSE("GPL v2");
1400 MODULE_INFO(supported, "Test driver that simulates a serial port over PCI");
1401 MODULE_VERSION(VERSION_STRING);
1402 MODULE_AUTHOR(DRIVER_AUTHOR);