drivers/spi/spidev.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Simple synchronous userspace interface to SPI devices
 *
 * Copyright (C) 2006 SWAPP
 *      Andrea Paterniani <a.paterniani@swapp-eng.it>
 * Copyright (C) 2007 David Brownell (simplification, cleanup)
 */

#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/compat.h>

#include <linux/spi/spi.h>
#include <linux/spi/spidev.h>

#include <linux/uaccess.h>


/*
 * This supports access to SPI devices using normal userspace I/O calls.
 * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
 * and often mask message boundaries, full SPI support requires full duplex
 * transfers.  There are several kinds of internal message boundaries to
 * handle chipselect management and other protocol options.
 *
 * SPI has a character major number assigned.  We allocate minor numbers
 * dynamically using a bitmask.  You must use hotplug tools, such as udev
 * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
 * nodes, since there is no fixed association of minor numbers with any
 * particular SPI bus or device.
 */
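/*
 * Illustrative only (not part of this driver): a minimal userspace sketch of
 * the half-duplex read()/write() usage described above.  The node name
 * /dev/spidev0.0, SPI_MODE_0 and the 1 MHz clock are assumptions made for
 * the example, not requirements; error handling is omitted.
 *
 *      #include <stdint.h>
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *      #include <sys/ioctl.h>
 *      #include <linux/spi/spidev.h>
 *
 *      int fd = open("/dev/spidev0.0", O_RDWR);        // bus 0, chipselect 0
 *      uint8_t mode = SPI_MODE_0;
 *      uint32_t speed = 1000000;                       // 1 MHz, assumed
 *      ioctl(fd, SPI_IOC_WR_MODE, &mode);              // clock polarity/phase
 *      ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, &speed);     // default transfer clock
 *
 *      uint8_t cmd[2] = { 0x01, 0x80 };                // arbitrary example bytes
 *      uint8_t resp[4];
 *      write(fd, cmd, sizeof(cmd));                    // TX-only transfer
 *      read(fd, resp, sizeof(resp));                   // RX-only transfer
 *      close(fd);
 */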
#define SPIDEV_MAJOR                    153     /* assigned */
#define N_SPI_MINORS                    32      /* ... up to 256 */

static DECLARE_BITMAP(minors, N_SPI_MINORS);

static_assert(N_SPI_MINORS > 0 && N_SPI_MINORS <= 256);

/* Bit masks for spi_device.mode management.  Note that incorrect
 * values for some of these settings can cause *lots* of trouble for
 * other devices on a shared bus:
 *
 *  - CS_HIGH ... this device will be active when it shouldn't be
 *  - 3WIRE ... when active, it won't behave as it should
 *  - NO_CS ... there will be no explicit message boundaries; this
 *      is completely incompatible with the shared bus model
 *  - READY ... transfers may proceed when they shouldn't.
 *
 * REVISIT should changing those flags be privileged?
 */
#define SPI_MODE_MASK           (SPI_MODE_X_MASK | SPI_CS_HIGH \
                                | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
                                | SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
                                | SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \
                                | SPI_RX_QUAD | SPI_RX_OCTAL \
                                | SPI_RX_CPHA_FLIP)

struct spidev_data {
        dev_t                   devt;
        spinlock_t              spi_lock;
        struct spi_device       *spi;
        struct list_head        device_entry;

        /* TX/RX buffers are NULL unless this device is open (users > 0) */
        struct mutex            buf_lock;
        unsigned                users;
        u8                      *tx_buffer;
        u8                      *rx_buffer;
        u32                     speed_hz;
};

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_lock);

static unsigned bufsiz = 4096;
module_param(bufsiz, uint, S_IRUGO);
MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
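/*
 * Illustrative note: bufsiz caps the per-message bounce buffer.  It is a
 * read-only module parameter (S_IRUGO), so it can only be set at load time,
 * e.g. "modprobe spidev bufsiz=65536"; the 64 KiB value here is just an
 * example, not a recommendation.
 */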

/*-------------------------------------------------------------------------*/

static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
        int status;
        struct spi_device *spi;

        spin_lock_irq(&spidev->spi_lock);
        spi = spidev->spi;
        spin_unlock_irq(&spidev->spi_lock);

        if (spi == NULL)
                status = -ESHUTDOWN;
        else
                status = spi_sync(spi, message);

        if (status == 0)
                status = message->actual_length;

        return status;
}

static inline ssize_t
spidev_sync_write(struct spidev_data *spidev, size_t len)
{
        struct spi_transfer     t = {
                        .tx_buf         = spidev->tx_buffer,
                        .len            = len,
                        .speed_hz       = spidev->speed_hz,
                };
        struct spi_message      m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spidev_sync(spidev, &m);
}

static inline ssize_t
spidev_sync_read(struct spidev_data *spidev, size_t len)
{
        struct spi_transfer     t = {
                        .rx_buf         = spidev->rx_buffer,
                        .len            = len,
                        .speed_hz       = spidev->speed_hz,
                };
        struct spi_message      m;

        spi_message_init(&m);
        spi_message_add_tail(&t, &m);
        return spidev_sync(spidev, &m);
}

/*-------------------------------------------------------------------------*/

/* Read-only message with current device setup */
static ssize_t
spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
        struct spidev_data      *spidev;
        ssize_t                 status;

        /* chipselect only toggles at start or end of operation */
        if (count > bufsiz)
                return -EMSGSIZE;

        spidev = filp->private_data;

        mutex_lock(&spidev->buf_lock);
        status = spidev_sync_read(spidev, count);
        if (status > 0) {
                unsigned long   missing;

                missing = copy_to_user(buf, spidev->rx_buffer, status);
                if (missing == status)
                        status = -EFAULT;
                else
                        status = status - missing;
        }
        mutex_unlock(&spidev->buf_lock);

        return status;
}

/* Write-only message with current device setup */
static ssize_t
spidev_write(struct file *filp, const char __user *buf,
                size_t count, loff_t *f_pos)
{
        struct spidev_data      *spidev;
        ssize_t                 status;
        unsigned long           missing;

        /* chipselect only toggles at start or end of operation */
        if (count > bufsiz)
                return -EMSGSIZE;

        spidev = filp->private_data;

        mutex_lock(&spidev->buf_lock);
        missing = copy_from_user(spidev->tx_buffer, buf, count);
        if (missing == 0)
                status = spidev_sync_write(spidev, count);
        else
                status = -EFAULT;
        mutex_unlock(&spidev->buf_lock);

        return status;
}

static int spidev_message(struct spidev_data *spidev,
                struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
        struct spi_message      msg;
        struct spi_transfer     *k_xfers;
        struct spi_transfer     *k_tmp;
        struct spi_ioc_transfer *u_tmp;
        unsigned                n, total, tx_total, rx_total;
        u8                      *tx_buf, *rx_buf;
        int                     status = -EFAULT;

        spi_message_init(&msg);
        k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
        if (k_xfers == NULL)
                return -ENOMEM;

        /* Construct spi_message, copying any tx data to bounce buffer.
         * We walk the array of user-provided transfers, using each one
         * to initialize a kernel version of the same transfer.
         */
        tx_buf = spidev->tx_buffer;
        rx_buf = spidev->rx_buffer;
        total = 0;
        tx_total = 0;
        rx_total = 0;
        for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
                        n;
                        n--, k_tmp++, u_tmp++) {
                /* Ensure that subsequent allocations from rx_buf/tx_buf also
                 * meet DMA alignment requirements.
                 */
                unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN);

                k_tmp->len = u_tmp->len;

                total += k_tmp->len;
                /* Since the function returns the total length of transfers
                 * on success, restrict the total to positive int values to
                 * avoid the return value looking like an error.  Also check
                 * each transfer length to avoid arithmetic overflow.
                 */
                if (total > INT_MAX || k_tmp->len > INT_MAX) {
                        status = -EMSGSIZE;
                        goto done;
                }

                if (u_tmp->rx_buf) {
                        /* this transfer needs space in RX bounce buffer */
                        rx_total += len_aligned;
                        if (rx_total > bufsiz) {
                                status = -EMSGSIZE;
                                goto done;
                        }
                        k_tmp->rx_buf = rx_buf;
                        rx_buf += len_aligned;
                }
                if (u_tmp->tx_buf) {
                        /* this transfer needs space in TX bounce buffer */
                        tx_total += len_aligned;
                        if (tx_total > bufsiz) {
                                status = -EMSGSIZE;
                                goto done;
                        }
                        k_tmp->tx_buf = tx_buf;
                        if (copy_from_user(tx_buf, (const u8 __user *)
                                                (uintptr_t) u_tmp->tx_buf,
                                        u_tmp->len))
                                goto done;
                        tx_buf += len_aligned;
                }

                k_tmp->cs_change = !!u_tmp->cs_change;
                k_tmp->tx_nbits = u_tmp->tx_nbits;
                k_tmp->rx_nbits = u_tmp->rx_nbits;
                k_tmp->bits_per_word = u_tmp->bits_per_word;
                k_tmp->delay.value = u_tmp->delay_usecs;
                k_tmp->delay.unit = SPI_DELAY_UNIT_USECS;
                k_tmp->speed_hz = u_tmp->speed_hz;
                k_tmp->word_delay.value = u_tmp->word_delay_usecs;
                k_tmp->word_delay.unit = SPI_DELAY_UNIT_USECS;
                if (!k_tmp->speed_hz)
                        k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
                dev_dbg(&spidev->spi->dev,
                        "  xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n",
                        k_tmp->len,
                        k_tmp->rx_buf ? "rx " : "",
                        k_tmp->tx_buf ? "tx " : "",
                        k_tmp->cs_change ? "cs " : "",
                        k_tmp->bits_per_word ? : spidev->spi->bits_per_word,
                        k_tmp->delay.value,
                        k_tmp->word_delay.value,
                        k_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
                spi_message_add_tail(k_tmp, &msg);
        }

        status = spidev_sync(spidev, &msg);
        if (status < 0)
                goto done;

        /* copy any rx data out of bounce buffer */
        for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
                        n;
                        n--, k_tmp++, u_tmp++) {
                if (u_tmp->rx_buf) {
                        if (copy_to_user((u8 __user *)
                                        (uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf,
                                        u_tmp->len)) {
                                status = -EFAULT;
                                goto done;
                        }
                }
        }
        status = total;

done:
        kfree(k_xfers);
        return status;
}
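/*
 * Illustrative only (not part of this driver): a userspace sketch of the
 * full-duplex path handled by spidev_message() above.  Each struct
 * spi_ioc_transfer passed through SPI_IOC_MESSAGE(N) becomes one
 * bounce-buffered spi_transfer.  "fd" is assumed to be an already-open
 * spidev node (see the earlier sketch); the bytes and the 500 kHz override
 * are assumptions for the example.
 *
 *      #include <string.h>
 *      #include <stdint.h>
 *      #include <sys/ioctl.h>
 *      #include <linux/spi/spidev.h>
 *
 *      uint8_t tx[3] = { 0x9f, 0x00, 0x00 };   // example command bytes
 *      uint8_t rx[3] = { 0 };
 *      struct spi_ioc_transfer tr;
 *
 *      memset(&tr, 0, sizeof(tr));
 *      tr.tx_buf = (unsigned long)tx;          // pointers carried as u64
 *      tr.rx_buf = (unsigned long)rx;
 *      tr.len = sizeof(tx);
 *      tr.speed_hz = 500000;                   // per-transfer override, assumed
 *
 *      if (ioctl(fd, SPI_IOC_MESSAGE(1), &tr) < 0)
 *              perror("SPI_IOC_MESSAGE");
 *      // rx[] now holds the bytes clocked in while tx[] was shifted out
 */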

static struct spi_ioc_transfer *
spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
                unsigned *n_ioc)
{
        u32     tmp;

        /* Check type, command number and direction */
        if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
                        || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
                        || _IOC_DIR(cmd) != _IOC_WRITE)
                return ERR_PTR(-ENOTTY);

        tmp = _IOC_SIZE(cmd);
        if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
                return ERR_PTR(-EINVAL);
        *n_ioc = tmp / sizeof(struct spi_ioc_transfer);
        if (*n_ioc == 0)
                return NULL;

        /* copy into scratch area */
        return memdup_user(u_ioc, tmp);
}

static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        int                     retval = 0;
        struct spidev_data      *spidev;
        struct spi_device       *spi;
        u32                     tmp;
        unsigned                n_ioc;
        struct spi_ioc_transfer *ioc;

        /* Check type and command number */
        if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
                return -ENOTTY;

        /* guard against device removal before, or while,
         * we issue this ioctl.
         */
        spidev = filp->private_data;
        spin_lock_irq(&spidev->spi_lock);
        spi = spi_dev_get(spidev->spi);
        spin_unlock_irq(&spidev->spi_lock);

        if (spi == NULL)
                return -ESHUTDOWN;

        /* use the buffer lock here for triple duty:
         *  - prevent I/O (from us) so calling spi_setup() is safe;
         *  - prevent concurrent SPI_IOC_WR_* from morphing
         *    data fields while SPI_IOC_RD_* reads them;
         *  - SPI_IOC_MESSAGE needs the buffer locked "normally".
         */
        mutex_lock(&spidev->buf_lock);

        switch (cmd) {
        /* read requests */
        case SPI_IOC_RD_MODE:
        case SPI_IOC_RD_MODE32:
                tmp = spi->mode;

                {
                        struct spi_controller *ctlr = spi->controller;

                        if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
                            ctlr->cs_gpiods[spi->chip_select])
                                tmp &= ~SPI_CS_HIGH;
                }

                if (cmd == SPI_IOC_RD_MODE)
                        retval = put_user(tmp & SPI_MODE_MASK,
                                          (__u8 __user *)arg);
                else
                        retval = put_user(tmp & SPI_MODE_MASK,
                                          (__u32 __user *)arg);
                break;
        case SPI_IOC_RD_LSB_FIRST:
                retval = put_user((spi->mode & SPI_LSB_FIRST) ?  1 : 0,
                                        (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_BITS_PER_WORD:
                retval = put_user(spi->bits_per_word, (__u8 __user *)arg);
                break;
        case SPI_IOC_RD_MAX_SPEED_HZ:
                retval = put_user(spidev->speed_hz, (__u32 __user *)arg);
                break;

        /* write requests */
        case SPI_IOC_WR_MODE:
        case SPI_IOC_WR_MODE32:
                if (cmd == SPI_IOC_WR_MODE)
                        retval = get_user(tmp, (u8 __user *)arg);
                else
                        retval = get_user(tmp, (u32 __user *)arg);
                if (retval == 0) {
                        struct spi_controller *ctlr = spi->controller;
                        u32     save = spi->mode;

                        if (tmp & ~SPI_MODE_MASK) {
                                retval = -EINVAL;
                                break;
                        }

                        if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
                            ctlr->cs_gpiods[spi->chip_select])
                                tmp |= SPI_CS_HIGH;

                        tmp |= spi->mode & ~SPI_MODE_MASK;
                        spi->mode = tmp & SPI_MODE_USER_MASK;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->mode = save;
                        else
                                dev_dbg(&spi->dev, "spi mode %x\n", tmp);
                }
                break;
        case SPI_IOC_WR_LSB_FIRST:
                retval = get_user(tmp, (__u8 __user *)arg);
                if (retval == 0) {
                        u32     save = spi->mode;

                        if (tmp)
                                spi->mode |= SPI_LSB_FIRST;
                        else
                                spi->mode &= ~SPI_LSB_FIRST;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->mode = save;
                        else
                                dev_dbg(&spi->dev, "%csb first\n",
                                                tmp ? 'l' : 'm');
                }
                break;
        case SPI_IOC_WR_BITS_PER_WORD:
                retval = get_user(tmp, (__u8 __user *)arg);
                if (retval == 0) {
                        u8      save = spi->bits_per_word;

                        spi->bits_per_word = tmp;
                        retval = spi_setup(spi);
                        if (retval < 0)
                                spi->bits_per_word = save;
                        else
                                dev_dbg(&spi->dev, "%d bits per word\n", tmp);
                }
                break;
        case SPI_IOC_WR_MAX_SPEED_HZ: {
                u32 save;

                retval = get_user(tmp, (__u32 __user *)arg);
                if (retval)
                        break;
                if (tmp == 0) {
                        retval = -EINVAL;
                        break;
                }

                save = spi->max_speed_hz;

                spi->max_speed_hz = tmp;
                retval = spi_setup(spi);
                if (retval == 0) {
                        spidev->speed_hz = tmp;
                        dev_dbg(&spi->dev, "%d Hz (max)\n", spidev->speed_hz);
                }

                spi->max_speed_hz = save;
                break;
        }
        default:
                /* segmented and/or full-duplex I/O request */
                /* Check message and copy into scratch area */
                ioc = spidev_get_ioc_message(cmd,
                                (struct spi_ioc_transfer __user *)arg, &n_ioc);
                if (IS_ERR(ioc)) {
                        retval = PTR_ERR(ioc);
                        break;
                }
                if (!ioc)
                        break;  /* n_ioc is also 0 */

                /* translate to spi_message, execute */
                retval = spidev_message(spidev, ioc, n_ioc);
                kfree(ioc);
                break;
        }

        mutex_unlock(&spidev->buf_lock);
        spi_dev_put(spi);
        return retval;
}

#ifdef CONFIG_COMPAT
static long
spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
                unsigned long arg)
{
        struct spi_ioc_transfer __user  *u_ioc;
        int                             retval = 0;
        struct spidev_data              *spidev;
        struct spi_device               *spi;
        unsigned                        n_ioc, n;
        struct spi_ioc_transfer         *ioc;

        u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);

        /* guard against device removal before, or while,
         * we issue this ioctl.
         */
        spidev = filp->private_data;
        spin_lock_irq(&spidev->spi_lock);
        spi = spi_dev_get(spidev->spi);
        spin_unlock_irq(&spidev->spi_lock);

        if (spi == NULL)
                return -ESHUTDOWN;

        /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
        mutex_lock(&spidev->buf_lock);

        /* Check message and copy into scratch area */
        ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
        if (IS_ERR(ioc)) {
                retval = PTR_ERR(ioc);
                goto done;
        }
        if (!ioc)
                goto done;      /* n_ioc is also 0 */

        /* Convert buffer pointers */
        for (n = 0; n < n_ioc; n++) {
                ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
                ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
        }

        /* translate to spi_message, execute */
        retval = spidev_message(spidev, ioc, n_ioc);
        kfree(ioc);

done:
        mutex_unlock(&spidev->buf_lock);
        spi_dev_put(spi);
        return retval;
}

static long
spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
                        && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
                        && _IOC_DIR(cmd) == _IOC_WRITE)
                return spidev_compat_ioc_message(filp, cmd, arg);

        return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define spidev_compat_ioctl NULL
#endif /* CONFIG_COMPAT */

static int spidev_open(struct inode *inode, struct file *filp)
{
        struct spidev_data      *spidev = NULL, *iter;
        int                     status = -ENXIO;

        mutex_lock(&device_list_lock);

        list_for_each_entry(iter, &device_list, device_entry) {
                if (iter->devt == inode->i_rdev) {
                        status = 0;
                        spidev = iter;
                        break;
                }
        }

        if (!spidev) {
                pr_debug("spidev: nothing for minor %d\n", iminor(inode));
                goto err_find_dev;
        }

        if (!spidev->tx_buffer) {
                spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
                if (!spidev->tx_buffer) {
                        dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
                        status = -ENOMEM;
                        goto err_find_dev;
                }
        }

        if (!spidev->rx_buffer) {
                spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
                if (!spidev->rx_buffer) {
                        dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
                        status = -ENOMEM;
                        goto err_alloc_rx_buf;
                }
        }

        spidev->users++;
        filp->private_data = spidev;
        stream_open(inode, filp);

        mutex_unlock(&device_list_lock);
        return 0;

err_alloc_rx_buf:
        kfree(spidev->tx_buffer);
        spidev->tx_buffer = NULL;
err_find_dev:
        mutex_unlock(&device_list_lock);
        return status;
}

static int spidev_release(struct inode *inode, struct file *filp)
{
        struct spidev_data      *spidev;
        int                     dofree;

        mutex_lock(&device_list_lock);
        spidev = filp->private_data;
        filp->private_data = NULL;

        spin_lock_irq(&spidev->spi_lock);
        /* ... after we unbound from the underlying device? */
        dofree = (spidev->spi == NULL);
        spin_unlock_irq(&spidev->spi_lock);

        /* last close? */
        spidev->users--;
        if (!spidev->users) {

                kfree(spidev->tx_buffer);
                spidev->tx_buffer = NULL;

                kfree(spidev->rx_buffer);
                spidev->rx_buffer = NULL;

                if (dofree)
                        kfree(spidev);
                else
                        spidev->speed_hz = spidev->spi->max_speed_hz;
        }
#ifdef CONFIG_SPI_SLAVE
        if (!dofree)
                spi_slave_abort(spidev->spi);
#endif
        mutex_unlock(&device_list_lock);

        return 0;
}

static const struct file_operations spidev_fops = {
        .owner =        THIS_MODULE,
        /* REVISIT switch to aio primitives, so that userspace
         * gets more complete API coverage.  It'll simplify things
         * too, except for the locking.
         */
        .write =        spidev_write,
        .read =         spidev_read,
        .unlocked_ioctl = spidev_ioctl,
        .compat_ioctl = spidev_compat_ioctl,
        .open =         spidev_open,
        .release =      spidev_release,
        .llseek =       no_llseek,
};

/*-------------------------------------------------------------------------*/

/* The main reason to have this class is to make mdev/udev create the
 * /dev/spidevB.C character device nodes exposing our userspace API.
 * It also simplifies memory management.
 */

static struct class *spidev_class;

static const struct spi_device_id spidev_spi_ids[] = {
        { .name = "dh2228fv" },
        { .name = "ltc2488" },
        { .name = "sx1301" },
        { .name = "bk4" },
        { .name = "dhcom-board" },
        { .name = "m53cpld" },
        { .name = "spi-petra" },
        { .name = "spi-authenta" },
        {},
};
MODULE_DEVICE_TABLE(spi, spidev_spi_ids);

/*
 * spidev should never be referenced in DT without a specific compatible string;
 * it is a Linux implementation detail rather than a description of the hardware.
 */
static int spidev_of_check(struct device *dev)
{
        if (device_property_match_string(dev, "compatible", "spidev") < 0)
                return 0;

        dev_err(dev, "spidev listed directly in DT is not supported\n");
        return -EINVAL;
}

static const struct of_device_id spidev_dt_ids[] = {
        { .compatible = "rohm,dh2228fv", .data = &spidev_of_check },
        { .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
        { .compatible = "semtech,sx1301", .data = &spidev_of_check },
        { .compatible = "lwn,bk4", .data = &spidev_of_check },
        { .compatible = "dh,dhcom-board", .data = &spidev_of_check },
        { .compatible = "menlo,m53cpld", .data = &spidev_of_check },
        { .compatible = "cisco,spi-petra", .data = &spidev_of_check },
        { .compatible = "micron,spi-authenta", .data = &spidev_of_check },
        {},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
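/*
 * Illustrative only: a device tree node must use one of the specific
 * compatibles listed above rather than "spidev".  A minimal sketch; the
 * controller label, node name, reg and frequency are assumptions for the
 * example:
 *
 *      &spi0 {
 *              adc@0 {
 *                      compatible = "rohm,dh2228fv";
 *                      reg = <0>;
 *                      spi-max-frequency = <1000000>;
 *              };
 *      };
 */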

/* Dummy SPI devices not to be used in production systems */
static int spidev_acpi_check(struct device *dev)
{
        dev_warn(dev, "do not use this driver in production systems!\n");
        return 0;
}

static const struct acpi_device_id spidev_acpi_ids[] = {
        /*
         * The ACPI SPT000* devices are only meant for development and
         * testing. Systems used in production should have a proper ACPI
         * description of the connected peripheral and they should also use
         * a proper driver instead of poking directly to the SPI bus.
         */
        { "SPT0001", (kernel_ulong_t)&spidev_acpi_check },
        { "SPT0002", (kernel_ulong_t)&spidev_acpi_check },
        { "SPT0003", (kernel_ulong_t)&spidev_acpi_check },
        {},
};
MODULE_DEVICE_TABLE(acpi, spidev_acpi_ids);

/*-------------------------------------------------------------------------*/

static int spidev_probe(struct spi_device *spi)
{
        int (*match)(struct device *dev);
        struct spidev_data      *spidev;
        int                     status;
        unsigned long           minor;

        match = device_get_match_data(&spi->dev);
        if (match) {
                status = match(&spi->dev);
                if (status)
                        return status;
        }

        /* Allocate driver data */
        spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
        if (!spidev)
                return -ENOMEM;

        /* Initialize the driver data */
        spidev->spi = spi;
        spin_lock_init(&spidev->spi_lock);
        mutex_init(&spidev->buf_lock);

        INIT_LIST_HEAD(&spidev->device_entry);

        /* If we can allocate a minor number, hook up this device.
         * Reusing minors is fine so long as udev or mdev is working.
         */
        mutex_lock(&device_list_lock);
        minor = find_first_zero_bit(minors, N_SPI_MINORS);
        if (minor < N_SPI_MINORS) {
                struct device *dev;

                spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
                dev = device_create(spidev_class, &spi->dev, spidev->devt,
                                    spidev, "spidev%d.%d",
                                    spi->master->bus_num, spi->chip_select);
                status = PTR_ERR_OR_ZERO(dev);
        } else {
                dev_dbg(&spi->dev, "no minor number available!\n");
                status = -ENODEV;
        }
        if (status == 0) {
                set_bit(minor, minors);
                list_add(&spidev->device_entry, &device_list);
        }
        mutex_unlock(&device_list_lock);

        spidev->speed_hz = spi->max_speed_hz;

        if (status == 0)
                spi_set_drvdata(spi, spidev);
        else
                kfree(spidev);

        return status;
}

static void spidev_remove(struct spi_device *spi)
{
        struct spidev_data      *spidev = spi_get_drvdata(spi);

        /* prevent new opens */
        mutex_lock(&device_list_lock);
        /* make sure ops on existing fds can abort cleanly */
        spin_lock_irq(&spidev->spi_lock);
        spidev->spi = NULL;
        spin_unlock_irq(&spidev->spi_lock);

        list_del(&spidev->device_entry);
        device_destroy(spidev_class, spidev->devt);
        clear_bit(MINOR(spidev->devt), minors);
        if (spidev->users == 0)
                kfree(spidev);
        mutex_unlock(&device_list_lock);
}

static struct spi_driver spidev_spi_driver = {
        .driver = {
                .name =         "spidev",
                .of_match_table = spidev_dt_ids,
                .acpi_match_table = spidev_acpi_ids,
        },
        .probe =        spidev_probe,
        .remove =       spidev_remove,
        .id_table =     spidev_spi_ids,

        /* NOTE:  suspend/resume methods are not necessary here.
         * We don't do anything except pass the requests to/from
         * the underlying controller.  The refrigerator handles
         * most issues; the controller driver handles the rest.
         */
};

/*-------------------------------------------------------------------------*/

static int __init spidev_init(void)
{
        int status;

        /* Claim our 256 reserved device numbers.  Then register a class
         * that will key udev/mdev to add/remove /dev nodes.  Last, register
         * the driver which manages those device numbers.
         */
        status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
        if (status < 0)
                return status;

        spidev_class = class_create(THIS_MODULE, "spidev");
        if (IS_ERR(spidev_class)) {
                unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
                return PTR_ERR(spidev_class);
        }

        status = spi_register_driver(&spidev_spi_driver);
        if (status < 0) {
                class_destroy(spidev_class);
                unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
        }
        return status;
}
module_init(spidev_init);

static void __exit spidev_exit(void)
{
        spi_unregister_driver(&spidev_spi_driver);
        class_destroy(spidev_class);
        unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
module_exit(spidev_exit);

MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("User mode SPI device interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:spidev");