// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVMEM_MINOR    1
#define DEVPORT_MINOR   4

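/*
 * Return how many bytes, starting at @start, fit inside the page that
 * contains @start, capped at @size.  Used to split copies on page boundaries.
 */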
static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

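/*
 * With CONFIG_STRICT_DEVMEM the architecture's devmem_is_allowed() decides,
 * per pfn, how /dev/mem may touch a page; without it every page is allowed.
 */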
#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

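/*
 * Yield the CPU when a reschedule is due and report any pending signal, so
 * that long /dev/mem copies stay preemptible and can be interrupted.
 */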
static inline bool should_stop_iteration(void)
{
        if (need_resched())
                cond_resched();
        return signal_pending(current);
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;
        char *bounce;
        int err;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bounce)
                return -ENOMEM;

        while (count > 0) {
                unsigned long remaining;
                int allowed, probe;

                sz = size_inside_page(p, count);

                err = -EPERM;
                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        goto failed;

                err = -EFAULT;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                goto failed;

                        probe = copy_from_kernel_nofault(bounce, ptr, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (probe)
                                goto failed;

                        remaining = copy_to_user(buf, bounce, sz);
                }

                if (remaining)
                        goto failed;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
                if (should_stop_iteration())
                        break;
        }
        kfree(bounce);

        *ppos += read;
        return read;

failed:
        kfree(bounce);
        return err;
}

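/*
 * This function writes to the *physical* memory.  As for read_mem(), the
 * f_pos points directly to the memory location being written.
 */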
static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
        /*
         * Accessing memory above the top of memory the kernel knows about,
         * or through a file pointer that was marked O_DSYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return is_nommu_shared_mapping(vma->vm_flags);
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

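/*
 * Map the physical range given by vm_pgoff and the VMA size straight into
 * the caller's address space with remap_pfn_range(), after the usual range,
 * permission and caching-protection checks.
 */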
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

        /* Does it even fit in phys_addr_t? */
        if (offset >> PAGE_SHIFT != vma->vm_pgoff)
                return -EINVAL;

        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}

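/*
 * /dev/port: transfer one byte per legacy I/O port with inb()/outb();
 * the file offset is the port number (0..65535).
 */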
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
        return 0;
}

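/*
 * /dev/zero reads: hand out zeroes in chunks of at most PAGE_SIZE, checking
 * for pending signals and rescheduling between chunks.
 */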
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                if (!need_resched())
                        continue;
                if (iocb->ki_flags & IOCB_NOWAIT)
                        return written ? written : -EAGAIN;
                cond_resched();
        }
        return written;
}

static ssize_t read_zero(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        size_t cleared = 0;

        while (count) {
                size_t chunk = min_t(size_t, count, PAGE_SIZE);
                size_t left;

                left = clear_user(buf + cleared, chunk);
                if (unlikely(left)) {
                        cleared += (chunk - left);
                        if (!cleared)
                                return -EFAULT;
                        break;
                }
                cleared += chunk;
                count -= chunk;

                if (signal_pending(current))
                        break;
                cond_resched();
        }

        return cleared;
}

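/*
 * mmap of /dev/zero: shared mappings are backed by a shmem object, private
 * mappings become ordinary anonymous memory.
 */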
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        vma_set_anonymous(vma);
        return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are OK. The return value is odd in
 * that case, though (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
                fallthrough;
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
        int rc;

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        rc = security_locked_down(LOCKDOWN_DEV_MEM);
        if (rc)
                return rc;

        if (iminor(inode) != DEVMEM_MINOR)
                return 0;

        /*
         * Use a unified address space to have a single point to manage
         * revocations when drivers want to take over a /dev/mem mapped
         * range.
         */
        filp->f_mapping = iomem_get_mapping();

        return 0;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port

static const struct file_operations __maybe_unused mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .read_iter      = read_iter_null,
        .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
        .uring_cmd      = uring_cmd_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
        .read           = read_zero,
        .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read_iter      = read_iter_zero,
        .write          = write_full,
};

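/*
 * The character devices multiplexed under MEM_MAJOR; the array index is the
 * device's minor number (e.g. /dev/null is minor 3, /dev/zero is minor 5).
 */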
static const struct memdev {
        const char *name;
        const struct file_operations *fops;
        fmode_t fmode;
        umode_t mode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
        [DEVMEM_MINOR] = { "mem", &mem_fops, FMODE_UNSIGNED_OFFSET, 0 },
#endif
        [3] = { "null", &null_fops, FMODE_NOWAIT, 0666 },
#ifdef CONFIG_DEVPORT
        [4] = { "port", &port_fops, 0, 0 },
#endif
        [5] = { "zero", &zero_fops, FMODE_NOWAIT, 0666 },
        [7] = { "full", &full_fops, 0, 0666 },
        [8] = { "random", &random_fops, FMODE_NOWAIT, 0666 },
        [9] = { "urandom", &urandom_fops, FMODE_NOWAIT, 0666 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", &kmsg_fops, 0, 0644 },
#endif
};

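/*
 * Top-level open() for MEM_MAJOR: look up the minor in devlist, install that
 * device's file_operations and forward to its own open() if it has one.
 */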
static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(const struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static const struct class mem_class = {
        .name           = "mem",
        .devnode        = mem_devnode,
};

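/*
 * Boot-time setup: claim MEM_MAJOR, register the "mem" class, create the
 * device nodes listed in devlist, and finally initialise the TTY layer.
 */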
static int __init chr_dev_init(void)
{
        int retval;
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        retval = class_register(&mem_class);
        if (retval)
                return retval;

        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(&mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);