// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR   4

static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}
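/*
 * Illustrative example (assuming PAGE_SIZE == 4096): for start = 0x10fc0 the
 * offset within the page is 0xfc0, so 0x40 (64) bytes remain before the page
 * boundary; with size = 200 the function returns min(64, 200) == 64, which is
 * how the copy loops below advance in at most page-sized steps.
 */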

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
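/*
 * devmem_is_allowed() is effectively tri-state on architectures that
 * implement CONFIG_STRICT_DEVMEM (e.g. x86): 0 refuses access, 1 permits
 * it, and 2 marks the page restricted, in which case reads are satisfied
 * with zeroes and writes are silently discarded (see read_mem() and
 * write_mem() below).
 */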
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                unsigned long remaining;
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                return -EFAULT;

                        remaining = copy_to_user(buf, ptr, sz);

                        unxlate_dev_mem_ptr(p, ptr);
                }

                if (remaining)
                        return -EFAULT;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}
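/*
 * Illustrative userspace view (a sketch; phys_addr is hypothetical and must
 * name a region page_is_allowed() permits on the running kernel):
 *
 *      int fd = open("/dev/mem", O_RDONLY);
 *      uint32_t v;
 *      pread(fd, &v, sizeof(v), phys_addr);
 *
 * With CONFIG_STRICT_DEVMEM=y, the same read of ordinary RAM returns zeroes
 * (allowed == 2) or fails with -EPERM rather than exposing memory contents.
 */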

static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top of memory the kernel knows about,
         * or through a file pointer that was marked O_DSYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif
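/*
 * Practical consequence (illustrative): userspace that wants an uncached
 * mapping of device memory conventionally opens /dev/mem with O_SYNC, which
 * implies O_DSYNC, so uncached_access() returns true and the mapping is
 * established with pgprot_noncached():
 *
 *      int fd = open("/dev/mem", O_RDWR | O_SYNC);
 */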

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

        /* Does it even fit in phys_addr_t? */
        if (offset >> PAGE_SHIFT != vma->vm_pgoff)
                return -EINVAL;

        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}
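/*
 * Illustrative userspace counterpart (a sketch; phys and len are
 * hypothetical, and both must be page-aligned):
 *
 *      int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *      void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                     fd, phys);
 *
 * The mmap offset becomes vma->vm_pgoff (phys >> PAGE_SHIFT), which is fed
 * through the validity, permission and protection checks above before
 * remap_pfn_range() installs the translation.
 */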

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add a
         * new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}
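/*
 * Worked example (illustrative): for /dev/kmem the mmap offset is a kernel
 * *virtual* address, so vm_pgoff << PAGE_SHIFT reconstructs that address,
 * __pa() translates it back through the direct map to a physical address,
 * and shifting by PAGE_SHIFT yields the pfn that mmap_mem() expects to find
 * in vm_pgoff.
 */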

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);
                        if (!virt_addr_valid(kbuf))
                                return -ENXIO;

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}
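/*
 * Summary of the flow above: the read splits on high_memory. Addresses
 * below it are lowmem and are copied to userspace directly; addresses above
 * it must be vmalloc/module space and are staged page-by-page through a
 * bounce page via vread(), which tolerates holes in that region.
 */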


static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                void *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((void *)p);
                if (!virt_addr_valid(ptr))
                        return -ENXIO;

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        /* GNU "?:" extension: return the byte count if non-zero, else err */
        return virtr + wrote ? : err;
}

static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
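/*
 * Illustrative use of /dev/port: the file offset selects the I/O port, and
 * each byte read or written becomes an inb()/outb() at that port. For
 * example, reading one byte at offset 0x71 performs inb(0x71) (the CMOS
 * data port on a PC); the transfer simply stops at port 65536.
 */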

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
                /* fall through */
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}
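/*
 * Worked example for the -MAX_ERRNO check above: with MAX_ERRNO == 4095,
 * an offset of 0xfffffffffffffff7 would come back from lseek() as -9, which
 * userspace cannot distinguish from -EBADF. Any offset in that top window
 * of 4095 values is therefore rejected with -EOVERFLOW instead.
 */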

static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port
#define open_kmem       open_mem

static const struct file_operations __maybe_unused mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .read_iter      = read_iter_null,
        .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
        .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read_iter      = read_iter_zero,
        .write          = write_full,
};

static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, 0 },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, 0 },
         [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
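/*
 * The array index doubles as the minor number under character major 1
 * (MEM_MAJOR), matching Documentation/admin-guide/devices.txt: 1:1 is
 * /dev/mem, 1:3 /dev/null, 1:5 /dev/zero, 1:7 /dev/full, 1:8 /dev/random,
 * 1:9 /dev/urandom and 1:11 /dev/kmsg. Gaps (e.g. minor 6) are minors this
 * driver no longer provides.
 */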

static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);