/*
 *      fs/proc/kcore.c kernel ELF core dumper
 *
 *      Modelled on fs/exec.c:aout_core_dump()
 *      Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *      ELF version written by David Howells <David.Howells@nexor.co.uk>
 *      Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *      Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *      Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/notifier.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <asm/sections.h>

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif

static struct proc_dir_entry *proc_root_kcore;


#ifndef kc_vaddr_to_offset
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef kc_offset_to_vaddr
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif
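
/*
 * These macros define the mapping between a kernel virtual address and its
 * offset within the PT_LOAD data of /proc/kcore.  With the ELF header block
 * of size elf_buflen in front (see read_kcore()), the layout is roughly:
 *
 *      file offset = kc_vaddr_to_offset(vaddr) + elf_buflen
 *      vaddr       = kc_offset_to_vaddr(file offset - elf_buflen)
 *
 * e.g. with the default definitions above, PAGE_OFFSET + x lands at file
 * offset x + elf_buflen.
 */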

/* An ELF note in memory */
struct memelfnote
{
        const char *name;
        int type;
        unsigned int datasz;
        void *data;
};

static LIST_HEAD(kclist_head);
static DEFINE_RWLOCK(kclist_lock);
static int kcore_need_update = 1;

void
kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
        new->addr = (unsigned long)addr;
        new->size = size;
        new->type = type;

        write_lock(&kclist_lock);
        list_add_tail(&new->list, &kclist_head);
        write_unlock(&kclist_lock);
}
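
/*
 * Callers (arch code and this file) describe each dumpable region with a
 * statically allocated kcore_list and register it once, e.g. (illustrative
 * names only):
 *
 *      static struct kcore_list kcore_foo;
 *      kclist_add(&kcore_foo, (void *)start_vaddr, len, KCORE_TEXT);
 *
 * There is no per-entry removal here; only KCORE_RAM/KCORE_VMEMMAP entries
 * are replaced wholesale by __kcore_update_ram() below.
 */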

static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
{
        size_t try, size;
        struct kcore_list *m;

        *nphdr = 1; /* PT_NOTE */
        size = 0;

        list_for_each_entry(m, &kclist_head, list) {
                try = kc_vaddr_to_offset((size_t)m->addr + m->size);
                if (try > size)
                        size = try;
                *nphdr = *nphdr + 1;
        }
        *elf_buflen =   sizeof(struct elfhdr) +
                        (*nphdr + 2)*sizeof(struct elf_phdr) +
                        3 * ((sizeof(struct elf_note)) +
                             roundup(sizeof(CORE_STR), 4)) +
                        roundup(sizeof(struct elf_prstatus), 4) +
                        roundup(sizeof(struct elf_prpsinfo), 4) +
                        roundup(sizeof(struct task_struct), 4);
        *elf_buflen = PAGE_ALIGN(*elf_buflen);
        return size + *elf_buflen;
}
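
/*
 * get_kcore_size() returns the nominal file size: the highest data offset
 * covered by any registered region plus elf_buflen, the page-aligned space
 * reserved for the ELF header, the program headers and the three notes
 * (NT_PRSTATUS, NT_PRPSINFO, NT_TASKSTRUCT) that elf_kcore_store_hdr()
 * emits.
 */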

static void free_kclist_ents(struct list_head *head)
{
        struct kcore_list *tmp, *pos;

        list_for_each_entry_safe(pos, tmp, head, list) {
                list_del(&pos->list);
                kfree(pos);
        }
}
/*
 * Replace all KCORE_RAM/KCORE_VMEMMAP information with passed list.
 */
static void __kcore_update_ram(struct list_head *list)
{
        int nphdr;
        size_t size;
        struct kcore_list *tmp, *pos;
        LIST_HEAD(garbage);

        write_lock(&kclist_lock);
        if (kcore_need_update) {
                list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
                        if (pos->type == KCORE_RAM
                                || pos->type == KCORE_VMEMMAP)
                                list_move(&pos->list, &garbage);
                }
                list_splice_tail(list, &kclist_head);
        } else
                list_splice(list, &garbage);
        kcore_need_update = 0;
        proc_root_kcore->size = get_kcore_size(&nphdr, &size);
        write_unlock(&kclist_lock);

        free_kclist_ents(&garbage);
}
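
/*
 * Locking note: kclist_lock protects the list, kcore_need_update and the
 * recomputed proc_root_kcore->size.  Stale KCORE_RAM/KCORE_VMEMMAP entries
 * are moved to a private "garbage" list under the lock and only kfree()d
 * after it has been dropped, so no freeing happens with the rwlock held.
 */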


#ifdef CONFIG_HIGHMEM
/*
 * In the HIGHMEM case we simply assume [0...max_low_pfn) is a continuous
 * range of memory, because the holes in low memory are not as big as in
 * the !HIGHMEM case.
 * (HIGHMEM is special because part of memory is _invisible_ to the kernel.)
 */
static int kcore_update_ram(void)
{
        LIST_HEAD(head);
        struct kcore_list *ent;
        int ret = 0;

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)__va(0);
        ent->size = max_low_pfn << PAGE_SHIFT;
        ent->type = KCORE_RAM;
        list_add(&ent->list, &head);
        __kcore_update_ram(&head);
        return ret;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate the vmemmap address for the given System RAM pfns and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
        unsigned long nr_pages = ent->size >> PAGE_SHIFT;
        unsigned long start, end;
        struct kcore_list *vmm, *tmp;


        start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
        end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
        end = ALIGN(end, PAGE_SIZE);
        /* overlap check (because we have to page-align the range) */
        list_for_each_entry(tmp, head, list) {
                if (tmp->type != KCORE_VMEMMAP)
                        continue;
                if (start < tmp->addr + tmp->size)
                        if (end > tmp->addr)
                                end = tmp->addr;
        }
        if (start < end) {
                vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
                if (!vmm)
                        return 0;
                vmm->addr = start;
                vmm->size = end - start;
                vmm->type = KCORE_VMEMMAP;
                list_add_tail(&vmm->list, head);
        }
        return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
        return 1;
}

#endif
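
/*
 * Registering the vmemmap range as KCORE_VMEMMAP gives the dump a PT_LOAD
 * segment covering the struct page array for each System RAM chunk, so tools
 * reading /proc/kcore can inspect struct page contents as well as the pages
 * themselves.
 */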

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        struct list_head *head = (struct list_head *)arg;
        struct kcore_list *ent;

        ent = kmalloc(sizeof(*ent), GFP_KERNEL);
        if (!ent)
                return -ENOMEM;
        ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
        ent->size = nr_pages << PAGE_SHIFT;

        /* Sanity check: can happen on 32-bit arches... maybe */
        if (ent->addr < (unsigned long) __va(0))
                goto free_out;

        /* cut off the unmapped area (taken from the ppc32 code) */
        if (ULONG_MAX - ent->addr < ent->size)
                ent->size = ULONG_MAX - ent->addr;

        /* if the vmalloc() area is above the direct map, cut the entry off at VMALLOC_START */
        if (VMALLOC_START > (unsigned long)__va(0)) {
                if (ent->addr > VMALLOC_START)
                        goto free_out;
                if (VMALLOC_START - ent->addr < ent->size)
                        ent->size = VMALLOC_START - ent->addr;
        }

        ent->type = KCORE_RAM;
        list_add_tail(&ent->list, head);

        if (!get_sparsemem_vmemmap_info(ent, head)) {
                list_del(&ent->list);
                goto free_out;
        }

        return 0;
free_out:
        kfree(ent);
        return 1;
}

static int kcore_update_ram(void)
{
        int nid, ret;
        unsigned long end_pfn;
        LIST_HEAD(head);

        /* Not initialized... update now */
        /* find out "max pfn" */
        end_pfn = 0;
        for_each_node_state(nid, N_MEMORY) {
                unsigned long node_end;
                node_end = NODE_DATA(nid)->node_start_pfn +
                        NODE_DATA(nid)->node_spanned_pages;
                if (end_pfn < node_end)
                        end_pfn = node_end;
        }
        /* scan 0 to max_pfn */
        ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
        if (ret) {
                free_kclist_ents(&head);
                return -ENOMEM;
        }
        __kcore_update_ram(&head);
        return ret;
}
#endif /* CONFIG_HIGHMEM */

/*****************************************************************************/
/*
 * determine size of ELF note
 */
static int notesize(struct memelfnote *en)
{
        int sz;

        sz = sizeof(struct elf_note);
        sz += roundup((strlen(en->name) + 1), 4);
        sz += roundup(en->datasz, 4);

        return sz;
} /* end notesize() */

/*****************************************************************************/
/*
 * store a note in the header buffer
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
        struct elf_note en;

#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)

        en.n_namesz = strlen(men->name) + 1;
        en.n_descsz = men->datasz;
        en.n_type = men->type;

        DUMP_WRITE(&en, sizeof(en));
        DUMP_WRITE(men->name, en.n_namesz);

        /* XXX - cast from long long to long to avoid need for libgcc.a */
        bufp = (char*) roundup((unsigned long)bufp,4);
        DUMP_WRITE(men->data, men->datasz);
        bufp = (char*) roundup((unsigned long)bufp,4);

#undef DUMP_WRITE

        return bufp;
} /* end storenote() */
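
/*
 * Each note emitted by storenote() has the usual ELF note layout, with the
 * name and descriptor padded to 4-byte boundaries:
 *
 *      [ struct elf_note | "CORE\0" + pad | payload + pad ]
 *
 * notesize() above has to stay in sync with this layout.
 */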

/*
 * store an ELF coredump header in the supplied buffer
 * nphdr is the number of elf_phdr to insert
 */
static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
{
        struct elf_prstatus prstatus;   /* NT_PRSTATUS */
        struct elf_prpsinfo prpsinfo;   /* NT_PRPSINFO */
        struct elf_phdr *nhdr, *phdr;
        struct elfhdr *elf;
        struct memelfnote notes[3];
        off_t offset = 0;
        struct kcore_list *m;

        /* setup ELF header */
        elf = (struct elfhdr *) bufp;
        bufp += sizeof(struct elfhdr);
        offset += sizeof(struct elfhdr);
        memcpy(elf->e_ident, ELFMAG, SELFMAG);
        elf->e_ident[EI_CLASS]  = ELF_CLASS;
        elf->e_ident[EI_DATA]   = ELF_DATA;
        elf->e_ident[EI_VERSION]= EV_CURRENT;
        elf->e_ident[EI_OSABI] = ELF_OSABI;
        memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
        elf->e_type     = ET_CORE;
        elf->e_machine  = ELF_ARCH;
        elf->e_version  = EV_CURRENT;
        elf->e_entry    = 0;
        elf->e_phoff    = sizeof(struct elfhdr);
        elf->e_shoff    = 0;
        elf->e_flags    = ELF_CORE_EFLAGS;
        elf->e_ehsize   = sizeof(struct elfhdr);
        elf->e_phentsize= sizeof(struct elf_phdr);
        elf->e_phnum    = nphdr;
        elf->e_shentsize= 0;
        elf->e_shnum    = 0;
        elf->e_shstrndx = 0;

        /* setup ELF PT_NOTE program header */
        nhdr = (struct elf_phdr *) bufp;
        bufp += sizeof(struct elf_phdr);
        offset += sizeof(struct elf_phdr);
        nhdr->p_type    = PT_NOTE;
        nhdr->p_offset  = 0;
        nhdr->p_vaddr   = 0;
        nhdr->p_paddr   = 0;
        nhdr->p_filesz  = 0;
        nhdr->p_memsz   = 0;
        nhdr->p_flags   = 0;
        nhdr->p_align   = 0;

        /* setup ELF PT_LOAD program header for every area */
        list_for_each_entry(m, &kclist_head, list) {
                phdr = (struct elf_phdr *) bufp;
                bufp += sizeof(struct elf_phdr);
                offset += sizeof(struct elf_phdr);

                phdr->p_type    = PT_LOAD;
                phdr->p_flags   = PF_R|PF_W|PF_X;
                phdr->p_offset  = kc_vaddr_to_offset(m->addr) + dataoff;
                phdr->p_vaddr   = (size_t)m->addr;
                phdr->p_paddr   = 0;
                phdr->p_filesz  = phdr->p_memsz = m->size;
                phdr->p_align   = PAGE_SIZE;
        }

        /*
         * Set up the notes in similar form to SVR4 core dumps made
         * with info from their /proc.
         */
        nhdr->p_offset  = offset;

        /* set up the process status */
        notes[0].name = CORE_STR;
        notes[0].type = NT_PRSTATUS;
        notes[0].datasz = sizeof(struct elf_prstatus);
        notes[0].data = &prstatus;

        memset(&prstatus, 0, sizeof(struct elf_prstatus));

        nhdr->p_filesz  = notesize(&notes[0]);
        bufp = storenote(&notes[0], bufp);

        /* set up the process info */
        notes[1].name   = CORE_STR;
        notes[1].type   = NT_PRPSINFO;
        notes[1].datasz = sizeof(struct elf_prpsinfo);
        notes[1].data   = &prpsinfo;

        memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
        prpsinfo.pr_state       = 0;
        prpsinfo.pr_sname       = 'R';
        prpsinfo.pr_zomb        = 0;

        strcpy(prpsinfo.pr_fname, "vmlinux");
        strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);

        nhdr->p_filesz  += notesize(&notes[1]);
        bufp = storenote(&notes[1], bufp);

        /* set up the task structure */
        notes[2].name   = CORE_STR;
        notes[2].type   = NT_TASKSTRUCT;
        notes[2].datasz = sizeof(struct task_struct);
        notes[2].data   = current;

        nhdr->p_filesz  += notesize(&notes[2]);
        bufp = storenote(&notes[2], bufp);

} /* end elf_kcore_store_hdr() */
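
/*
 * The resulting header makes /proc/kcore look like a regular ET_CORE file:
 * one PT_NOTE segment carrying the three pseudo-notes above, followed by one
 * PT_LOAD segment per kcore_list entry whose p_vaddr and p_offset follow the
 * kc_vaddr_to_offset() mapping.
 */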

/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 */
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
        ssize_t acc = 0;
        size_t size, tsz;
        size_t elf_buflen;
        int nphdr;
        unsigned long start;

        read_lock(&kclist_lock);
        size = get_kcore_size(&nphdr, &elf_buflen);

        if (buflen == 0 || *fpos >= size) {
                read_unlock(&kclist_lock);
                return 0;
        }

        /* trim buflen to not go beyond EOF */
        if (buflen > size - *fpos)
                buflen = size - *fpos;

        /* construct an ELF core header if we'll need some of it */
        if (*fpos < elf_buflen) {
                char * elf_buf;

                tsz = elf_buflen - *fpos;
                if (buflen < tsz)
                        tsz = buflen;
                elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
                if (!elf_buf) {
                        read_unlock(&kclist_lock);
                        return -ENOMEM;
                }
                elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
                read_unlock(&kclist_lock);
                if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
                        kfree(elf_buf);
                        return -EFAULT;
                }
                kfree(elf_buf);
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        } else
                read_unlock(&kclist_lock);

        /*
         * Check to see if our file offset matches with any of
         * the addresses in the elf_phdr on our list.
         */
        start = kc_offset_to_vaddr(*fpos - elf_buflen);
        if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
                tsz = buflen;

        while (buflen) {
                struct kcore_list *m;

                read_lock(&kclist_lock);
                list_for_each_entry(m, &kclist_head, list) {
                        if (start >= m->addr && start < (m->addr+m->size))
                                break;
                }
                read_unlock(&kclist_lock);

                if (&m->list == &kclist_head) {
                        if (clear_user(buffer, tsz))
                                return -EFAULT;
                } else if (is_vmalloc_or_module_addr((void *)start)) {
                        char * elf_buf;

                        elf_buf = kzalloc(tsz, GFP_KERNEL);
                        if (!elf_buf)
                                return -ENOMEM;
                        vread(elf_buf, (char *)start, tsz);
                        /* we have to zero-fill the user buffer even if nothing was read */
                        if (copy_to_user(buffer, elf_buf, tsz)) {
                                kfree(elf_buf);
                                return -EFAULT;
                        }
                        kfree(elf_buf);
                } else {
                        if (kern_addr_valid(start)) {
                                unsigned long n;

                                n = copy_to_user(buffer, (char *)start, tsz);
                                /*
                                 * We cannot distinguish between a fault on the
                                 * source and a fault on the destination. When
                                 * this happens we clear the user buffer as well
                                 * and hope clear_user() will trigger the EFAULT
                                 * again.
                                 */
                                if (n) {
                                        if (clear_user(buffer + tsz - n,
                                                                n))
                                                return -EFAULT;
                                }
                        } else {
                                if (clear_user(buffer, tsz))
                                        return -EFAULT;
                        }
                }
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;
                start += tsz;
                tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
        }

        return acc;
}
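
/*
 * Summary of the read path above: offsets below elf_buflen are served from a
 * freshly built header in a temporary buffer; everything past that is copied
 * at most a page at a time - through a bounce buffer and vread() for
 * vmalloc/module addresses, straight from the direct map via copy_to_user()
 * for addresses that pass kern_addr_valid(), and zero-filled for anything not
 * backed by a registered region.
 */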


static int open_kcore(struct inode *inode, struct file *filp)
{
        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
        if (kcore_need_update)
                kcore_update_ram();
        if (i_size_read(inode) != proc_root_kcore->size) {
                mutex_lock(&inode->i_mutex);
                i_size_write(inode, proc_root_kcore->size);
                mutex_unlock(&inode->i_mutex);
        }
        return 0;
}


static const struct file_operations proc_kcore_operations = {
        .read           = read_kcore,
        .open           = open_kcore,
        .llseek         = default_llseek,
};

/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
                                    unsigned long action, void *arg)
{
        switch (action) {
        case MEM_ONLINE:
        case MEM_OFFLINE:
                write_lock(&kclist_lock);
                kcore_need_update = 1;
                write_unlock(&kclist_lock);
        }
        return NOTIFY_OK;
}

static struct notifier_block kcore_callback_nb __meminitdata = {
        .notifier_call = kcore_callback,
        .priority = 0,
};

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, a special segment is used for mapping the kernel text instead
 * of the direct-map area, so we need to create a special TEXT entry for it.
 */
static void __init proc_kcore_text_init(void)
{
        kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * The module area (MODULES_VADDR..MODULES_END) has no intersection with the
 * vmalloc area, so register it as a separate KCORE_VMALLOC entry.
 */
struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
        kclist_add(&kcore_modules, (void *)MODULES_VADDR,
                        MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
        proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
                                      &proc_kcore_operations);
        if (!proc_root_kcore) {
                pr_err("couldn't create /proc/kcore\n");
                return 0; /* Always returns 0. */
        }
        /* Store text area if it's special */
        proc_kcore_text_init();
        /* Store vmalloc area */
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
        add_modules_range();
        /* Store direct-map area from physical memory map */
        kcore_update_ram();
        register_hotmemory_notifier(&kcore_callback_nb);

        return 0;
}
module_init(proc_kcore_init);
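
/*
 * Typical use from user space (requires CAP_SYS_RAWIO, see open_kcore()),
 * e.g.:
 *
 *      # readelf -l /proc/kcore        - list the PT_LOAD segments
 *      # gdb vmlinux /proc/kcore       - inspect live kernel memory
 *
 * The i_size that open_kcore() publishes matches what get_kcore_size()
 * computed for the currently registered regions.
 */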