// SPDX-License-Identifier: GPL-2.0
/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */
#include <linux/crash_core.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/notifier.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <asm/sections.h>
#include "internal.h"
#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;

#ifndef kc_vaddr_to_offset
#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef	kc_offset_to_vaddr
#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif
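
/*
 * Worked example (illustrative; the PAGE_OFFSET value assumes x86-64 with
 * 4-level paging and no KASLR): with PAGE_OFFSET == 0xffff888000000000, the
 * direct-map address 0xffff888000100000 yields kc_vaddr_to_offset() ==
 * 0x100000, and kc_offset_to_vaddr(0x100000) inverts the mapping.
 */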
static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;
/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * Same as oldmem_pfn_is_ram in vmcore
 */
static int (*mem_pfn_is_ram)(unsigned long pfn);

int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (mem_pfn_is_ram)
		return -EBUSY;
	mem_pfn_is_ram = fn;
	return 0;
}

static int pfn_is_ram(unsigned long pfn)
{
	if (mem_pfn_is_ram)
		return mem_pfn_is_ram(pfn);
	else
		return 1;
}
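
/*
 * Usage sketch (illustrative, not part of this file): a driver that knows
 * certain PFNs are not backed by readable RAM can register a filter at init
 * time. The callback name and the excluded PFN range below are hypothetical.
 *
 *	static int example_pfn_is_ram(unsigned long pfn)
 *	{
 *		if (pfn >= EXAMPLE_EXCLUDED_START_PFN &&
 *		    pfn < EXAMPLE_EXCLUDED_END_PFN)
 *			return 0;	// non-RAM: read back as zeroes
 *		return 1;		// RAM: safe to read
 *	}
 *
 *	register_mem_pfn_is_ram(example_pfn_is_ram);
 */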
/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
		       int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	list_add_tail(&new->list, &kclist_head);
}
static size_t get_kcore_size(int *nphdr, size_t *phdrs_len, size_t *notes_len,
			     size_t *data_offset)
{
	size_t try, size;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */
	size = 0;

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}

	*phdrs_len = *nphdr * sizeof(struct elf_phdr);
	*notes_len = (4 * sizeof(struct elf_note) +
		      3 * ALIGN(sizeof(CORE_STR), 4) +
		      VMCOREINFO_NOTE_NAME_BYTES +
		      ALIGN(sizeof(struct elf_prstatus), 4) +
		      ALIGN(sizeof(struct elf_prpsinfo), 4) +
		      ALIGN(arch_task_struct_size, 4) +
		      ALIGN(vmcoreinfo_size, 4));
	*data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + *phdrs_len +
				  *notes_len);
	return *data_offset + size;
}
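
/*
 * Resulting file layout (descriptive summary of the computation above and of
 * how read_kcore() fills the file in):
 *
 *	offset 0:	struct elfhdr
 *	phdrs_offset:	nphdr * struct elf_phdr (PT_NOTE first, then PT_LOAD)
 *	notes_offset:	PT_NOTE payload, notes_len bytes
 *	data_offset:	page-aligned; memory contents follow, indexed by
 *			kc_vaddr_to_offset(vaddr) + data_offset
 */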
#ifdef CONFIG_HIGHMEM
/*
 * Under CONFIG_HIGHMEM, treat [0...max_low_pfn) as one continuous range of
 * memory: the holes in lowmem are not as big as in the !HIGHMEM case.
 * (HIGHMEM is special because part of memory is _invisible_ to the kernel.)
 */
static int kcore_ram_list(struct list_head *head)
{
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, head);
	return 0;
}

#else /* !CONFIG_HIGHMEM */
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate vmemmap's address from given system ram pfn and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;

	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = PAGE_ALIGN(end);
	/* overlap check (because we have to align pages) */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}
#endif
static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;
	struct page *p;

	if (!pfn_valid(pfn))
		return 1;

	p = pfn_to_page(pfn);

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)page_to_virt(p);
	ent->size = nr_pages << PAGE_SHIFT;

	if (!virt_addr_valid(ent->addr))
		goto free_out;

	/* trim the not-mapped area (taken from the ppc32 code) */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/*
	 * We've already checked virt_addr_valid so we know this address
	 * is a valid pointer, therefore we can check against it to determine
	 * if we need to trim.
	 */
	if (VMALLOC_START > ent->addr) {
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}
static int kcore_ram_list(struct list_head *list)
{
	int nid, ret;
	unsigned long end_pfn;

	/* Not initialized....update now */
	/* find out "max pfn" */
	end_pfn = 0;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long node_end;
		node_end = node_end_pfn(nid);
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
	if (ret)
		return -ENOMEM;
	return 0;
}

#endif /* CONFIG_HIGHMEM */
static int kcore_update_ram(void)
{
	LIST_HEAD(list);
	LIST_HEAD(garbage);
	int nphdr;
	size_t phdrs_len, notes_len, data_offset;
	struct kcore_list *tmp, *pos;
	int ret = 0;

	down_write(&kclist_lock);
	if (!xchg(&kcore_need_update, 0))
		goto out;

	ret = kcore_ram_list(&list);
	if (ret) {
		/* Couldn't get the RAM list, try again next time. */
		WRITE_ONCE(kcore_need_update, 1);
		list_splice_tail(&list, &garbage);
		goto out;
	}

	list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
		if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
			list_move(&pos->list, &garbage);
	}
	list_splice_tail(&list, &kclist_head);

	proc_root_kcore->size = get_kcore_size(&nphdr, &phdrs_len, &notes_len,
					       &data_offset);

out:
	up_write(&kclist_lock);
	list_for_each_entry_safe(pos, tmp, &garbage, list) {
		list_del(&pos->list);
		kfree(pos);
	}
	return ret;
}
static void append_kcore_note(char *notes, size_t *i, const char *name,
			      unsigned int type, const void *desc,
			      size_t descsz)
{
	struct elf_note *note = (struct elf_note *)&notes[*i];

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = descsz;
	note->n_type = type;
	*i += sizeof(*note);
	memcpy(&notes[*i], name, note->n_namesz);
	*i = ALIGN(*i + note->n_namesz, 4);
	memcpy(&notes[*i], desc, descsz);
	*i = ALIGN(*i + descsz, 4);
}
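
/*
 * Each record emitted by append_kcore_note() follows the standard ELF note
 * layout:
 *
 *	struct elf_note	(n_namesz, n_descsz, n_type)
 *	name		(n_namesz bytes, padded to a 4-byte boundary)
 *	desc		(n_descsz bytes, padded to a 4-byte boundary)
 */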
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	char *buf = file->private_data;
	size_t phdrs_offset, notes_offset, data_offset;
	size_t page_offline_frozen = 1;
	size_t phdrs_len, notes_len;
	struct kcore_list *m;
	size_t tsz;
	int nphdr;
	unsigned long start;
	size_t orig_buflen = buflen;
	int ret = 0;

	down_read(&kclist_lock);
	/*
	 * Don't race against drivers that set PageOffline() and expect no
	 * further page access.
	 */
	page_offline_freeze();

	get_kcore_size(&nphdr, &phdrs_len, &notes_len, &data_offset);
	phdrs_offset = sizeof(struct elfhdr);
	notes_offset = phdrs_offset + phdrs_len;

	/* ELF file header. */
	if (buflen && *fpos < sizeof(struct elfhdr)) {
		struct elfhdr ehdr = {
			.e_ident = {
				[EI_MAG0] = ELFMAG0,
				[EI_MAG1] = ELFMAG1,
				[EI_MAG2] = ELFMAG2,
				[EI_MAG3] = ELFMAG3,
				[EI_CLASS] = ELF_CLASS,
				[EI_DATA] = ELF_DATA,
				[EI_VERSION] = EV_CURRENT,
				[EI_OSABI] = ELF_OSABI,
			},
			.e_type = ET_CORE,
			.e_machine = ELF_ARCH,
			.e_version = EV_CURRENT,
			.e_phoff = sizeof(struct elfhdr),
			.e_flags = ELF_CORE_EFLAGS,
			.e_ehsize = sizeof(struct elfhdr),
			.e_phentsize = sizeof(struct elf_phdr),
			.e_phnum = nphdr,
		};

		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
		if (copy_to_user(buffer, (char *)&ehdr + *fpos, tsz)) {
			ret = -EFAULT;
			goto out;
		}

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}
	/* ELF program headers. */
	if (buflen && *fpos < phdrs_offset + phdrs_len) {
		struct elf_phdr *phdrs, *phdr;

		phdrs = kzalloc(phdrs_len, GFP_KERNEL);
		if (!phdrs) {
			ret = -ENOMEM;
			goto out;
		}

		phdrs[0].p_type = PT_NOTE;
		phdrs[0].p_offset = notes_offset;
		phdrs[0].p_filesz = notes_len;

		phdr = &phdrs[1];
		list_for_each_entry(m, &kclist_head, list) {
			phdr->p_type = PT_LOAD;
			phdr->p_flags = PF_R | PF_W | PF_X;
			phdr->p_offset = kc_vaddr_to_offset(m->addr) + data_offset;
			phdr->p_vaddr = (size_t)m->addr;
			if (m->type == KCORE_RAM)
				phdr->p_paddr = __pa(m->addr);
			else if (m->type == KCORE_TEXT)
				phdr->p_paddr = __pa_symbol(m->addr);
			else
				phdr->p_paddr = (elf_addr_t)-1;
			phdr->p_filesz = phdr->p_memsz = m->size;
			phdr->p_align = PAGE_SIZE;
			phdr++;
		}

		tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos);
		if (copy_to_user(buffer, (char *)phdrs + *fpos - phdrs_offset,
				 tsz)) {
			kfree(phdrs);
			ret = -EFAULT;
			goto out;
		}
		kfree(phdrs);

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}
	/* ELF note segment. */
	if (buflen && *fpos < notes_offset + notes_len) {
		struct elf_prstatus prstatus = {};
		struct elf_prpsinfo prpsinfo = {
			.pr_sname = 'R',
			.pr_fname = "vmlinux",
		};
		char *notes;
		size_t i = 0;

		strlcpy(prpsinfo.pr_psargs, saved_command_line,
			sizeof(prpsinfo.pr_psargs));

		notes = kzalloc(notes_len, GFP_KERNEL);
		if (!notes) {
			ret = -ENOMEM;
			goto out;
		}

		append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
				  sizeof(prstatus));
		append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
				  sizeof(prpsinfo));
		append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
				  arch_task_struct_size);
		/*
		 * vmcoreinfo_size is mostly constant after init time, but it
		 * can be changed by crash_save_vmcoreinfo(). Racing here with a
		 * panic on another CPU before the machine goes down is insanely
		 * unlikely, but it's better to not leave potential buffer
		 * overflows lying around, regardless.
		 */
		append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
				  vmcoreinfo_data,
				  min(vmcoreinfo_size, notes_len - i));

		tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos);
		if (copy_to_user(buffer, notes + *fpos - notes_offset, tsz)) {
			kfree(notes);
			ret = -EFAULT;
			goto out;
		}
		kfree(notes);

		buffer += tsz;
		buflen -= tsz;
		*fpos += tsz;
	}
	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - data_offset);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	m = NULL;
	while (buflen) {
		struct page *page;
		unsigned long pfn;

		/*
		 * If this is the first iteration or the address is not within
		 * the previous entry, search for a matching entry.
		 */
		if (!m || start < m->addr || start >= m->addr + m->size) {
			struct kcore_list *iter;

			m = NULL;
			list_for_each_entry(iter, &kclist_head, list) {
				if (start >= iter->addr &&
				    start < iter->addr + iter->size) {
					m = iter;
					break;
				}
			}
		}

		if (page_offline_frozen++ % MAX_ORDER_NR_PAGES == 0) {
			page_offline_thaw();
			cond_resched();
			page_offline_freeze();
		}

		if (!m) {
			if (clear_user(buffer, tsz)) {
				ret = -EFAULT;
				goto out;
			}
			goto skip;
		}
		switch (m->type) {
		case KCORE_VMALLOC:
			vread(buf, (char *)start, tsz);
			/* we have to zero-fill user buffer even if no read */
			if (copy_to_user(buffer, buf, tsz)) {
				ret = -EFAULT;
				goto out;
			}
			break;
		case KCORE_USER:
			/* User page is handled prior to normal kernel page: */
			if (copy_to_user(buffer, (char *)start, tsz)) {
				ret = -EFAULT;
				goto out;
			}
			break;
		case KCORE_RAM:
			pfn = __pa(start) >> PAGE_SHIFT;
			page = pfn_to_online_page(pfn);

			/*
			 * Don't read offline sections, logically offline pages
			 * (e.g., inflated in a balloon), hwpoisoned pages,
			 * and explicitly excluded physical ranges.
			 */
			if (!page || PageOffline(page) ||
			    is_page_hwpoison(page) || !pfn_is_ram(pfn)) {
				if (clear_user(buffer, tsz)) {
					ret = -EFAULT;
					goto out;
				}
				break;
			}
			fallthrough;
		case KCORE_VMEMMAP:
		case KCORE_TEXT:
			if (kern_addr_valid(start)) {
				/*
				 * Using bounce buffer to bypass the
				 * hardened user copy kernel text checks.
				 */
				if (copy_from_kernel_nofault(buf, (void *)start,
						tsz)) {
					if (clear_user(buffer, tsz)) {
						ret = -EFAULT;
						goto out;
					}
				} else {
					if (copy_to_user(buffer, buf, tsz)) {
						ret = -EFAULT;
						goto out;
					}
				}
			} else {
				if (clear_user(buffer, tsz)) {
					ret = -EFAULT;
					goto out;
				}
			}
			break;
		default:
			pr_warn_once("Unhandled KCORE type: %d\n", m->type);
			if (clear_user(buffer, tsz)) {
				ret = -EFAULT;
				goto out;
			}
		}
skip:
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

out:
	page_offline_thaw();
	up_read(&kclist_lock);
	if (ret)
		return ret;
	return orig_buflen - buflen;
}
static int open_kcore(struct inode *inode, struct file *filp)
{
	int ret = security_locked_down(LOCKDOWN_KCORE);

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (ret)
		return ret;

	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;

	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		inode_lock(inode);
		i_size_write(inode, proc_root_kcore->size);
		inode_unlock(inode);
	}
	return 0;
}
static int release_kcore(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct proc_ops kcore_proc_ops = {
	.proc_read	= read_kcore,
	.proc_open	= open_kcore,
	.proc_release	= release_kcore,
	.proc_lseek	= default_llseek,
};
/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		kcore_need_update = 1;
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block kcore_callback_nb __meminitdata = {
	.notifier_call = kcore_callback,
	.priority = 0,
};
static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, a special segment is used for mapping the kernel text instead
 * of the direct-map area, so we need to create a dedicated TEXT entry.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * MODULES_VADDR has no intersection with VMALLOC_ADDR.
 */
static struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
		kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
	}
}
#else
static void __init add_modules_range(void)
{
}
#endif
static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
	if (!proc_root_kcore) {
		pr_err("couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	register_hotmemory_notifier(&kcore_callback_nb);

	return 0;
}
fs_initcall(proc_kcore_init);
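
/*
 * Example consumer (illustrative sketch, not part of the kernel build):
 * userspace can parse /proc/kcore like any ELF core file. The snippet below
 * only validates the header; real consumers (gdb, crash, drgn) walk the
 * PT_LOAD headers to translate kernel addresses into file offsets.
 *
 *	#include <elf.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		Elf64_Ehdr ehdr;	// assumes a 64-bit kernel
 *		int fd = open("/proc/kcore", O_RDONLY);
 *
 *		if (fd < 0 || read(fd, &ehdr, sizeof(ehdr)) != sizeof(ehdr))
 *			return 1;
 *		if (ehdr.e_type == ET_CORE)
 *			printf("core with %u program headers\n",
 *			       (unsigned)ehdr.e_phnum);
 *		close(fd);
 *		return 0;
 *	}
 */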