1 // SPDX-License-Identifier: GPL-2.0-only
#include <linux/coredump.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mte.h>
/*
 * Walk every VMA of the iterator's mm that has MTE enabled (VM_MTE set).
 * The entire walk collapses to a no-op when the CPU lacks MTE support,
 * so callers need no separate feature check.
 */
#define for_each_mte_vma(vmi, vma)					\
	if (system_supports_mte())					\
		for_each_vma(vmi, vma)					\
			if (vma->vm_flags & VM_MTE)
16 static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
18 if (vma->vm_flags & VM_DONTDUMP)
21 return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
24 /* Derived from dump_user_range(); start/end must be page-aligned */
25 static int mte_dump_tag_range(struct coredump_params *cprm,
26 unsigned long start, unsigned long end)
32 for (addr = start; addr < end; addr += PAGE_SIZE) {
33 struct page *page = get_dump_page(addr);
36 * get_dump_page() returns NULL when encountering an empty
37 * page table entry that would otherwise have been filled with
38 * the zero page. Skip the equivalent tag dump which would
39 * have been all zeros.
42 dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
47 * Pages mapped in user space as !pte_access_permitted() (e.g.
48 * PROT_EXEC only) may not have the PG_mte_tagged flag set.
50 if (!test_bit(PG_mte_tagged, &page->flags)) {
52 dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
57 tags = mte_allocate_tag_storage();
65 mte_save_page_tags(page_address(page), tags);
67 if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
68 mte_free_tag_storage(tags);
75 mte_free_tag_storage(tags);
80 Elf_Half elf_core_extra_phdrs(void)
82 struct vm_area_struct *vma;
84 VMA_ITERATOR(vmi, current->mm, 0);
86 for_each_mte_vma(vmi, vma)
92 int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
94 struct vm_area_struct *vma;
95 VMA_ITERATOR(vmi, current->mm, 0);
97 for_each_mte_vma(vmi, vma) {
100 phdr.p_type = PT_AARCH64_MEMTAG_MTE;
101 phdr.p_offset = offset;
102 phdr.p_vaddr = vma->vm_start;
104 phdr.p_filesz = mte_vma_tag_dump_size(vma);
105 phdr.p_memsz = vma->vm_end - vma->vm_start;
106 offset += phdr.p_filesz;
110 if (!dump_emit(cprm, &phdr, sizeof(phdr)))
117 size_t elf_core_extra_data_size(void)
119 struct vm_area_struct *vma;
120 size_t data_size = 0;
121 VMA_ITERATOR(vmi, current->mm, 0);
123 for_each_mte_vma(vmi, vma)
124 data_size += mte_vma_tag_dump_size(vma);
129 int elf_core_write_extra_data(struct coredump_params *cprm)
131 struct vm_area_struct *vma;
132 VMA_ITERATOR(vmi, current->mm, 0);
134 for_each_mte_vma(vmi, vma) {
135 if (vma->vm_flags & VM_DONTDUMP)
138 if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))