Merge branch 'for-next/mte' into for-next/core
author Will Deacon <will@kernel.org>
Fri, 2 Oct 2020 11:16:11 +0000 (12:16 +0100)
committer Will Deacon <will@kernel.org>
Fri, 2 Oct 2020 11:16:11 +0000 (12:16 +0100)
Add userspace support for the Memory Tagging Extension introduced by
Armv8.5.

(Catalin Marinas and others)
* for-next/mte: (30 commits)
  arm64: mte: Fix typo in memory tagging ABI documentation
  arm64: mte: Add Memory Tagging Extension documentation
  arm64: mte: Kconfig entry
  arm64: mte: Save tags when hibernating
  arm64: mte: Enable swap of tagged pages
  mm: Add arch hooks for saving/restoring tags
  fs: Handle intra-page faults in copy_mount_options()
  arm64: mte: ptrace: Add NT_ARM_TAGGED_ADDR_CTRL regset
  arm64: mte: ptrace: Add PTRACE_{PEEK,POKE}MTETAGS support
  arm64: mte: Allow {set,get}_tagged_addr_ctrl() on non-current tasks
  arm64: mte: Restore the GCR_EL1 register after a suspend
  arm64: mte: Allow user control of the generated random tags via prctl()
  arm64: mte: Allow user control of the tag check mode via prctl()
  mm: Allow arm64 mmap(PROT_MTE) on RAM-based files
  arm64: mte: Validate the PROT_MTE request via arch_validate_flags()
  mm: Introduce arch_validate_flags()
  arm64: mte: Add PROT_MTE support to mmap() and mprotect()
  mm: Introduce arch_calc_vm_flag_bits()
  arm64: mte: Tags-aware aware memcmp_pages() implementation
  arm64: Avoid unnecessary clear_user_page() indirection
  ...

22 files changed:
1  2 
arch/arm64/Kconfig
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/hwcap.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/process.c
arch/arm64/kernel/signal.c
arch/arm64/kernel/suspend.c
arch/arm64/kvm/sys_regs.c
arch/arm64/mm/Makefile
arch/arm64/mm/fault.c
arch/arm64/mm/ptdump.c

Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index 88f160afc468b39b410a4cd33b8eda4dc2c020bb,5fb9b728459b884f2bb24378c403dda765e74f17..bbaf0bc4ad60966ea35a730aa5a746355fdcf424
@@@ -57,8 -59,10 +57,9 @@@ arm64-reloc-test-y := reloc_test_core.
  obj-$(CONFIG_CRASH_DUMP)              += crash_dump.o
  obj-$(CONFIG_CRASH_CORE)              += crash_core.o
  obj-$(CONFIG_ARM_SDE_INTERFACE)               += sdei.o
 -obj-$(CONFIG_ARM64_SSBD)              += ssbd.o
  obj-$(CONFIG_ARM64_PTR_AUTH)          += pointer_auth.o
  obj-$(CONFIG_SHADOW_CALL_STACK)               += scs.o
+ obj-$(CONFIG_ARM64_MTE)                       += mte.o
  
  obj-y                                 += vdso/ probes/
  obj-$(CONFIG_COMPAT_VDSO)             += vdso32/
index 79207c3235530de83dc58dbb4a574973c698c836,add9da5d8ea34075b8023ac40ff508c9f6b73dd4..dcc165b3fc046b8573a579f04dd3e71474c7c471
@@@ -227,7 -228,9 +228,9 @@@ static const struct arm64_ftr_bits ftr_
  static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MPAMFRAC_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_RASFRAC_SHIFT, 4, 0),
 -      ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_MTE_SHIFT, 4, ID_AA64PFR1_MTE_NI),
 +      ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
                                    FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_BT_SHIFT, 4, 0),
        ARM64_FTR_END,
index 25113245825c0b16c1181c6e01cbbe05b52a63d5,6104b87f021dca3a46603af1b1fc37812cd333a8..6a7bb3729d605dd50281794b9563010f71e50385
@@@ -43,56 -43,58 +43,57 @@@ static const char *icache_policy_str[] 
  unsigned long __icache_flags;
  
  static const char *const hwcap_str[] = {
 -      "fp",
 -      "asimd",
 -      "evtstrm",
 -      "aes",
 -      "pmull",
 -      "sha1",
 -      "sha2",
 -      "crc32",
 -      "atomics",
 -      "fphp",
 -      "asimdhp",
 -      "cpuid",
 -      "asimdrdm",
 -      "jscvt",
 -      "fcma",
 -      "lrcpc",
 -      "dcpop",
 -      "sha3",
 -      "sm3",
 -      "sm4",
 -      "asimddp",
 -      "sha512",
 -      "sve",
 -      "asimdfhm",
 -      "dit",
 -      "uscat",
 -      "ilrcpc",
 -      "flagm",
 -      "ssbs",
 -      "sb",
 -      "paca",
 -      "pacg",
 -      "dcpodp",
 -      "sve2",
 -      "sveaes",
 -      "svepmull",
 -      "svebitperm",
 -      "svesha3",
 -      "svesm4",
 -      "flagm2",
 -      "frint",
 -      "svei8mm",
 -      "svef32mm",
 -      "svef64mm",
 -      "svebf16",
 -      "i8mm",
 -      "bf16",
 -      "dgh",
 -      "rng",
 -      "bti",
 -      "mte",
 -      NULL
 +      [KERNEL_HWCAP_FP]               = "fp",
 +      [KERNEL_HWCAP_ASIMD]            = "asimd",
 +      [KERNEL_HWCAP_EVTSTRM]          = "evtstrm",
 +      [KERNEL_HWCAP_AES]              = "aes",
 +      [KERNEL_HWCAP_PMULL]            = "pmull",
 +      [KERNEL_HWCAP_SHA1]             = "sha1",
 +      [KERNEL_HWCAP_SHA2]             = "sha2",
 +      [KERNEL_HWCAP_CRC32]            = "crc32",
 +      [KERNEL_HWCAP_ATOMICS]          = "atomics",
 +      [KERNEL_HWCAP_FPHP]             = "fphp",
 +      [KERNEL_HWCAP_ASIMDHP]          = "asimdhp",
 +      [KERNEL_HWCAP_CPUID]            = "cpuid",
 +      [KERNEL_HWCAP_ASIMDRDM]         = "asimdrdm",
 +      [KERNEL_HWCAP_JSCVT]            = "jscvt",
 +      [KERNEL_HWCAP_FCMA]             = "fcma",
 +      [KERNEL_HWCAP_LRCPC]            = "lrcpc",
 +      [KERNEL_HWCAP_DCPOP]            = "dcpop",
 +      [KERNEL_HWCAP_SHA3]             = "sha3",
 +      [KERNEL_HWCAP_SM3]              = "sm3",
 +      [KERNEL_HWCAP_SM4]              = "sm4",
 +      [KERNEL_HWCAP_ASIMDDP]          = "asimddp",
 +      [KERNEL_HWCAP_SHA512]           = "sha512",
 +      [KERNEL_HWCAP_SVE]              = "sve",
 +      [KERNEL_HWCAP_ASIMDFHM]         = "asimdfhm",
 +      [KERNEL_HWCAP_DIT]              = "dit",
 +      [KERNEL_HWCAP_USCAT]            = "uscat",
 +      [KERNEL_HWCAP_ILRCPC]           = "ilrcpc",
 +      [KERNEL_HWCAP_FLAGM]            = "flagm",
 +      [KERNEL_HWCAP_SSBS]             = "ssbs",
 +      [KERNEL_HWCAP_SB]               = "sb",
 +      [KERNEL_HWCAP_PACA]             = "paca",
 +      [KERNEL_HWCAP_PACG]             = "pacg",
 +      [KERNEL_HWCAP_DCPODP]           = "dcpodp",
 +      [KERNEL_HWCAP_SVE2]             = "sve2",
 +      [KERNEL_HWCAP_SVEAES]           = "sveaes",
 +      [KERNEL_HWCAP_SVEPMULL]         = "svepmull",
 +      [KERNEL_HWCAP_SVEBITPERM]       = "svebitperm",
 +      [KERNEL_HWCAP_SVESHA3]          = "svesha3",
 +      [KERNEL_HWCAP_SVESM4]           = "svesm4",
 +      [KERNEL_HWCAP_FLAGM2]           = "flagm2",
 +      [KERNEL_HWCAP_FRINT]            = "frint",
 +      [KERNEL_HWCAP_SVEI8MM]          = "svei8mm",
 +      [KERNEL_HWCAP_SVEF32MM]         = "svef32mm",
 +      [KERNEL_HWCAP_SVEF64MM]         = "svef64mm",
 +      [KERNEL_HWCAP_SVEBF16]          = "svebf16",
 +      [KERNEL_HWCAP_I8MM]             = "i8mm",
 +      [KERNEL_HWCAP_BF16]             = "bf16",
 +      [KERNEL_HWCAP_DGH]              = "dgh",
 +      [KERNEL_HWCAP_RNG]              = "rng",
 +      [KERNEL_HWCAP_BTI]              = "bti",
++      [KERNEL_HWCAP_MTE]              = "mte",
  };
  
  #ifdef CONFIG_COMPAT
index aeb337029d567410419f718658f116ec0c7cd33c,ff34461524d4cd70270442a61aee5679cb218af3..f30007dff35f7eb89eb8aa5e7e8eb34fa75f009e
@@@ -145,8 -146,35 +145,34 @@@ alternative_cb   spectre_v4_patch_fw_miti
        nop                                     // Patched to SMC/HVC #0
  alternative_cb_end
  .L__asm_ssbd_skip\@:
 -#endif
        .endm
  
+       /* Check for MTE asynchronous tag check faults */
+       .macro check_mte_async_tcf, flgs, tmp
+ #ifdef CONFIG_ARM64_MTE
+ alternative_if_not ARM64_MTE
+       b       1f
+ alternative_else_nop_endif
+       mrs_s   \tmp, SYS_TFSRE0_EL1
+       tbz     \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
+       /* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
+       orr     \flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
+       str     \flgs, [tsk, #TSK_TI_FLAGS]
+       msr_s   SYS_TFSRE0_EL1, xzr
+ 1:
+ #endif
+       .endm
+       /* Clear the MTE asynchronous tag check faults */
+       .macro clear_mte_async_tcf
+ #ifdef CONFIG_ARM64_MTE
+ alternative_if ARM64_MTE
+       dsb     ish
+       msr_s   SYS_TFSRE0_EL1, xzr
+ alternative_else_nop_endif
+ #endif
+       .endm
        .macro  kernel_entry, el, regsize = 64
        .if     \regsize == 32
        mov     w0, w0                          // zero upper 32 bits of x0
Simple merge
Simple merge
Simple merge
index 584c14ce3c860d814cbf60658e2dec9aba0f701a,62c239cd60c27c4cd3d22fb7d1943576842740eb..96cd347c7a4651597fa2459d4256188134792c1c
@@@ -72,7 -73,11 +73,10 @@@ void notrace __cpu_suspend_exit(void
         * have turned the mitigation on. If the user has forcefully
         * disabled it, make sure their wishes are obeyed.
         */
 -      if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
 -              arm64_set_ssbd_mitigation(false);
 +      spectre_v4_enable_mitigation(NULL);
+       /* Restore additional MTE-specific configuration */
+       mte_suspend_exit();
  }
  
  /*
index 7b8a8f6169d06f0d7a28ed7e58264af23a19b5e1,379f4969d0bd7e8338cb2c9792f485f9026ec94d..9ca270603980869b10ec09c112a8ee55b881844f
@@@ -1131,9 -1131,8 +1131,11 @@@ static u64 read_id_reg(const struct kvm
                if (!vcpu_has_sve(vcpu))
                        val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
                val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
 +              if (!(val & (0xfUL << ID_AA64PFR0_CSV2_SHIFT)) &&
 +                  arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
 +                      val |= (1UL << ID_AA64PFR0_CSV2_SHIFT);
+       } else if (id == SYS_ID_AA64PFR1_EL1) {
+               val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);
        } else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
                val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
                         (0xfUL << ID_AA64ISAR1_API_SHIFT) |
Simple merge
Simple merge
index 265284dc942df34fe75caa469dfc8c6c0b936c22,0000000000000000000000000000000000000000..807dc634bbd2464a993f33524cb495f9df2ba9f2
mode 100644,000000..100644
--- /dev/null
@@@ -1,389 -1,0 +1,393 @@@
 +// SPDX-License-Identifier: GPL-2.0-only
 +/*
 + * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 + * Debug helper to dump the current kernel pagetables of the system
 + * so that we can see what the various memory ranges are set to.
 + *
 + * Derived from x86 and arm implementation:
 + * (C) Copyright 2008 Intel Corporation
 + *
 + * Author: Arjan van de Ven <arjan@linux.intel.com>
 + */
 +#include <linux/debugfs.h>
 +#include <linux/errno.h>
 +#include <linux/fs.h>
 +#include <linux/io.h>
 +#include <linux/init.h>
 +#include <linux/mm.h>
 +#include <linux/ptdump.h>
 +#include <linux/sched.h>
 +#include <linux/seq_file.h>
 +
 +#include <asm/fixmap.h>
 +#include <asm/kasan.h>
 +#include <asm/memory.h>
 +#include <asm/pgtable-hwdef.h>
 +#include <asm/ptdump.h>
 +
 +
 +enum address_markers_idx {
 +      PAGE_OFFSET_NR = 0,
 +      PAGE_END_NR,
 +#ifdef CONFIG_KASAN
 +      KASAN_START_NR,
 +#endif
 +};
 +
 +static struct addr_marker address_markers[] = {
 +      { PAGE_OFFSET,                  "Linear Mapping start" },
 +      { 0 /* PAGE_END */,             "Linear Mapping end" },
 +#ifdef CONFIG_KASAN
 +      { 0 /* KASAN_SHADOW_START */,   "Kasan shadow start" },
 +      { KASAN_SHADOW_END,             "Kasan shadow end" },
 +#endif
 +      { BPF_JIT_REGION_START,         "BPF start" },
 +      { BPF_JIT_REGION_END,           "BPF end" },
 +      { MODULES_VADDR,                "Modules start" },
 +      { MODULES_END,                  "Modules end" },
 +      { VMALLOC_START,                "vmalloc() area" },
 +      { VMALLOC_END,                  "vmalloc() end" },
 +      { FIXADDR_START,                "Fixmap start" },
 +      { FIXADDR_TOP,                  "Fixmap end" },
 +      { PCI_IO_START,                 "PCI I/O start" },
 +      { PCI_IO_END,                   "PCI I/O end" },
 +#ifdef CONFIG_SPARSEMEM_VMEMMAP
 +      { VMEMMAP_START,                "vmemmap start" },
 +      { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
 +#endif
 +      { -1,                           NULL },
 +};
 +
 +#define pt_dump_seq_printf(m, fmt, args...)   \
 +({                                            \
 +      if (m)                                  \
 +              seq_printf(m, fmt, ##args);     \
 +})
 +
 +#define pt_dump_seq_puts(m, fmt)      \
 +({                                    \
 +      if (m)                          \
 +              seq_printf(m, fmt);     \
 +})
 +
 +/*
 + * The page dumper groups page table entries of the same type into a single
 + * description. It uses pg_state to track the range information while
 + * iterating over the pte entries. When the continuity is broken it then
 + * dumps out a description of the range.
 + */
 +struct pg_state {
 +      struct ptdump_state ptdump;
 +      struct seq_file *seq;
 +      const struct addr_marker *marker;
 +      unsigned long start_address;
 +      int level;
 +      u64 current_prot;
 +      bool check_wx;
 +      unsigned long wx_pages;
 +      unsigned long uxn_pages;
 +};
 +
 +struct prot_bits {
 +      u64             mask;
 +      u64             val;
 +      const char      *set;
 +      const char      *clear;
 +};
 +
 +static const struct prot_bits pte_bits[] = {
 +      {
 +              .mask   = PTE_VALID,
 +              .val    = PTE_VALID,
 +              .set    = " ",
 +              .clear  = "F",
 +      }, {
 +              .mask   = PTE_USER,
 +              .val    = PTE_USER,
 +              .set    = "USR",
 +              .clear  = "   ",
 +      }, {
 +              .mask   = PTE_RDONLY,
 +              .val    = PTE_RDONLY,
 +              .set    = "ro",
 +              .clear  = "RW",
 +      }, {
 +              .mask   = PTE_PXN,
 +              .val    = PTE_PXN,
 +              .set    = "NX",
 +              .clear  = "x ",
 +      }, {
 +              .mask   = PTE_SHARED,
 +              .val    = PTE_SHARED,
 +              .set    = "SHD",
 +              .clear  = "   ",
 +      }, {
 +              .mask   = PTE_AF,
 +              .val    = PTE_AF,
 +              .set    = "AF",
 +              .clear  = "  ",
 +      }, {
 +              .mask   = PTE_NG,
 +              .val    = PTE_NG,
 +              .set    = "NG",
 +              .clear  = "  ",
 +      }, {
 +              .mask   = PTE_CONT,
 +              .val    = PTE_CONT,
 +              .set    = "CON",
 +              .clear  = "   ",
 +      }, {
 +              .mask   = PTE_TABLE_BIT,
 +              .val    = PTE_TABLE_BIT,
 +              .set    = "   ",
 +              .clear  = "BLK",
 +      }, {
 +              .mask   = PTE_UXN,
 +              .val    = PTE_UXN,
 +              .set    = "UXN",
 +              .clear  = "   ",
 +      }, {
 +              .mask   = PTE_GP,
 +              .val    = PTE_GP,
 +              .set    = "GP",
 +              .clear  = "  ",
 +      }, {
 +              .mask   = PTE_ATTRINDX_MASK,
 +              .val    = PTE_ATTRINDX(MT_DEVICE_nGnRnE),
 +              .set    = "DEVICE/nGnRnE",
 +      }, {
 +              .mask   = PTE_ATTRINDX_MASK,
 +              .val    = PTE_ATTRINDX(MT_DEVICE_nGnRE),
 +              .set    = "DEVICE/nGnRE",
 +      }, {
 +              .mask   = PTE_ATTRINDX_MASK,
 +              .val    = PTE_ATTRINDX(MT_DEVICE_GRE),
 +              .set    = "DEVICE/GRE",
 +      }, {
 +              .mask   = PTE_ATTRINDX_MASK,
 +              .val    = PTE_ATTRINDX(MT_NORMAL_NC),
 +              .set    = "MEM/NORMAL-NC",
 +      }, {
 +              .mask   = PTE_ATTRINDX_MASK,
 +              .val    = PTE_ATTRINDX(MT_NORMAL),
 +              .set    = "MEM/NORMAL",
++      }, {
++              .mask   = PTE_ATTRINDX_MASK,
++              .val    = PTE_ATTRINDX(MT_NORMAL_TAGGED),
++              .set    = "MEM/NORMAL-TAGGED",
 +      }
 +};
 +
 +struct pg_level {
 +      const struct prot_bits *bits;
 +      const char *name;
 +      size_t num;
 +      u64 mask;
 +};
 +
 +static struct pg_level pg_level[] = {
 +      { /* pgd */
 +              .name   = "PGD",
 +              .bits   = pte_bits,
 +              .num    = ARRAY_SIZE(pte_bits),
 +      }, { /* p4d */
 +              .name   = "P4D",
 +              .bits   = pte_bits,
 +              .num    = ARRAY_SIZE(pte_bits),
 +      }, { /* pud */
 +              .name   = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
 +              .bits   = pte_bits,
 +              .num    = ARRAY_SIZE(pte_bits),
 +      }, { /* pmd */
 +              .name   = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
 +              .bits   = pte_bits,
 +              .num    = ARRAY_SIZE(pte_bits),
 +      }, { /* pte */
 +              .name   = "PTE",
 +              .bits   = pte_bits,
 +              .num    = ARRAY_SIZE(pte_bits),
 +      },
 +};
 +
 +static void dump_prot(struct pg_state *st, const struct prot_bits *bits,
 +                      size_t num)
 +{
 +      unsigned i;
 +
 +      for (i = 0; i < num; i++, bits++) {
 +              const char *s;
 +
 +              if ((st->current_prot & bits->mask) == bits->val)
 +                      s = bits->set;
 +              else
 +                      s = bits->clear;
 +
 +              if (s)
 +                      pt_dump_seq_printf(st->seq, " %s", s);
 +      }
 +}
 +
 +static void note_prot_uxn(struct pg_state *st, unsigned long addr)
 +{
 +      if (!st->check_wx)
 +              return;
 +
 +      if ((st->current_prot & PTE_UXN) == PTE_UXN)
 +              return;
 +
 +      WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n",
 +                (void *)st->start_address, (void *)st->start_address);
 +
 +      st->uxn_pages += (addr - st->start_address) / PAGE_SIZE;
 +}
 +
 +static void note_prot_wx(struct pg_state *st, unsigned long addr)
 +{
 +      if (!st->check_wx)
 +              return;
 +      if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY)
 +              return;
 +      if ((st->current_prot & PTE_PXN) == PTE_PXN)
 +              return;
 +
 +      WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n",
 +                (void *)st->start_address, (void *)st->start_address);
 +
 +      st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
 +}
 +
 +static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level,
 +                    u64 val)
 +{
 +      struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
 +      static const char units[] = "KMGTPE";
 +      u64 prot = 0;
 +
 +      if (level >= 0)
 +              prot = val & pg_level[level].mask;
 +
 +      if (st->level == -1) {
 +              st->level = level;
 +              st->current_prot = prot;
 +              st->start_address = addr;
 +              pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
 +      } else if (prot != st->current_prot || level != st->level ||
 +                 addr >= st->marker[1].start_address) {
 +              const char *unit = units;
 +              unsigned long delta;
 +
 +              if (st->current_prot) {
 +                      note_prot_uxn(st, addr);
 +                      note_prot_wx(st, addr);
 +              }
 +
 +              pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx   ",
 +                                 st->start_address, addr);
 +
 +              delta = (addr - st->start_address) >> 10;
 +              while (!(delta & 1023) && unit[1]) {
 +                      delta >>= 10;
 +                      unit++;
 +              }
 +              pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
 +                                 pg_level[st->level].name);
 +              if (st->current_prot && pg_level[st->level].bits)
 +                      dump_prot(st, pg_level[st->level].bits,
 +                                pg_level[st->level].num);
 +              pt_dump_seq_puts(st->seq, "\n");
 +
 +              if (addr >= st->marker[1].start_address) {
 +                      st->marker++;
 +                      pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
 +              }
 +
 +              st->start_address = addr;
 +              st->current_prot = prot;
 +              st->level = level;
 +      }
 +
 +      if (addr >= st->marker[1].start_address) {
 +              st->marker++;
 +              pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
 +      }
 +
 +}
 +
 +void ptdump_walk(struct seq_file *s, struct ptdump_info *info)
 +{
 +      unsigned long end = ~0UL;
 +      struct pg_state st;
 +
 +      if (info->base_addr < TASK_SIZE_64)
 +              end = TASK_SIZE_64;
 +
 +      st = (struct pg_state){
 +              .seq = s,
 +              .marker = info->markers,
 +              .ptdump = {
 +                      .note_page = note_page,
 +                      .range = (struct ptdump_range[]){
 +                              {info->base_addr, end},
 +                              {0, 0}
 +                      }
 +              }
 +      };
 +
 +      ptdump_walk_pgd(&st.ptdump, info->mm, NULL);
 +}
 +
 +static void ptdump_initialize(void)
 +{
 +      unsigned i, j;
 +
 +      for (i = 0; i < ARRAY_SIZE(pg_level); i++)
 +              if (pg_level[i].bits)
 +                      for (j = 0; j < pg_level[i].num; j++)
 +                              pg_level[i].mask |= pg_level[i].bits[j].mask;
 +}
 +
 +static struct ptdump_info kernel_ptdump_info = {
 +      .mm             = &init_mm,
 +      .markers        = address_markers,
 +      .base_addr      = PAGE_OFFSET,
 +};
 +
 +void ptdump_check_wx(void)
 +{
 +      struct pg_state st = {
 +              .seq = NULL,
 +              .marker = (struct addr_marker[]) {
 +                      { 0, NULL},
 +                      { -1, NULL},
 +              },
 +              .level = -1,
 +              .check_wx = true,
 +              .ptdump = {
 +                      .note_page = note_page,
 +                      .range = (struct ptdump_range[]) {
 +                              {PAGE_OFFSET, ~0UL},
 +                              {0, 0}
 +                      }
 +              }
 +      };
 +
 +      ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
 +
 +      if (st.wx_pages || st.uxn_pages)
 +              pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n",
 +                      st.wx_pages, st.uxn_pages);
 +      else
 +              pr_info("Checked W+X mappings: passed, no W+X pages found\n");
 +}
 +
 +static int ptdump_init(void)
 +{
 +      address_markers[PAGE_END_NR].start_address = PAGE_END;
 +#ifdef CONFIG_KASAN
 +      address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
 +#endif
 +      ptdump_initialize();
 +      ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
 +      return 0;
 +}
 +device_initcall(ptdump_init);