riscv: Introduce huge page support for 32/64bit kernel
author	Alexandre Ghiti <alex@ghiti.fr>
Sun, 26 May 2019 12:50:38 +0000 (08:50 -0400)
committer	Paul Walmsley <paul.walmsley@sifive.com>
Wed, 3 Jul 2019 22:23:38 +0000 (15:23 -0700)
This patch implements 4MB huge page support for the 32bit kernel
and 2MB/1GB huge page support for the 64bit kernel.

Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Paul Walmsley <paul.walmsley@sifive.com>
arch/riscv/Kconfig
arch/riscv/include/asm/hugetlb.h [new file with mode: 0644]
arch/riscv/include/asm/page.h
arch/riscv/include/asm/pgtable.h
arch/riscv/mm/Makefile
arch/riscv/mm/hugetlbpage.c [new file with mode: 0644]

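For reference, and not part of the patch itself: the sizes above come straight from the page-table geometry. With Sv32 the PMD is folded into the PGD, so HPAGE_SHIFT = PMD_SHIFT = 22 and a first-level leaf maps 4MB; with Sv39 a PMD leaf maps 2MB (shift 21) and a PUD leaf maps 1GB (shift 30). A minimal standalone C sketch of that arithmetic, with the shift values hardcoded as assumptions mirroring the kernel's PMD_SHIFT/PUD_SHIFT for those modes:

/*
 * Standalone sketch, not kernel code: shows how the huge page sizes in the
 * commit message fall out of the Sv32/Sv39 page-table shifts.  The shift
 * values are hardcoded assumptions so the example builds on its own.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define SV32_PMD_SHIFT  22      /* rv32: PMD folded into PGD, root-level leaf */
#define SV39_PMD_SHIFT  21      /* rv64/Sv39: PMD leaf */
#define SV39_PUD_SHIFT  30      /* rv64/Sv39: PUD leaf */

int main(void)
{
	printf("rv32 HPAGE_SIZE:         %lu MB\n", (1UL << SV32_PMD_SHIFT) >> 20);
	printf("rv64 HPAGE_SIZE:         %lu MB\n", (1UL << SV39_PMD_SHIFT) >> 20);
	printf("rv64 PUD huge page:      %lu GB\n", (1UL << SV39_PUD_SHIFT) >> 30);
	printf("rv32 HUGETLB_PAGE_ORDER: %d\n", SV32_PMD_SHIFT - PAGE_SHIFT);
	return 0;
}
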
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 69e6527..a7252b4 100644
@@ -50,6 +50,8 @@ config RISCV
        select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_MMIOWB
        select HAVE_EBPF_JIT if 64BIT
+       select ARCH_HAS_GIGANTIC_PAGE
+       select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
 
 config MMU
        def_bool y
@@ -64,6 +66,12 @@ config PAGE_OFFSET
        default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
        default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
 
+config ARCH_WANT_GENERAL_HUGETLB
+       def_bool y
+
+config SYS_SUPPORTS_HUGETLBFS
+       def_bool y
+
 config STACKTRACE_SUPPORT
        def_bool y
 
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
new file mode 100644
index 0000000..728a5db
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_HUGETLB_H
+#define _ASM_RISCV_HUGETLB_H
+
+#include <asm-generic/hugetlb.h>
+#include <asm/page.h>
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+                                        unsigned long addr,
+                                        unsigned long len) {
+       return 0;
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
+#endif /* _ASM_RISCV_HUGETLB_H */
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index d3e5f6c..707e00a 100644
 #define PAGE_SIZE      (_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK      (~(PAGE_SIZE - 1))
 
+#ifdef CONFIG_64BIT
+#define HUGE_MAX_HSTATE                2
+#else
+#define HUGE_MAX_HSTATE                1
+#endif
+#define HPAGE_SHIFT            PMD_SHIFT
+#define HPAGE_SIZE             (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK              (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER      (HPAGE_SHIFT - PAGE_SHIFT)
+
 /*
  * PAGE_OFFSET -- the first address of the first page of memory.
  * When not using MMU this corresponds to the first free page in
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index f7c3f7d..18ea56f 100644
@@ -113,7 +113,6 @@ static inline void pmd_clear(pmd_t *pmdp)
        set_pmd(pmdp, __pmd(0));
 }
 
-
 static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
 {
        return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
@@ -250,6 +249,11 @@ static inline pte_t pte_mkspecial(pte_t pte)
        return __pte(pte_val(pte) | _PAGE_SPECIAL);
 }
 
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+       return pte;
+}
+
 /* Modify page protection bits */
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
@@ -409,7 +413,7 @@ static inline void pgtable_cache_init(void)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
 
 /*
- * Task size is 0x40000000000 for RV64 or 0xb800000 for RV32.
+ * Task size is 0x4000000000 for RV64 or 0xb800000 for RV32.
  * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
  */
 #ifdef CONFIG_64BIT
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index fc51d3b..74055e1 100644
@@ -12,3 +12,5 @@ obj-y += ioremap.o
 obj-y += cacheflush.o
 obj-y += context.o
 obj-y += sifive_l2_cache.o
+
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
new file mode 100644
index 0000000..0d4747e
--- /dev/null
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/hugetlb.h>
+#include <linux/err.h>
+
+int pud_huge(pud_t pud)
+{
+       return pud_present(pud) &&
+               (pud_val(pud) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+}
+
+int pmd_huge(pmd_t pmd)
+{
+       return pmd_present(pmd) &&
+               (pmd_val(pmd) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+}
+
+static __init int setup_hugepagesz(char *opt)
+{
+       unsigned long ps = memparse(opt, &opt);
+
+       if (ps == HPAGE_SIZE) {
+               hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
+       } else if (IS_ENABLED(CONFIG_64BIT) && ps == PUD_SIZE) {
+               hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+       } else {
+               hugetlb_bad_size();
+               pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20);
+               return 0;
+       }
+
+       return 1;
+}
+__setup("hugepagesz=", setup_hugepagesz);
+
+#ifdef CONFIG_CONTIG_ALLOC
+static __init int gigantic_pages_init(void)
+{
+       /* With CONTIG_ALLOC, we can allocate gigantic pages at runtime */
+       if (IS_ENABLED(CONFIG_64BIT) && !size_to_hstate(1UL << PUD_SHIFT))
+               hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+       return 0;
+}
+arch_initcall(gigantic_pages_init);
+#endif
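
A rough userspace sketch, not part of the patch: once a 64-bit kernel with this change has a huge page reserved (for example through the hugepagesz=/hugepages= parameters parsed by setup_hugepagesz() above, or /proc/sys/vm/nr_hugepages), a 2MB mapping can be requested with mmap(MAP_HUGETLB). The MAP_HUGE_2MB fallback definition below is only there in case the libc headers predate it:

/*
 * Userspace usage sketch, not part of the patch: maps one 2 MB huge page
 * via mmap(MAP_HUGETLB).  Assumes a 64-bit kernel with this change and at
 * least one 2 MB huge page reserved beforehand, e.g. with
 * "echo 1 > /proc/sys/vm/nr_hugepages".
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT  26
#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB    (21 << MAP_HUGE_SHIFT)  /* log2(2 MB) encoded in the flags */
#endif

int main(void)
{
	size_t len = 2UL * 1024 * 1024;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB,
		       -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(p, 0, len);      /* touch the mapping so the huge page is faulted in */
	munmap(p, len);
	printf("mapped and released one 2 MB huge page\n");
	return 0;
}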