From 18747151308f9e0fb63766057957617ec4afa190 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Tue, 10 Feb 2015 14:10:12 -0800
Subject: [PATCH] arc: drop _PAGE_FILE and pte_file()-related helpers

We've replaced remap_file_pages(2) implementation with emulation. Nobody
creates non-linear mapping anymore.

Signed-off-by: Kirill A. Shutemov
Acked-by: Vineet Gupta
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arc/include/asm/pgtable.h | 13 +------------
 1 file changed, 1 insertion(+), 12 deletions(-)

diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 6b0b7f7e..bdc8cca 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -61,7 +61,6 @@
 #define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
 #define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
 #define _PAGE_MODIFIED      (1<<6)	/* Page modified (dirty) (S) */
-#define _PAGE_FILE          (1<<7)	/* page cache/ swap (S) */
 #define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
 #define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */
@@ -73,7 +72,6 @@
 #define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
 #define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
 #define _PAGE_MODIFIED      (1<<5)	/* Page modified (dirty) (S) */
-#define _PAGE_FILE          (1<<6)	/* page cache/ swap (S) */
 #define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
 #define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */
 #define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
@@ -268,15 +266,6 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 	pte; \
 })
 
-/* TBD: Non linear mapping stuff */
-static inline int pte_file(pte_t pte)
-{
-	return pte_val(pte) & _PAGE_FILE;
-}
-
-#define PTE_FILE_MAX_BITS	30
-#define pgoff_to_pte(x)		__pte(x)
-#define pte_to_pgoff(x)		(pte_val(x) >> 2)
 #define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
 #define pfn_pte(pfn, prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
 #define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
@@ -364,7 +353,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 
 /* Encode swap {type,off} tuple into PTE
  * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
- * both PAGE_FILE and PAGE_PRESENT are zero in a PTE holding swap "identifier"
+ * PAGE_PRESENT is zero in a PTE holding swap "identifier"
  */
 #define __swp_entry(type, off)	((swp_entry_t) { \
 					((type) & 0x1f) | ((off) << 13) })
-- 
2.7.4
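
Editor's note (not part of the patch): the comment touched by the last hunk explains why the
swap encoding keeps working once _PAGE_FILE is gone: the 5-bit swap type sits in bits 4:0, the
offset starts at bit 13, and bits 12:5 stay zero, so _PAGE_PRESENT (bit 10 on pre-v3 MMUs,
bit 9 on v3+, per the hunks above) can never be set in a swap PTE. The standalone C sketch
below only illustrates that packing; the ILL_* constant and helper names are hypothetical and
are not kernel code.

#include <assert.h>
#include <stdio.h>

/* Illustration only: mirrors the __swp_entry() packing shown in the patch.
 * ILL_PAGE_PRESENT copies the pre-v3 MMU bit position (1<<10); the v3+
 * position (1<<9) also falls inside the zeroed 12:5 range. */
#define ILL_PAGE_PRESENT	(1ul << 10)

static unsigned long swp_pack(unsigned int type, unsigned long off)
{
	/* 5-bit type in bits 4:0, bits 12:5 left zero, offset from bit 13 up */
	return (type & 0x1f) | (off << 13);
}

static unsigned int swp_type(unsigned long val)    { return val & 0x1f; }
static unsigned long swp_offset(unsigned long val) { return val >> 13; }

int main(void)
{
	unsigned long e = swp_pack(3, 0x1234);

	/* A swap "identifier" must never look like a present PTE */
	assert(!(e & ILL_PAGE_PRESENT));
	assert(swp_type(e) == 3 && swp_offset(e) == 0x1234);
	printf("entry=0x%lx type=%u off=0x%lx\n", e, swp_type(e), swp_offset(e));
	return 0;
}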