Use page_to_phys() and pfn_to_page() to avoid open-coded mem_map usage.

Open-coded "page - mem_map" and "mem_map + pfn" conversions hard-wire the
assumption that mem_map is a single contiguous array. Going through the
generic helpers keeps these call sites correct regardless of how mem_map
is laid out.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
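---

For reference: with a flat, contiguous mem_map the generic helpers reduce
to exactly the arithmetic being removed here. A rough sketch of the
FLATMEM-style definitions, assuming ARCH_PFN_OFFSET is zero:

	#define pfn_to_page(pfn)	(mem_map + (pfn))
	#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
	#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

Nothing changes in the common case; the call sites just stop assuming that
mem_map is one flat array. The first hunk converts the page-state loop in
show_mem().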
{
int i, total = 0, reserved = 0;
int shared = 0, cached = 0;
+ struct page *page;
printk("Mem-info:\n");
show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
i = max_mapnr;
while (i-- > 0) {
+ page = pfn_to_page(i);
total++;
- if (PageReserved(mem_map+i))
+ if (PageReserved(page))
reserved++;
- else if (PageSwapCache(mem_map+i))
+ else if (PageSwapCache(page))
cached++;
- else if (page_count(mem_map+i))
- shared += page_count(mem_map+i) - 1;
+ else if (page_count(page))
+ shared += page_count(page) - 1;
}
printk("%d pages of RAM\n",total);
printk("%d reserved pages\n",reserved);
return __io_virt(address);
}
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
-
extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
static inline void * ioremap (unsigned long offset, unsigned long size)
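The replacement definition lands next to the other address conversion
macros. Note that __pa() and __va() are plain casts here, i.e. kernel
virtual and physical addresses are identical on this architecture: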
#define __pa(x) (unsigned long)(x)
#define __va(x) (void *)(unsigned long)(x)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
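A quick equivalence check; this self-test is hypothetical (the function
and buffer names are made up for illustration) and not part of the patch:

	static char testpage[PAGE_SIZE]
		__attribute__((__aligned__(PAGE_SIZE)));

	static int __init check_page_to_phys(void)
	{
		/* virt_to_page() is pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
		 * and page_to_phys() is page_to_pfn(page) << PAGE_SHIFT,
		 * so the round trip must give back the page-aligned
		 * physical address, which equals __pa() of the buffer.
		 */
		BUG_ON(page_to_phys(virt_to_page(testpage)) != __pa(testpage));
		return 0;
	}
	__initcall(check_page_to_phys);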
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
- pmd_populate_kernel(mm, pmd, (pte_t *)((page-mem_map) << PAGE_SHIFT));
+ pmd_populate_kernel(mm, pmd, (pte_t *)page_to_phys(page));
}
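Behaviour is unchanged here: the pmd entry holds the physical address of
the pte table, and since __pa()/__va() are 1:1 casts on this architecture,
that physical address is also a valid kernel pointer, hence the (pte_t *)
cast.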
static inline int page_test_and_clear_dirty(struct page *page)
{
- unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT);
+ unsigned long physpage = page_to_phys(page);
int skey = page_get_storage_key(physpage);
if (skey & _PAGE_CHANGED)
*/
static inline int page_test_and_clear_young(struct page *page)
{
- unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT);
+ unsigned long physpage = page_to_phys(page);
int ccode;
- asm volatile (
- "rrbe 0,%1\n"
- "ipm %0\n"
- "srl %0,28\n"
+ asm volatile(
+ " rrbe 0,%1\n"
+ " ipm %0\n"
+ " srl %0,28\n"
: "=d" (ccode) : "a" (physpage) : "cc" );
return ccode & 2;
}
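For reference: rrbe resets the referenced bit in the storage key of the
addressed page frame and sets the condition code according to the bit's
old value; ipm/srl move the condition code into ccode, where bit 0x2
indicates the page had been referenced, hence the "ccode & 2".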
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
- unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT);
+ unsigned long physpage = page_to_phys(page);
return mk_pte_phys(physpage, pgprot);
}
#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
-#define pmd_page(pmd) (mem_map+(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#define pgd_page_vaddr(pgd) (pgd_val(pgd) & PAGE_MASK)
-#define pgd_page(pgd) (mem_map+(pgd_val(pgd) >> PAGE_SHIFT))
+#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
/* to find an entry in a page-table-directory */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
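With pmd_page() and pgd_page() expressed through pfn_to_page() as well,
none of these headers touch mem_map directly any more, which keeps them
correct for memory models where mem_map is not a single flat array.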