From: Russell King
Date: Thu, 8 Dec 2011 18:02:04 +0000 (+0000)
Subject: Merge branch 'for-rmk' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas...
X-Git-Tag: v3.3-rc1~18^2~3^2~14
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=6ae25a5b9d7ba86d6ac19c403dfa57dae6caa73d;p=platform%2Fkernel%2Flinux-exynos.git

Merge branch 'for-rmk' of git://git./linux/kernel/git/cmarinas/linux into devel-stable

Conflicts:
	arch/arm/mm/ioremap.c
---

6ae25a5b9d7ba86d6ac19c403dfa57dae6caa73d
diff --cc arch/arm/include/asm/pgtable.h
index a784859,3ddcf66..3f2f0eb
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@@ -20,10 -20,16 +20,15 @@@
  #else
+ #include
  #include
 -#include
  #include
+ #ifdef CONFIG_ARM_LPAE
+ #include
+ #else
  #include
+ #endif
  /*
   * Just any arbitrary offset to the start of the vmalloc VM area: the
diff --cc arch/arm/mm/ioremap.c
index 12c7ad2,d1f78ba..80632e8
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@@ -194,7 -208,14 +202,8 @@@ void __iomem * __arm_ioremap_pfn_caller
           */
          if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
                  return NULL;
+ #endif
 -        /*
 -         * Don't allow RAM to be mapped - this causes problems with ARMv6+
 -         */
 -        if (WARN_ON(pfn_valid(pfn)))
 -                return NULL;
 -
          type = get_mem_type(mtype);
          if (!type)
                  return NULL;
@@@ -329,34 -322,28 +338,34 @@@ __arm_ioremap_exec(unsigned long phys_a
  void __iounmap(volatile void __iomem *io_addr)
  {
          void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
 -#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 -        struct vm_struct **p, *tmp;
 +        struct vm_struct *vm;
 -        /*
 -         * If this is a section based mapping we need to handle it
 -         * specially as the VM subsystem does not know how to handle
 -         * such a beast. We need the lock here b/c we need to clear
 -         * all the mappings before the area can be reclaimed
 -         * by someone else.
 -         */
 -        write_lock(&vmlist_lock);
 -        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
 -                if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
 -                        if (tmp->flags & VM_ARM_SECTION_MAPPING) {
 -                                unmap_area_sections((unsigned long)tmp->addr,
 -                                                    tmp->size);
 -                        }
 +        read_lock(&vmlist_lock);
 +        for (vm = vmlist; vm; vm = vm->next) {
 +                if (vm->addr > addr)
 +                        break;
 +                if (!(vm->flags & VM_IOREMAP))
 +                        continue;
 +                /* If this is a static mapping we must leave it alone */
 +                if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
 +                    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
 +                        read_unlock(&vmlist_lock);
 +                        return;
 +                }
- #ifndef CONFIG_SMP
++#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 +                /*
 +                 * If this is a section based mapping we need to handle it
 +                 * specially as the VM subsystem does not know how to handle
 +                 * such a beast.
 +                 */
 +                if ((vm->addr == addr) &&
 +                    (vm->flags & VM_ARM_SECTION_MAPPING)) {
 +                        unmap_area_sections((unsigned long)vm->addr, vm->size);
                          break;
                  }
 -        }
 -        write_unlock(&vmlist_lock);
  #endif
 +        }
 +        read_unlock(&vmlist_lock);
          vunmap(addr);
  }
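
The pgtable.h hunk switches the page-table format header on CONFIG_ARM_LPAE; the include targets themselves were stripped by the plain-text rendering. A minimal sketch of that selection, with the header names assumed rather than read from this hunk:

/*
 * Sketch of the include selection introduced by the pgtable.h hunk.
 * The file names are assumptions, not taken from this diff.
 */
#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>		/* assumed: LPAE, 64-bit descriptors, 3 levels */
#else
#include <asm/pgtable-2level.h>		/* assumed: classic 32-bit descriptors, 2 levels */
#endif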
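
The first ioremap.c hunk adds an #endif that closes a preprocessor conditional around the supersection alignment check in __arm_ioremap_pfn_caller(); the opening directive sits above the hunk and is not shown. A rough sketch of the guarded check, assuming the new guard is the LPAE conditional brought in by this merge:

#ifndef CONFIG_ARM_LPAE			/* assumed: the opening directive is outside the hunk */
	/* Supersection mappings only exist with classic page tables */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif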
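
The __iounmap() hunk interleaves lines from both parents, which makes the resolved function hard to follow in combined-diff form. Reassembling only the result columns (the '+' and '++' lines above) gives roughly the following; treat it as a reading aid reconstructed from the hunk, not as a verbatim copy of the merged file:

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		/* If this is a static mapping we must leave it alone */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
			read_unlock(&vmlist_lock);
			return;
		}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if ((vm->addr == addr) &&
		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
			unmap_area_sections((unsigned long)vm->addr, vm->size);
			break;
		}
#endif
	}
	read_unlock(&vmlist_lock);

	vunmap(addr);
}

The resolution keeps the devel-stable structure (a read_lock()-protected walk of vmlist that leaves VM_ARM_STATIC_MAPPING areas untouched) and folds in the for-rmk change by widening the section-unmap guard from !CONFIG_SMP to !CONFIG_SMP && !CONFIG_ARM_LPAE.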