This introduces some much overdue chainsawing of the fixed PMB support.
Fixed PMB was introduced initially to work around the fact that dynamic
PMB mode was relatively broken, though the two were never intended to
converge. The main areas where they differ are whether the system is
booted in 29-bit mode or 32-bit mode, and whether legacy mappings are to
be preserved. Any system booting in true 32-bit mode will not care about
legacy mappings, so these are roughly decoupled.

Regardless of the entry point, PMB and 32BIT are directly related as far
as the kernel is concerned, so we also switch back to having one select
the other.

With legacy mappings iterated through and applied in the initialization
path, it's now possible to finally merge the two implementations and
permit dynamic remapping on top of the remaining entries, regardless of
whether the boot mappings are crafted by hand or inherited from the boot
loader.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
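
For readers coming to the merged API cold, the surviving dynamic
interface is the pmb_remap()/pmb_unmap() pair from <asm/mmu.h>, which
this patch stubs out when CONFIG_PMB=n. A minimal sketch of a caller
follows; the helper name and the uncached-window use case are
hypothetical, and only pmb_remap()/pmb_unmap() and the PMB_* flags come
from the patch itself:

    #include <asm/mmu.h>

    /*
     * Hypothetical driver helper: put an uncached PMB window over a
     * device's physical region. With CONFIG_PMB=n the pmb_remap() stub
     * returns -EINVAL, so callers can fall back to page tables.
     */
    static long map_device_window(unsigned long virt, unsigned long phys,
                                  unsigned long size)
    {
        long ret;

        ret = pmb_remap(virt, phys, size, PMB_WT | PMB_UB);
        if (ret < 0)
            return ret;     /* no PMB, or no free entries */

        return ret;         /* bytes actually remapped */
    }

    /* Teardown is symmetric: pmb_unmap(virt); */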
$(obj)/romimage/vmlinux: $(obj)/zImage FORCE
$(Q)$(MAKE) $(build)=$(obj)/romimage $@
-KERNEL_MEMORY := 0x00000000
-ifeq ($(CONFIG_PMB_FIXED),y)
-KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \
+KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \
$$[$(CONFIG_MEMORY_START) & 0x1fffffff]')
-endif
-ifeq ($(CONFIG_29BIT),y)
-KERNEL_MEMORY := $(shell /bin/bash -c 'printf "0x%08x" \
- $$[$(CONFIG_MEMORY_START)]')
-endif
KERNEL_LOAD := $(shell /bin/bash -c 'printf "0x%08x" \
$$[$(CONFIG_PAGE_OFFSET) + \
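
A quick sanity check on the merged rule: with a hypothetical
CONFIG_MEMORY_START of 0x48000000, the & 0x1fffffff mask strips the
segment bits and yields KERNEL_MEMORY = 0x08000000, while a 29-bit
value such as 0x08000000 passes through the mask unchanged, which is
why the dedicated CONFIG_29BIT branch can go away.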
/* Returns the privileged segment base of a given address */
#define PXSEG(a) (((unsigned long)(a)) & 0xe0000000)
-#if defined(CONFIG_29BIT) || defined(CONFIG_PMB_FIXED)
+#ifdef CONFIG_29BIT
/*
* Map an address to a certain privileged segment
*/
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P3SEG))
#define P4SEGADDR(a) \
((__typeof__(a))(((unsigned long)(a) & 0x1fffffff) | P4SEG))
-#endif /* 29BIT || PMB_FIXED */
+#endif /* 29BIT */
#endif /* P1SEG */
/* Check if an address can be reached in 29 bits */
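
To make the surviving macros concrete: with P3SEG at 0xc0000000 and
P4SEG at 0xe0000000, a physical address of 0x0c000000 yields
P3SEGADDR(0x0c000000) = 0xcc000000 and P4SEGADDR(0x0c000000) =
0xec000000. These fixed segment aliases only exist in 29-bit mode,
hence the guard tightening from 29BIT || PMB_FIXED to 29BIT alone.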
}
static inline void __iomem *
-__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
+__ioremap_29bit(unsigned long offset, unsigned long size, unsigned long flags)
{
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
+#ifdef CONFIG_29BIT
unsigned long last_addr = offset + size - 1;
-#endif
- void __iomem *ret;
- ret = __ioremap_trapped(offset, size);
- if (ret)
- return ret;
-
-#if defined(CONFIG_SUPERH32) && !defined(CONFIG_PMB_FIXED) && !defined(CONFIG_PMB)
/*
* For P1 and P2 space this is trivial, as everything is already
* mapped. Uncached access for P1 addresses is done through P2.
return (void __iomem *)P4SEGADDR(offset);
#endif
+ return NULL;
+}
+
+static inline void __iomem *
+__ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
+{
+ void __iomem *ret;
+
+ ret = __ioremap_trapped(offset, size);
+ if (ret)
+ return ret;
+
+ ret = __ioremap_29bit(offset, size, flags);
+ if (ret)
+ return ret;
+
return __ioremap(offset, size, flags);
}
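
After the split, ioremap() resolution follows one fixed chain with no
per-config ifdefs at the call site. A condensed illustration, assuming
the usual sh ioremap() wrapper around __ioremap_mode() (the device
range below is made up):

    void __iomem *regs;

    /*
     * 1. __ioremap_trapped(): trapped I/O emulation, if registered
     * 2. __ioremap_29bit():   fixed P1/P2/P4 segment aliases; returns
     *                         NULL on 32-bit kernels
     * 3. __ioremap():         page-table (and PMB) backed mapping
     */
    regs = ioremap(0x1f000000, 0x100);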
#else
struct pmb_entry *link;
};
+#ifdef CONFIG_PMB
/* arch/sh/mm/pmb.c */
long pmb_remap(unsigned long virt, unsigned long phys,
unsigned long size, unsigned long flags);
void pmb_unmap(unsigned long addr);
int pmb_init(void);
+#else
+static inline long pmb_remap(unsigned long virt, unsigned long phys,
+ unsigned long size, unsigned long flags)
+{
+ return -EINVAL;
+}
+
+static inline void pmb_unmap(unsigned long addr)
+{
+}
+
+static inline int pmb_init(void)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_PMB */
+
#endif /* __ASSEMBLY__ */
#endif /* __MMU_H */
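
The stubs above are what let generic code call into the PMB layer
unconditionally; with CONFIG_PMB=n they collapse at compile time. A
caller can now be written without guards (sketch; the message is
illustrative, and the in-tree setup code simply ignores the return
value):

    if (pmb_init() != 0)
        pr_info("PMB unavailable, staying with existing mappings\n");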
* is not visible (it is part of the PMB mapping) and so needs to be
* added or subtracted as required.
*/
-#if defined(CONFIG_PMB_FIXED)
+#if defined(CONFIG_PMB_LEGACY)
/* phys = virt - PAGE_OFFSET + (__MEMORY_START & 0xe0000000) */
#define PMB_OFFSET (PAGE_OFFSET - PXSEG(__MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PMB_OFFSET)
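
Worked through with hypothetical numbers (PAGE_OFFSET 0x80000000,
__MEMORY_START 0x48000000): PXSEG(0x48000000) = 0x40000000, so
PMB_OFFSET = 0x80000000 - 0x40000000 = 0x40000000, and the kernel's
virtual base at 0x88000000 translates to __pa(0x88000000) =
0x48000000, exactly the physical base behind the boot loader's fixed
PMB window.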
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
+#include <asm/mmu.h>
+#include <cpu/mmu_context.h>
#ifdef CONFIG_CPU_SH4A
#define SYNCO() synco
.long 1 /* LOADER_TYPE */
.long 0x00000000 /* INITRD_START */
.long 0x00000000 /* INITRD_SIZE */
-#if defined(CONFIG_32BIT) && defined(CONFIG_PMB_FIXED)
+#ifdef CONFIG_32BIT
.long 0x53453f00 + 32 /* "SE?" = 32 bit */
#else
.long 0x53453f00 + 29 /* "SE?" = 29 bit */
sh_mv.mv_setup(cmdline_p);
paging_init();
-
-#ifdef CONFIG_PMB_ENABLE
pmb_init();
-#endif
#ifdef CONFIG_SMP
plat_smp_setup();
#include <asm/cache.h>
#include <asm/vmlinux.lds.h>
+#if defined(CONFIG_32BIT) && !defined(CONFIG_PMB_LEGACY)
+#define MEMORY_OFFSET 0
+#else
+#define MEMORY_OFFSET (CONFIG_MEMORY_START & 0x1fffffff)
+#endif
+
ENTRY(_start)
SECTIONS
{
-#ifdef CONFIG_PMB_FIXED
- . = CONFIG_PAGE_OFFSET + (CONFIG_MEMORY_START & 0x1fffffff) +
- CONFIG_ZERO_PAGE_OFFSET;
-#elif defined(CONFIG_32BIT)
- . = CONFIG_PAGE_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
-#else
- . = CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START + CONFIG_ZERO_PAGE_OFFSET;
-#endif
+ . = CONFIG_PAGE_OFFSET + MEMORY_OFFSET + CONFIG_ZERO_PAGE_OFFSET;
_text = .; /* Text and read-only data */
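
One expression now covers all three layouts. With the same hypothetical
values as above plus a CONFIG_ZERO_PAGE_OFFSET of 0x1000: a PMB_LEGACY
kernel links at 0x80000000 + 0x08000000 + 0x1000 = 0x88001000; a
dynamic-PMB 32-bit kernel links at 0x80001000, since MEMORY_OFFSET is
0; and a 29-bit kernel is unchanged, as masking a 29-bit
CONFIG_MEMORY_START with 0x1fffffff is a no-op.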
bool
default y if CPU_SH5
-config PMB_ENABLE
- bool "Support 32-bit physical addressing through PMB"
- depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
- help
- If you say Y here, physical addressing will be extended to
- 32-bits through the SH-4A PMB. If this is not set, legacy
- 29-bit physical addressing will be used.
-
-choice
- prompt "PMB handling type"
- depends on PMB_ENABLE
- default PMB_FIXED
-
config PMB
- bool "PMB"
+ bool "Support 32-bit physical addressing through PMB"
depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
+ select 32BIT
help
If you say Y here, physical addressing will be extended to
32-bits through the SH-4A PMB. If this is not set, legacy
29-bit physical addressing will be used.
-config PMB_FIXED
- bool "fixed PMB"
- depends on MMU && EXPERIMENTAL && CPU_SH4A && !CPU_SH4AL_DSP
+config PMB_LEGACY
+ bool "Support legacy boot mappings for PMB"
+ depends on PMB
select 32BIT
help
If this option is enabled, fixed PMB mappings are inherited
from the boot loader, and the kernel does not attempt dynamic
management. This is the closest to legacy 29-bit physical mode,
and allows systems to support up to 512MiB of system memory.
-endchoice
-
config X2TLB
bool "Enable extended TLB mode"
depends on (CPU_SHX2 || CPU_SHX3) && MMU && EXPERIMENTAL
endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
-obj-$(CONFIG_PMB_ENABLE) += pmb.o
+obj-$(CONFIG_PMB) += pmb.o
obj-$(CONFIG_NUMA) += numa.o
# Special flags for fault_64.o. This puts restrictions on the number of
*
* Privileged Space Mapping Buffer (PMB) Support.
*
- * Copyright (C) 2005, 2006, 2007 Paul Mundt
+ * Copyright (C) 2005 - 2010 Paul Mundt
*
* P1/P2 Section mapping definitions from map32.h, which was:
*
} while (pmbe);
}
-#ifdef CONFIG_PMB
-int __uses_jump_to_uncached pmb_init(void)
-{
- unsigned int i;
- long size, ret;
-
- jump_to_uncached();
-
- /*
- * Insert PMB entries for the P1 and P2 areas so that, after
- * we've switched the MMU to 32-bit mode, the semantics of P1
- * and P2 are the same as in 29-bit mode, e.g.
- *
- * P1 - provides a cached window onto physical memory
- * P2 - provides an uncached window onto physical memory
- */
- size = __MEMORY_START + __MEMORY_SIZE;
-
- ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
- BUG_ON(ret != size);
-
- ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
- BUG_ON(ret != size);
-
- ctrl_outl(0, PMB_IRMCR);
-
- /* PMB.SE and UB[7] */
- ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
-
- /* Flush out the TLB */
- i = ctrl_inl(MMUCR);
- i |= MMUCR_TI;
- ctrl_outl(i, MMUCR);
-
- back_to_cached();
-
- return 0;
-}
-#else
-int __uses_jump_to_uncached pmb_init(void)
+#ifdef CONFIG_PMB_LEGACY
+static int pmb_apply_legacy_mappings(void)
{
int i;
unsigned long addr, data;
-
- jump_to_uncached();
+ unsigned int applied = 0;
for (i = 0; i < PMB_ENTRY_MAX; i++) {
struct pmb_entry *pmbe;
pmbe = pmb_alloc(vpn, ppn, flags, i);
WARN_ON(IS_ERR(pmbe));
+
+ applied++;
+ }
+
+ return (applied == 0);
+}
+#else
+static inline int pmb_apply_legacy_mappings(void)
+{
+ return 1;
+}
+#endif
+
+int __uses_jump_to_uncached pmb_init(void)
+{
+ unsigned int i;
+ long size, ret;
+
+ jump_to_uncached();
+
+ /*
+ * Attempt to apply the legacy boot mappings if configured. If
+ * this is successful then we simply carry on with those and
+ * don't bother establishing additional memory mappings. Dynamic
+ * device mappings through pmb_remap() can still be bolted on
+ * after this.
+ */
+ ret = pmb_apply_legacy_mappings();
+ if (ret == 0) {
+ back_to_cached();
+ return 0;
}
+ /*
+ * Insert PMB entries for the P1 and P2 areas so that, after
+ * we've switched the MMU to 32-bit mode, the semantics of P1
+ * and P2 are the same as in 29-bit mode, e.g.
+ *
+ * P1 - provides a cached window onto physical memory
+ * P2 - provides an uncached window onto physical memory
+ */
+ size = __MEMORY_START + __MEMORY_SIZE;
+
+ ret = pmb_remap(P1SEG, 0x00000000, size, PMB_C);
+ BUG_ON(ret != size);
+
+ ret = pmb_remap(P2SEG, 0x00000000, size, PMB_WT | PMB_UB);
+ BUG_ON(ret != size);
+
+ ctrl_outl(0, PMB_IRMCR);
+
+ /* PMB.SE and UB[7] */
+ ctrl_outl(PASCR_SE | (1 << 7), PMB_PASCR);
+
+ /* Flush out the TLB */
+ i = ctrl_inl(MMUCR);
+ i |= MMUCR_TI;
+ ctrl_outl(i, MMUCR);
+
back_to_cached();
return 0;
}
-#endif /* CONFIG_PMB */
static int pmb_seq_show(struct seq_file *file, void *iter)
{
{
return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
-
subsys_initcall(pmb_sysdev_init);
#endif